├── .env.sample
├── .github
└── workflows
│ └── ci.yml
├── .gitignore
├── Cargo.toml
├── LICENSE-APACHE
├── LICENSE-MIT
├── Makefile
├── README.md
├── chaindexing-tests
├── Cargo.toml
└── src
│ ├── db.rs
│ ├── factory.rs
│ ├── factory
│ ├── contracts.rs
│ ├── events.rs
│ ├── handlers.rs
│ └── providers.rs
│ ├── lib.rs
│ ├── main.rs
│ ├── test_runner.rs
│ ├── tests.rs
│ └── tests
│ ├── handlers.rs
│ ├── ingester.rs
│ ├── repos.rs
│ ├── repos
│ └── postgres_repo.rs
│ └── states.rs
├── chaindexing
├── Cargo.toml
└── src
│ ├── augmenting_std.rs
│ ├── booting.rs
│ ├── chain_reorg.rs
│ ├── chains.rs
│ ├── config.rs
│ ├── contracts.rs
│ ├── deferred_futures.rs
│ ├── diesel.rs
│ ├── events.rs
│ ├── events
│ └── event.rs
│ ├── handlers.rs
│ ├── handlers
│ ├── handle_events.rs
│ ├── handler_context.rs
│ ├── maybe_handle_chain_reorg.rs
│ ├── pure_handler.rs
│ └── side_effect_handler.rs
│ ├── ingester.rs
│ ├── ingester
│ ├── error.rs
│ ├── filters.rs
│ ├── ingest_events.rs
│ ├── maybe_handle_chain_reorg.rs
│ └── provider.rs
│ ├── lib.rs
│ ├── nodes.rs
│ ├── nodes
│ ├── node.rs
│ ├── node_heartbeat.rs
│ ├── node_task.rs
│ ├── node_tasks.rs
│ └── node_tasks_runner.rs
│ ├── pruning.rs
│ ├── repos.rs
│ ├── repos
│ ├── postgres_repo.rs
│ ├── postgres_repo
│ │ ├── migrations.rs
│ │ └── raw_queries.rs
│ ├── repo.rs
│ └── streams.rs
│ ├── root.rs
│ ├── states.rs
│ └── states
│ ├── chain_state.rs
│ ├── contract_state.rs
│ ├── filters.rs
│ ├── migrations.rs
│ ├── multi_chain_state.rs
│ ├── state.rs
│ ├── state_versions.rs
│ ├── state_views.rs
│ └── updates.rs
├── docker-compose.yml
├── renovate.json
└── rustfmt.toml
/.env.sample:
--------------------------------------------------------------------------------
1 | # db
2 | TEST_DATABASE_URL=postgres://postgres:postgres@localhost:5432/chaindexing_tests
3 | SETUP_TEST_DB=true
--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | name: CI
2 |
3 | on:
4 | push:
5 | branches: [main]
6 | pull_request:
7 | branches: [main]
8 | types: [opened, synchronize, reopened]
9 | workflow_dispatch:
10 | schedule: [cron: "44 4 * * 6"]
11 |
12 | permissions:
13 | contents: read
14 |
15 | env:
16 | # RUSTFLAGS: -Dwarnings
17 | TEST_DATABASE_URL: postgres://postgres:postgres@localhost:5432/chaindexing_tests
18 |
19 | jobs:
20 | test:
21 | name: Rust ${{matrix.rust}}
22 | runs-on: ubuntu-latest
23 | timeout-minutes: 45
24 | strategy:
25 | fail-fast: false
26 | matrix:
27 | rust: [stable]
28 |
29 | services:
30 | postgres:
31 | image: postgres
32 | env:
33 | POSTGRES_PASSWORD: postgres
34 | POSTGRES_USER: postgres
35 |
36 | ports:
37 | - 5432:5432
38 |
39 | options: >-
40 | --health-cmd pg_isready
41 | --health-interval 10s
42 | --health-timeout 5s
43 | --health-retries 5
44 |
45 | steps:
46 | - uses: actions/checkout@v4
47 | - uses: dtolnay/rust-toolchain@stable
48 | with:
49 | toolchain: ${{matrix.rust}}
50 | - name: Setup Database
51 | run: cargo run -p chaindexing-tests
52 | - name: Run Tests
53 | run: cargo test
54 |
55 | clippy:
56 | name: Clippy
57 | runs-on: ubuntu-latest
58 | timeout-minutes: 45
59 | steps:
60 | - uses: actions/checkout@v4
61 | - uses: dtolnay/rust-toolchain@clippy
62 | - run: cargo clippy --tests -- -Dclippy::all
63 |
64 | fmt:
65 | name: fmt
66 | runs-on: ubuntu-latest
67 | steps:
68 | - uses: actions/checkout@v4
69 | - uses: dtolnay/rust-toolchain@stable
70 | with:
71 | components: rustfmt
72 | - uses: Swatinem/rust-cache@v2
73 | # Check fmt
74 | - name: "rustfmt --check"
75 | # Workaround for rust-lang/cargo#7732
76 | run: |
77 | if ! rustfmt --check --edition 2021 $(git ls-files '*.rs'); then
78 | printf "Please run \`rustfmt --edition 2021 \$(git ls-files '*.rs')\` to fix rustfmt errors.\n" >&2
79 | exit 1
80 | fi
81 |
82 | outdated:
83 | name: Outdated
84 | runs-on: ubuntu-latest
85 | timeout-minutes: 45
86 | steps:
87 | - uses: actions/checkout@v4
88 | - uses: dtolnay/install@cargo-outdated
89 | - run: cargo outdated --workspace --exit-code 1
90 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | Cargo.lock
2 | target/
3 | guide/build/
4 | /gh-pages
5 |
6 | *.so
7 | *.out
8 | *.pyc
9 | *.pid
10 | *.sock
11 | *~
12 | .DS_Store
13 |
14 | # These are backup files generated by rustfmt
15 | **/*.rs.bk
16 |
17 | # Configuration directory generated by CLion
18 | .idea
19 |
20 | # Configuration directory generated by VSCode
21 | .vscode
22 |
23 | .env
24 | postgres-data
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
1 | [workspace]
2 | resolver = "2"
3 | members = [
4 | "chaindexing",
5 | "chaindexing-tests"
6 | ]
--------------------------------------------------------------------------------
/LICENSE-APACHE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | Copyright 2023-2024 Abolaji Oladele
179 |
180 | Licensed under the Apache License, Version 2.0 (the "License");
181 | you may not use this file except in compliance with the License.
182 | You may obtain a copy of the License at
183 |
184 | http://www.apache.org/licenses/LICENSE-2.0
185 |
186 | Unless required by applicable law or agreed to in writing, software
187 | distributed under the License is distributed on an "AS IS" BASIS,
188 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
189 | See the License for the specific language governing permissions and
190 | limitations under the License.
--------------------------------------------------------------------------------
/LICENSE-MIT:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2024 Abolaji Oladele
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | db.start:
2 | docker-compose up
3 |
4 | db.stop:
5 | docker-compose down
6 |
7 | db.drop:
8 | rm -rf ./postgres-data
9 |
10 | db.reset:
11 | make db.stop && make db.drop && make db.start
12 |
13 | tests.setup:
14 | cargo run -p chaindexing-tests
15 |
16 | tests:
17 | 	make tests.setup && cargo test
18 |
19 | tests.without.capture:
20 | make tests.setup && cargo test -- --nocapture
21 |
22 | tests.with.name:
23 | cargo test -p chaindexing-tests -- $(name)
24 |
25 | tests.with.name.and.backtrace:
26 | RUST_BACKTRACE=1 cargo test -p chaindexing-tests -- $(name)
27 |
28 | tests.with.backtrace:
29 | RUST_BACKTRACE=1 make tests
30 |
31 | doc:
32 | cargo doc --open
33 |
34 | publish:
35 | cargo publish -p chaindexing
36 |
37 | publish.dry:
38 | 	cargo publish -p chaindexing --dry-run
39 |
40 | publish.dirty:
41 | cargo publish -p chaindexing --allow-dirty
42 |
43 | publish.dirty.dry:
44 | cargo publish -p chaindexing --allow-dirty --dry-run
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Chaindexing
2 |
3 | [ ](https://github.com/jurshsmith/chaindexing-rs)
4 | [ ](https://crates.io/crates/chaindexing)
5 | [ ](https://github.com/jurshsmith/chaindexing-rs/actions?query=branch%3Amain)
6 |
7 | Index any EVM chain and query in SQL
8 |
9 | [Getting Started](#getting-started) | [Examples](https://github.com/chaindexing/chaindexing-examples/tree/main/rust) | [Design Goals & Features](#design-goals--features) | [RoadMap](#roadmap) | [Contributing](#contributing)
10 |
11 | ## Getting Started
12 |
13 | 📊 Here is what indexing and tracking owners of your favorite NFTs looks like:
14 |
15 | ```rust
16 | use chaindexing::states::{ContractState, Filters, Updates};
17 | use chaindexing::{EventContext, EventHandler};
18 |
19 | use crate::states::Nft;
20 |
21 | pub struct TransferHandler;
22 |
23 | #[chaindexing::augmenting_std::async_trait]
24 | impl EventHandler for TransferHandler {
25 | fn abi(&self) -> &'static str {
26 | "event Transfer(address indexed from, address indexed to, uint256 indexed tokenId)"
27 | }
28 | async fn handle_event<'a, 'b>(&self, context: EventContext<'a, 'b>) {
29 | let event_params = context.get_event_params();
30 |
31 | let _from = event_params.get_address_string("from");
32 | let to = event_params.get_address_string("to");
33 | let token_id = event_params.get_u32("tokenId");
34 |
35 | if let Some(existing_nft) =
36 | Nft::read_one(&Filters::new("token_id", token_id), &context).await
37 | {
38 | let updates = Updates::new("owner_address", &to);
39 | existing_nft.update(&updates, &context).await;
40 | } else {
41 | let new_nft = Nft {
42 | token_id,
43 | owner_address: to,
44 | };
45 |
46 | new_nft.create(&context).await;
47 | }
48 | }
49 | }
50 | ```
51 |
52 | A quick and effective way to get started is by exploring the comprehensive examples provided here: [https://github.com/chaindexing/chaindexing-examples/tree/main/rust](https://github.com/chaindexing/chaindexing-examples/tree/main/rust).
53 |
54 | ## Design Goals & Features
55 |
56 | - 💸 Free forever
57 | - ⚡ Real-time use-cases
58 | - 🌐 Multi-chain
59 | - 🧂 Granular, 🧩 Modular & 📈 Scalable
60 | - 🌍 Environment-agnostic to allow inspecting 🔍 & replicating indexes anywhere!
61 | - 🔓 ORM-agnostic, use any ORM to access indexed data
62 | - 📤 Easy export to any data lake: S3, Snowflake, etc.
63 | - 🚫 No complex YAML/JSON/CLI config
64 | - 💪 Index contracts discovered at runtime
65 | - ✨ Handles re-org with no UX impact
66 | - 🔥 Side effect handling for notifications & bridging use cases
67 | - 💸 Optimize RPC cost by indexing when certain activities happen in your DApp
68 | - 💎 Language-agnostic, so no macros!
69 |
70 | ## RoadMap
71 |
72 | - ⬜ Expose `is_at_block_tail` flag to improve op heuristics for applications
73 | - ⬜ Support SQLite Database (Currently supports only Postgres)
74 | - ⬜ Support indexing raw transactions & call traces.
75 | - ⬜ Improved error handling/messages/reporting (Please feel free to open an issue when an opaque runtime error is encountered)
76 | - ⬜ Support TLS connections
77 | - ⬜ Minimal UI for inspecting events and indexed states
78 |
79 | ## Contributing
80 |
81 | All contributions are welcome. Before working on a PR, please consider opening an issue detailing the feature/bug. Equally, when submitting a PR, please ensure that all checks pass to facilitate a smooth review process.
82 |
--------------------------------------------------------------------------------
/chaindexing-tests/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "chaindexing-tests"
3 | version = "0.1.0"
4 | edition = "2021"
5 |
6 | [dependencies]
7 | chaindexing = { path = "../chaindexing", features = ["postgres"] }
8 | ethers = "2.0"
9 | futures-util = "0.3"
10 | dotenvy = "0.15"
11 | diesel = { version = "2", features = ["postgres", "chrono"] }
12 | rand = "0.8.5"
13 | tokio = { version = "1.37", features = ["full"] }
14 |
15 |
--------------------------------------------------------------------------------
/chaindexing-tests/src/db.rs:
--------------------------------------------------------------------------------
1 | use diesel::pg::PgConnection;
2 | use diesel::prelude::*;
3 | use dotenvy::dotenv;
4 | use std::env;
5 |
6 | pub fn setup() {
7 | let db_url = database_url();
8 |
9 | let _conn = PgConnection::establish(&db_url).unwrap_or_else(|_error| {
10 | let (db_name, db_raw_url) = get_db_name_and_raw_url(&db_url);
11 |
12 | let mut raw_conn = connect_to_database_url_or_panic(&db_raw_url);
13 |
14 | create_database(&db_name, &mut raw_conn);
15 |
16 | connect()
17 | });
18 | }
19 |
20 | fn connect() -> PgConnection {
21 | connect_to_database_url_or_panic(&database_url())
22 | }
23 |
24 | pub fn database_url() -> String {
25 | dotenv().ok();
26 |
27 | env::var("TEST_DATABASE_URL").expect("TEST_DATABASE_URL env variable needs to be set.")
28 | }
29 |
30 | fn get_db_name_and_raw_url(url: &str) -> (String, String) {
31 |     let mut url_split = url.split('/').collect::<Vec<_>>();
32 |
33 |     let db_name = url_split.pop().expect("DATABASE NAME needs to be specified. See: .env.sample");
34 | let db_raw_url = url_split.join("/");
35 |
36 | (db_name.to_string(), db_raw_url)
37 | }
38 |
39 | #[allow(clippy::uninlined_format_args)]
40 | fn create_database(db_name: &str, conn: &mut PgConnection) {
41 | diesel::sql_query(format!(r#"CREATE DATABASE "{}""#, db_name))
42 | .execute(conn)
43 | .unwrap();
44 | }
45 |
46 | #[allow(clippy::uninlined_format_args)]
47 | fn connect_to_database_url_or_panic(db_url: &str) -> PgConnection {
48 | PgConnection::establish(db_url).unwrap_or_else(|_| panic!("Error connecting to {}", db_url))
49 | }
50 |
--------------------------------------------------------------------------------
/chaindexing-tests/src/factory.rs:
--------------------------------------------------------------------------------
1 | mod contracts;
2 | mod events;
3 | mod handlers;
4 | mod providers;
5 |
6 | pub use contracts::*;
7 | pub use events::*;
8 | pub use handlers::*;
9 | pub use providers::*;
10 |
--------------------------------------------------------------------------------
/chaindexing-tests/src/factory/contracts.rs:
--------------------------------------------------------------------------------
1 | use chaindexing::{ChainId, Contract};
2 |
3 | use super::{ApprovalForAllTestHandler, TransferTestHandler};
4 |
5 | pub const BAYC_CONTRACT_ADDRESS: &str = "0xBC4CA0EdA7647A8aB7C2061c2E118A18a936f13D";
6 | pub const BAYC_CONTRACT_START_BLOCK_NUMBER: u32 = 17773490;
7 | pub fn bayc_contract(name: &str, two_digit_nonce: &str) -> Contract<()> {
8 | Contract::new(name)
9 | .add_event_handler(TransferTestHandler)
10 | .add_event_handler(ApprovalForAllTestHandler)
11 | .add_address(
12 | &format!("0xBC4CA0EdA7647A8aB7C2061c2E118A18a936f{two_digit_nonce}D"),
13 | &ChainId::Mainnet,
14 | 17773490,
15 | )
16 | }
17 |
--------------------------------------------------------------------------------
/chaindexing-tests/src/factory/events.rs:
--------------------------------------------------------------------------------
1 | use chaindexing::{ChainId, Contract, ContractEvent, Event};
2 |
3 | use super::{transfer_log, BAYC_CONTRACT_ADDRESS};
4 |
5 | pub fn transfer_event_with_contract(contract: Contract<()>) -> Event {
6 | let contract_address = BAYC_CONTRACT_ADDRESS;
7 | let transfer_log = transfer_log(contract_address);
8 |
9 | Event::new(
10 | &transfer_log,
11 | &ContractEvent::new(
12 | "event Transfer(address indexed from, address indexed to, uint256 indexed tokenId)",
13 | ),
14 | &ChainId::Mainnet,
15 | &contract.name,
16 | 1_i64,
17 | )
18 | }
19 |
--------------------------------------------------------------------------------
/chaindexing-tests/src/factory/handlers.rs:
--------------------------------------------------------------------------------
1 | use chaindexing::{EventContext, EventHandler};
2 |
3 | #[derive(Clone, Debug)]
4 | pub struct NftState;
5 |
6 | pub struct TransferTestHandler;
7 |
8 | #[chaindexing::augmenting_std::async_trait]
9 | impl EventHandler for TransferTestHandler {
10 | fn abi(&self) -> &'static str {
11 | "event Transfer(address indexed from, address indexed to, uint256 indexed tokenId)"
12 | }
13 | async fn handle_event<'a, 'b>(&self, _context: EventContext<'a, 'b>) {}
14 | }
15 |
16 | pub struct ApprovalForAllTestHandler;
17 |
18 | #[chaindexing::augmenting_std::async_trait]
19 | impl EventHandler for ApprovalForAllTestHandler {
20 | fn abi(&self) -> &'static str {
21 | "event ApprovalForAll(address indexed owner, address indexed operator, bool approved)"
22 | }
23 | async fn handle_event<'a, 'b>(&self, _context: EventContext<'a, 'b>) {}
24 | }
25 |
--------------------------------------------------------------------------------
/chaindexing-tests/src/factory/providers.rs:
--------------------------------------------------------------------------------
1 | use chaindexing::IngesterProvider;
2 | use ethers::providers::ProviderError;
3 | use ethers::types::{Block, Filter, Log, TxHash, U64};
4 |
5 | use rand::seq::SliceRandom;
6 |
7 | pub fn empty_provider() -> impl IngesterProvider {
8 | #[derive(Clone)]
9 | struct Provider;
10 | #[chaindexing::augmenting_std::async_trait]
11 | impl IngesterProvider for Provider {
12 |         async fn get_block_number(&self) -> Result<U64, ProviderError> {
13 | Ok(U64::from(0))
14 | }
15 |
16 |         async fn get_logs(&self, _filter: &Filter) -> Result<Vec<Log>, ProviderError> {
17 | Ok(vec![])
18 | }
19 |
20 |         async fn get_block(&self, block_number: U64) -> Result<Block<TxHash>, ProviderError> {
21 | Ok(Block {
22 | number: Some(block_number),
23 | ..Default::default()
24 | })
25 | }
26 | }
27 |
28 | Provider
29 | }
30 |
31 | use ethers::types::{Bytes, H160, H256};
32 | use std::str::FromStr;
33 |
34 | pub fn transfer_log(contract_address: &str) -> Log {
35 |     let log_index = *(1..800).collect::<Vec<_>>().choose(&mut rand::thread_rng()).unwrap();
36 |
37 | Log {
38 | address: H160::from_str(contract_address).unwrap(),
39 | topics: vec![
40 | h256("0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef"),
41 | h256("0x000000000000000000000000b518b3136e491101f22b77f385fe22269c515188"),
42 | h256("0x0000000000000000000000007dfd6013cf8d92b751e63d481b51fe0e4c5abf5e"),
43 | h256("0x000000000000000000000000000000000000000000000000000000000000067d"),
44 | ],
45 | data: Bytes("0x".into()),
46 | block_hash: Some(h256(
47 | "0x8fd4ca304a2e81854059bc3e42f32064cca8b6b453f6286f95060edc6382c6f8",
48 | )),
49 | block_number: Some(18115958.into()),
50 | transaction_hash: Some(h256(
51 | "0x83d751998ff98cd609bc9b18bb36bdef8659cde2f74d6d7a1b0fef2c2bf8f839",
52 | )),
53 | transaction_index: Some(89.into()),
54 | log_index: Some(log_index.into()),
55 | transaction_log_index: None,
56 | log_type: None,
57 | removed: Some(false),
58 | }
59 | }
60 |
61 | fn h256(str: &str) -> H256 {
62 | H256::from_str(str).unwrap()
63 | }
64 |
65 | #[macro_export]
66 | macro_rules! provider_with_logs {
67 | ($contract_address:expr) => {{
68 | use $crate::provider_with_logs;
69 |
70 | provider_with_logs!($contract_address, 17774490)
71 | }};
72 | ($contract_address:expr, $current_block_number:expr) => {{
73 | use chaindexing::IngesterProvider;
74 | use ethers::providers::ProviderError;
75 | use ethers::types::{Block, Filter, Log, TxHash, U64};
76 | use $crate::factory::transfer_log;
77 |
78 | #[derive(Clone)]
79 | struct Provider {
80 | contract_address: String,
81 | }
82 | #[chaindexing::augmenting_std::async_trait]
83 | impl IngesterProvider for Provider {
84 |             async fn get_block_number(&self) -> Result<U64, ProviderError> {
85 | Ok(U64::from($current_block_number))
86 | }
87 |
88 |             async fn get_logs(&self, _filter: &Filter) -> Result<Vec<Log>, ProviderError> {
89 | Ok(vec![transfer_log(&self.contract_address)])
90 | }
91 |
92 |             async fn get_block(&self, block_number: U64) -> Result<Block<TxHash>, ProviderError> {
93 | Ok(Block {
94 | number: Some(block_number),
95 | ..Default::default()
96 | })
97 | }
98 | }
99 |
100 | Provider {
101 | contract_address: $contract_address.to_string(),
102 | }
103 | }};
104 | }
105 |
106 | #[macro_export]
107 | macro_rules! provider_with_filter_stubber {
108 | ($contract_address:expr, $filter_stubber: expr) => {{
109 | use chaindexing::IngesterProvider;
110 | use ethers::providers::ProviderError;
111 | use ethers::types::{Block, Filter, Log, TxHash, U64};
112 |
113 | #[derive(Clone)]
114 | struct Provider;
115 | #[chaindexing::augmenting_std::async_trait]
116 | impl IngesterProvider for Provider {
117 |             async fn get_block_number(&self) -> Result<U64, ProviderError> {
118 | Ok(U64::from(3))
119 | }
120 |
121 |             async fn get_logs(&self, filter: &Filter) -> Result<Vec<Log>, ProviderError> {
122 | let filter_stubber = $filter_stubber;
123 |
124 | filter_stubber(filter);
125 |
126 | Ok(vec![])
127 | }
128 |
129 |             async fn get_block(&self, block_number: U64) -> Result<Block<TxHash>, ProviderError> {
130 | Ok(Block {
131 | number: Some(block_number),
132 | ..Default::default()
133 | })
134 | }
135 | }
136 |
137 | Provider
138 | }};
139 | }
140 |
141 | #[macro_export]
142 | macro_rules! provider_with_empty_logs {
143 | ($contract_address:expr) => {{
144 | use chaindexing::IngesterProvider;
145 | use ethers::providers::ProviderError;
146 | use ethers::types::{Block, Filter, Log, TxHash, U64};
147 |
148 | #[derive(Clone)]
149 | struct Provider;
150 | #[chaindexing::augmenting_std::async_trait]
151 | impl IngesterProvider for Provider {
152 |             async fn get_block_number(&self) -> Result<U64, ProviderError> {
153 | Ok(U64::from(3))
154 | }
155 |
156 |             async fn get_logs(&self, _filter: &Filter) -> Result<Vec<Log>, ProviderError> {
157 | Ok(vec![])
158 | }
159 |
160 |             async fn get_block(&self, block_number: U64) -> Result<Block<TxHash>, ProviderError> {
161 | Ok(Block {
162 | number: Some(block_number),
163 | ..Default::default()
164 | })
165 | }
166 | }
167 |
168 | Provider
169 | }};
170 | }
171 |
--------------------------------------------------------------------------------
/chaindexing-tests/src/lib.rs:
--------------------------------------------------------------------------------
1 | pub mod db;
2 | pub mod factory;
3 | pub mod test_runner;
4 | pub mod tests;
5 |
6 | use std::sync::Arc;
7 |
8 | use chaindexing::{
9 | streams::ContractAddressesStream, ChainId, ChaindexingRepoClient, ContractAddress,
10 | };
11 | use futures_util::StreamExt;
12 | use tokio::sync::Mutex;
13 |
14 | pub async fn find_contract_address_by_contract_name(
15 | repo_client: &Arc>,
16 | contract_name: &str,
17 | chain_id: &ChainId,
18 | ) -> Option {
19 | let mut contract_addresses_stream = ContractAddressesStream::new(repo_client, *chain_id as i64);
20 | contract_addresses_stream
21 | .next()
22 | .await
23 | .iter()
24 | .flatten()
25 | .find(|ca| ca.contract_name == contract_name)
26 | .cloned()
27 | }
28 |
--------------------------------------------------------------------------------
/chaindexing-tests/src/main.rs:
--------------------------------------------------------------------------------
1 | use chaindexing::{ChaindexingRepo, HasRawQueryClient};
2 | use chaindexing_tests::{db, tests};
3 |
4 | #[tokio::main]
5 | async fn main() {
6 | db::setup();
7 | let repo = ChaindexingRepo::new(db::database_url().as_str());
8 | let repo_client = repo.get_client().await;
9 | chaindexing::booting::setup_root(&repo_client).await;
10 | chaindexing::booting::run_internal_migrations(&repo_client).await;
11 |
12 | tests::setup().await;
13 | }
14 |
--------------------------------------------------------------------------------
/chaindexing-tests/src/test_runner.rs:
--------------------------------------------------------------------------------
1 | use crate::db;
2 | use chaindexing::{
3 | ChaindexingRepo, ChaindexingRepoAsyncConnection, ChaindexingRepoClient, ChaindexingRepoConn,
4 | ChaindexingRepoPool, ExecutesWithRawQuery, HasRawQueryClient, Repo,
5 | };
6 | use dotenvy::dotenv;
7 | use std::env;
8 | use std::future::Future;
9 |
10 | pub async fn get_pool() -> ChaindexingRepoPool {
11 | new_repo().get_pool(1).await
12 | }
13 |
14 | pub async fn run_test<'a, TestFn, Fut>(pool: &'a ChaindexingRepoPool, test_fn: TestFn)
15 | where
16 | TestFn: Fn(ChaindexingRepoConn<'a>) -> Fut,
17 | Fut: Future,
18 | {
19 | let mut conn = ChaindexingRepo::get_conn(pool).await;
20 |
21 | if should_setup_test_db() {
22 | db::setup();
23 |
24 | let repo_client = new_repo().get_client().await;
25 | chaindexing::booting::setup_root(&repo_client).await;
26 | chaindexing::booting::run_internal_migrations(&repo_client).await;
27 | }
28 |
29 | conn.begin_test_transaction().await.unwrap();
30 |
31 | test_fn(conn).await;
32 | }
33 |
34 | pub async fn run_test_new(test_fn: TestFn)
35 | where
36 | TestFn: Fn(ChaindexingRepoClient) -> Fut,
37 | Fut: Future,
38 | {
39 | let repo_client = new_repo().get_client().await;
40 |
41 | if should_setup_test_db() {
42 | db::setup();
43 |
44 | chaindexing::booting::setup_root(&repo_client).await;
45 | chaindexing::booting::run_internal_migrations(&repo_client).await;
46 |
47 | truncate_all_tables(&repo_client).await;
48 | }
49 |
50 | test_fn(repo_client).await;
51 | }
52 |
53 | pub fn new_repo() -> ChaindexingRepo {
54 | ChaindexingRepo::new(db::database_url().as_str())
55 | }
56 |
57 | fn should_setup_test_db() -> bool {
58 | dotenv().ok();
59 |
60 | env::var("SETUP_TEST_DB").is_ok()
61 | }
62 |
/// Tables truncated between tests. Includes chaindexing's internal tables
/// plus the user-defined `nfts` state table created by the states tests.
const ALL_TABLE_NAMES: [&str; 5] = [
    "chaindexing_contract_addresses",
    "chaindexing_events",
    "chaindexing_reorged_blocks",
    "chaindexing_root_states",
    "nfts",
];
70 |
/// Truncates every table in `ALL_TABLE_NAMES` that currently exists.
/// The `information_schema` guard makes this safe to run before a table's
/// migration has ever been applied.
async fn truncate_all_tables(repo_client: &ChaindexingRepoClient) {
    for table_name in ALL_TABLE_NAMES {
        ChaindexingRepo::execute(
            repo_client,
            &format!("DO $$
BEGIN
IF EXISTS (SELECT 1 FROM information_schema.tables WHERE table_schema = 'public' AND table_name = '{table_name}') THEN
EXECUTE 'TRUNCATE TABLE {table_name}';
END IF;
END $$"),
        )
        .await;
    }
}
85 |
--------------------------------------------------------------------------------
/chaindexing-tests/src/tests.rs:
--------------------------------------------------------------------------------
1 | mod ingester;
2 | mod repos;
3 | mod states;
4 |
/// Async test-suite setup entry point (currently only the states tests
/// need prior setup).
pub async fn setup() {
    states::setup().await;
}
8 |
--------------------------------------------------------------------------------
/chaindexing-tests/src/tests/handlers.rs:
--------------------------------------------------------------------------------
1 | // TODO: Event Handlers Test here
2 |
--------------------------------------------------------------------------------
/chaindexing-tests/src/tests/ingester.rs:
--------------------------------------------------------------------------------
1 | #[cfg(test)]
2 | mod tests {
3 | use std::collections::HashMap;
4 | use std::sync::Arc;
5 | use tokio::sync::Mutex;
6 |
7 | use crate::db::database_url;
8 | use crate::factory::{bayc_contract, empty_provider, BAYC_CONTRACT_START_BLOCK_NUMBER};
9 | use crate::{
10 | find_contract_address_by_contract_name, provider_with_empty_logs,
11 | provider_with_filter_stubber, provider_with_logs, test_runner,
12 | };
13 | use chaindexing::{
14 | ingester, ChainId, ChaindexingRepo, Config, ExecutesWithRawQuery, HasRawQueryClient,
15 | PostgresRepo, Repo,
16 | };
17 |
18 | #[tokio::test]
19 | pub async fn creates_contract_events() {
20 | let pool = test_runner::get_pool().await;
21 |
22 | test_runner::run_test(&pool, |mut conn| async move {
23 | let repo_client = test_runner::new_repo().get_client().await;
24 | let bayc_contract = bayc_contract("BoredApeYachtClub-9", "01");
25 | let config =
26 | Config::new(PostgresRepo::new(&database_url())).add_contract(bayc_contract.clone());
27 |
28 | static CURRENT_BLOCK_NUMBER: u32 = BAYC_CONTRACT_START_BLOCK_NUMBER + 20;
29 | let contract_address = bayc_contract.addresses.first().cloned().unwrap();
30 | let contract_address = &contract_address.address;
31 | let provider = Arc::new(provider_with_logs!(&contract_address, CURRENT_BLOCK_NUMBER));
32 |
33 | assert!(ChaindexingRepo::get_all_events(&mut conn).await.is_empty());
34 | ChaindexingRepo::create_contract_addresses(&repo_client, &bayc_contract.addresses)
35 | .await;
36 |
37 | let conn = Arc::new(Mutex::new(conn));
38 | let repo_client = Arc::new(Mutex::new(repo_client));
39 | ingester::ingest_for_chain(
40 | &ChainId::Mainnet,
41 | provider,
42 | conn.clone(),
43 | &repo_client,
44 | &config,
45 | &mut HashMap::new(),
46 | )
47 | .await
48 | .unwrap();
49 |
50 | let mut conn = conn.lock().await;
51 | let ingested_events = ChaindexingRepo::get_all_events(&mut conn).await;
52 | let first_event = ingested_events.first().unwrap();
53 | assert_eq!(
54 | first_event.contract_address,
55 | contract_address.to_lowercase()
56 | );
57 | })
58 | .await;
59 | }
60 |
61 | #[tokio::test]
62 | pub async fn starts_from_start_block_number() {
63 | let pool = test_runner::get_pool().await;
64 |
65 | test_runner::run_test(&pool, |conn| async move {
66 | let repo_client = test_runner::new_repo().get_client().await;
67 | let bayc_contract = bayc_contract("BoredApeYachtClub-10", "02");
68 | let config =
69 | Config::new(PostgresRepo::new(&database_url())).add_contract(bayc_contract.clone());
70 |
71 | ChaindexingRepo::create_contract_addresses(&repo_client, &bayc_contract.addresses)
72 | .await;
73 | let provider = Arc::new(provider_with_filter_stubber!(
74 | BAYC_CONTRACT_ADDRESS,
75 | |filter: &Filter| {
76 | assert_eq!(
77 | filter.get_from_block().unwrap().as_u32(),
78 | BAYC_CONTRACT_START_BLOCK_NUMBER
79 | );
80 | }
81 | ));
82 |
83 | let conn = Arc::new(Mutex::new(conn));
84 | let repo_client = Arc::new(Mutex::new(repo_client));
85 | ingester::ingest_for_chain(
86 | &ChainId::Mainnet,
87 | provider,
88 | conn.clone(),
89 | &repo_client,
90 | &config,
91 | &mut HashMap::new(),
92 | )
93 | .await
94 | .unwrap();
95 | })
96 | .await;
97 | }
98 |
99 | // Remove ignore after refactoring EventingIngester to not use diesel
100 | // Currently, it fails because we stream contract addresses
101 | // outside the diesel transaction session
102 | #[ignore]
103 | #[tokio::test]
104 | pub async fn updates_next_block_number_to_ingest_from_for_a_given_batch() {
105 | let pool = test_runner::get_pool().await;
106 |
107 | test_runner::run_test(&pool, |conn| async move {
108 | let repo_client = test_runner::new_repo().get_client().await;
109 | let bayc_contract = bayc_contract("BoredApeYachtClub-8", "03");
110 | let config =
111 | Config::new(PostgresRepo::new(&database_url())).add_contract(bayc_contract.clone());
112 |
113 | static CURRENT_BLOCK_NUMBER: u32 = BAYC_CONTRACT_START_BLOCK_NUMBER + 20;
114 | let contract_address = bayc_contract.addresses.first().cloned().unwrap();
115 | let contract_address = &contract_address.address;
116 | let provider = Arc::new(provider_with_logs!(contract_address, CURRENT_BLOCK_NUMBER));
117 |
118 | ChaindexingRepo::create_contract_addresses(&repo_client, &bayc_contract.addresses)
119 | .await;
120 |
121 | let conn = Arc::new(Mutex::new(conn));
122 | let blocks_per_batch = 10;
123 |
124 | let repo_client = Arc::new(Mutex::new(repo_client));
125 | let config = config.with_blocks_per_batch(blocks_per_batch);
126 | ingester::ingest_for_chain(
127 | &ChainId::Mainnet,
128 | provider,
129 | conn.clone(),
130 | &repo_client,
131 | &config,
132 | &mut HashMap::new(),
133 | )
134 | .await
135 | .unwrap();
136 |
137 | let bayc_contract_address = find_contract_address_by_contract_name(
138 | &repo_client,
139 | "BoredApeYachtClub-8",
140 | &ChainId::Mainnet,
141 | )
142 | .await
143 | .unwrap();
144 | let next_block_number_to_ingest_from =
145 | bayc_contract_address.next_block_number_to_ingest_from as u64;
146 | assert_eq!(
147 | next_block_number_to_ingest_from,
148 | BAYC_CONTRACT_START_BLOCK_NUMBER as u64 + blocks_per_batch + 1
149 | );
150 | })
151 | .await;
152 | }
153 |
154 | // TODO:
155 | #[tokio::test]
156 | pub async fn continues_from_next_block_number_to_ingest_from() {}
157 |
158 | #[tokio::test]
159 | pub async fn does_nothing_when_there_are_no_contracts() {
160 | let pool = test_runner::get_pool().await;
161 |
162 | test_runner::run_test(&pool, |conn| async move {
163 | let repo_client = test_runner::new_repo().get_client().await;
164 | let config: Config<()> = Config::new(PostgresRepo::new(&database_url()));
165 |
166 | let provider = Arc::new(empty_provider());
167 | let conn = Arc::new(Mutex::new(conn));
168 | let repo_client = Arc::new(Mutex::new(repo_client));
169 |
170 | ingester::ingest_for_chain(
171 | &ChainId::Mainnet,
172 | provider,
173 | conn.clone(),
174 | &repo_client,
175 | &config,
176 | &mut HashMap::new(),
177 | )
178 | .await
179 | .unwrap();
180 | let mut conn = conn.lock().await;
181 | assert!(ChaindexingRepo::get_all_events(&mut conn).await.is_empty());
182 | })
183 | .await;
184 | }
185 |
186 | #[tokio::test]
187 | pub async fn does_nothing_when_there_are_no_events_from_contracts() {
188 | let pool = test_runner::get_pool().await;
189 |
190 | test_runner::run_test(&pool, |conn| async move {
191 | let repo_client = test_runner::new_repo().get_client().await;
192 | let bayc_contract = bayc_contract("BoredApeYachtClub-11", "04");
193 | let config =
194 | Config::new(PostgresRepo::new(&database_url())).add_contract(bayc_contract.clone());
195 |
196 | let provider = Arc::new(provider_with_empty_logs!(BAYC_CONTRACT_ADDRESS));
197 |
198 | ChaindexingRepo::create_contract_addresses(&repo_client, &bayc_contract.addresses)
199 | .await;
200 |
201 | let conn = Arc::new(Mutex::new(conn));
202 | let repo_client = Arc::new(Mutex::new(repo_client));
203 | ingester::ingest_for_chain(
204 | &ChainId::Mainnet,
205 | provider,
206 | conn.clone(),
207 | &repo_client,
208 | &config,
209 | &mut HashMap::new(),
210 | )
211 | .await
212 | .unwrap();
213 | })
214 | .await;
215 | }
216 | }
217 |
--------------------------------------------------------------------------------
/chaindexing-tests/src/tests/repos.rs:
--------------------------------------------------------------------------------
1 | mod postgres_repo;
2 |
--------------------------------------------------------------------------------
/chaindexing-tests/src/tests/repos/postgres_repo.rs:
--------------------------------------------------------------------------------
1 | #[cfg(test)]
2 | mod create_contract_addresses {
3 | use std::sync::Arc;
4 |
5 | use chaindexing::{ChainId, ChaindexingRepo, ExecutesWithRawQuery, UnsavedContractAddress};
6 |
7 | use tokio::sync::Mutex;
8 |
9 | use crate::{find_contract_address_by_contract_name, test_runner};
10 |
11 | #[tokio::test]
12 | pub async fn creates_contract_addresses() {
13 | test_runner::run_test_new(|repo_client| async move {
14 | let contract_name = "contract-name-1";
15 | let contract_address_value = "0x8a90CAb2b38dba80c64b7734e58Ee1dB38B8993e";
16 | let chain_id = ChainId::Arbitrum;
17 | let start_block_number = 0;
18 |
19 | let contract_addresses = vec![UnsavedContractAddress::new(
20 | contract_name,
21 | contract_address_value,
22 | &chain_id,
23 | start_block_number,
24 | )];
25 | ChaindexingRepo::create_contract_addresses(&repo_client, &contract_addresses).await;
26 |
27 | let repo_client = Arc::new(Mutex::new(repo_client));
28 | let contract_address =
29 | find_contract_address_by_contract_name(&repo_client, contract_name, &chain_id)
30 | .await;
31 |
32 | assert!(contract_address.is_some());
33 |
34 | let contract_address = contract_address.unwrap();
35 | assert_eq!(
36 | contract_address.address,
37 | contract_address_value.to_lowercase()
38 | );
39 | assert_eq!(
40 | contract_address.start_block_number,
41 | start_block_number as i64
42 | );
43 | })
44 | .await;
45 | }
46 |
47 | #[tokio::test]
48 | pub async fn sets_next_block_numbers() {
49 | test_runner::run_test_new(|repo_client| async move {
50 | let contract_name = "contract-name-20";
51 | let contract_address_value = "0x8a90CAb2b38dba80c64b7734e58Ee1dB38B8942e";
52 | let chain_id = ChainId::Arbitrum;
53 | let start_block_number = 30;
54 |
55 | let contract_addresses = vec![UnsavedContractAddress::new(
56 | contract_name,
57 | contract_address_value,
58 | &chain_id,
59 | start_block_number,
60 | )];
61 | ChaindexingRepo::create_contract_addresses(&repo_client, &contract_addresses).await;
62 |
63 | let repo_client = Arc::new(Mutex::new(repo_client));
64 | let contract_address =
65 | find_contract_address_by_contract_name(&repo_client, contract_name, &chain_id)
66 | .await
67 | .unwrap();
68 |
69 | assert_eq!(
70 | contract_address.next_block_number_to_ingest_from,
71 | start_block_number as i64
72 | );
73 | assert_eq!(
74 | contract_address.next_block_number_to_handle_from,
75 | start_block_number as i64
76 | );
77 | assert_eq!(contract_address.next_block_number_for_side_effects, 0);
78 | })
79 | .await;
80 | }
81 |
82 | #[tokio::test]
83 | pub async fn does_not_overwrite_contract_name_of_contract_addresses() {
84 | test_runner::run_test_new(|repo_client| async move {
85 | let chain_id = &ChainId::Arbitrum;
86 | let initial_contract_address = UnsavedContractAddress::new(
87 | "initial-contract-name-3",
88 | "0x8a90CAb2b38dba80c64b7734e58Ee1dB38B8992e",
89 | chain_id,
90 | 0,
91 | );
92 |
93 | let contract_addresses = vec![initial_contract_address];
94 | ChaindexingRepo::create_contract_addresses(&repo_client, &contract_addresses).await;
95 |
96 | let updated_contract_address = UnsavedContractAddress::new(
97 | "updated-contract-name-3",
98 | "0x8a90CAb2b38dba80c64b7734e58Ee1dB38B8992e",
99 | chain_id,
100 | 0,
101 | );
102 | let contract_addresses = vec![updated_contract_address];
103 |
104 | ChaindexingRepo::create_contract_addresses(&repo_client, &contract_addresses).await;
105 |
106 | let repo_client = Arc::new(Mutex::new(repo_client));
107 |
108 | assert!(find_contract_address_by_contract_name(
109 | &repo_client,
110 | "initial-contract-name-3",
111 | chain_id
112 | )
113 | .await
114 | .is_some());
115 |
116 | assert!(find_contract_address_by_contract_name(
117 | &repo_client,
118 | "updated-contract-name-3",
119 | chain_id
120 | )
121 | .await
122 | .is_none());
123 | })
124 | .await;
125 | }
126 |
127 | #[tokio::test]
128 | pub async fn does_not_update_any_block_number() {
129 | test_runner::run_test_new(|repo_client| async move {
130 | let initial_start_block_number = 400;
131 |
132 | let chain_id = &ChainId::Arbitrum;
133 | let initial_contract_address = UnsavedContractAddress::new(
134 | "contract-name-4",
135 | "0x8a90CAb2b38dba80c64b7734e58Ee1dB38B8192e",
136 | chain_id,
137 | initial_start_block_number,
138 | );
139 |
140 | let contract_addresses = vec![initial_contract_address];
141 | ChaindexingRepo::create_contract_addresses(&repo_client, &contract_addresses).await;
142 |
143 | let updated_contract_address_start_block_number = 2000;
144 | let updated_contract_address = UnsavedContractAddress::new(
145 | "contract-name-4",
146 | "0x8a90CAb2b38dba80c64b7734e58Ee1dB38B8192e",
147 | chain_id,
148 | updated_contract_address_start_block_number,
149 | );
150 | let contract_addresses = vec![updated_contract_address];
151 |
152 | ChaindexingRepo::create_contract_addresses(&repo_client, &contract_addresses).await;
153 |
154 | let repo_client = Arc::new(Mutex::new(repo_client));
155 |
156 | let contract_address =
157 | find_contract_address_by_contract_name(&repo_client, "contract-name-4", chain_id)
158 | .await
159 | .unwrap();
160 |
161 | assert_eq!(
162 | contract_address.start_block_number as u64,
163 | initial_start_block_number
164 | );
165 | assert_eq!(
166 | contract_address.next_block_number_to_handle_from as u64,
167 | initial_start_block_number
168 | );
169 | assert_eq!(
170 | contract_address.next_block_number_to_ingest_from as u64,
171 | initial_start_block_number
172 | );
173 | assert_eq!(
174 | contract_address.next_block_number_for_side_effects as u64,
175 | 0
176 | );
177 | })
178 | .await;
179 | }
180 | }
181 |
--------------------------------------------------------------------------------
/chaindexing-tests/src/tests/states.rs:
--------------------------------------------------------------------------------
1 | #[cfg(test)]
2 | mod tests {
3 | use std::sync::Arc;
4 |
5 | use chaindexing::deferred_futures::DeferredFutures;
6 | use chaindexing::states::{Filters, Updates};
7 | use chaindexing::{ChaindexingRepo, EventContext, HasRawQueryClient};
8 | use tokio::sync::Mutex;
9 |
10 | use super::*;
11 | use crate::factory::{bayc_contract, transfer_event_with_contract};
12 | use crate::test_runner;
13 |
14 | #[tokio::test]
15 | pub async fn creates_state() {
16 | let bayc_contract =
17 | bayc_contract("BoredApeYachtClub-1", "09").add_state_migrations(NftMigrations);
18 | let mut repo_client = test_runner::new_repo().get_client().await;
19 | let repo_txn_client = ChaindexingRepo::get_txn_client(&mut repo_client).await;
20 | let event_context: EventContext<'_, '_> = EventContext::new(
21 | &transfer_event_with_contract(bayc_contract),
22 | &repo_txn_client,
23 | &Arc::new(Mutex::new(test_runner::new_repo().get_client().await)),
24 | &DeferredFutures::new(),
25 | );
26 |
27 | let new_state = Nft { token_id: 2 };
28 |
29 | new_state.create(&event_context).await;
30 |
31 | let returned_state =
32 | Nft::read_one(&Filters::new("token_id", 2), &event_context).await.unwrap();
33 |
34 | assert_eq!(new_state, returned_state);
35 | }
36 |
37 | #[tokio::test]
38 | pub async fn updates_state() {
39 | let bayc_contract =
40 | bayc_contract("BoredApeYachtClub-2", "07").add_state_migrations(NftMigrations);
41 | let mut repo_client = test_runner::new_repo().get_client().await;
42 | let repo_txn_client = ChaindexingRepo::get_txn_client(&mut repo_client).await;
43 | let event_context: EventContext<'_, '_> = EventContext::new(
44 | &transfer_event_with_contract(bayc_contract),
45 | &repo_txn_client,
46 | &Arc::new(Mutex::new(test_runner::new_repo().get_client().await)),
47 | &DeferredFutures::new(),
48 | );
49 |
50 | let new_state = Nft { token_id: 1 };
51 | new_state.create(&event_context).await;
52 | new_state.update(&Updates::new("token_id", 4), &event_context).await;
53 |
54 | let initial_state = Nft::read_one(&Filters::new("token_id", 1), &event_context).await;
55 | assert_eq!(initial_state, None);
56 |
57 | let updated_state = Nft::read_one(&Filters::new("token_id", 4), &event_context).await;
58 | assert!(updated_state.is_some());
59 | }
60 |
61 | #[tokio::test]
62 | pub async fn deletes_state() {
63 | let bayc_contract =
64 | bayc_contract("BoredApeYachtClub-3", "05").add_state_migrations(NftMigrations);
65 | let mut repo_client = test_runner::new_repo().get_client().await;
66 | let repo_txn_client = ChaindexingRepo::get_txn_client(&mut repo_client).await;
67 | let event_context: EventContext<'_, '_> = EventContext::new(
68 | &transfer_event_with_contract(bayc_contract),
69 | &repo_txn_client,
70 | &Arc::new(Mutex::new(test_runner::new_repo().get_client().await)),
71 | &DeferredFutures::new(),
72 | );
73 |
74 | let new_state = Nft { token_id: 9 };
75 | new_state.create(&event_context).await;
76 | new_state.delete(&event_context).await;
77 |
78 | let state = Nft::read_one(&Filters::new("token_id", 9), &event_context).await;
79 | assert_eq!(state, None);
80 | }
81 | }
82 |
83 | use chaindexing::augmenting_std::serde::{Deserialize, Serialize};
84 | use chaindexing::{
85 | states::{ContractState, StateMigrations},
86 | HasRawQueryClient,
87 | };
88 |
89 | use crate::{factory::bayc_contract, test_runner};
90 |
// Minimal user-defined contract state used to exercise state CRUD in tests.
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
#[serde(crate = "chaindexing::augmenting_std::serde")]
struct Nft {
    token_id: i32,
}
impl ContractState for Nft {
    /// Backing table for this state; created by `NftMigrations`.
    fn table_name() -> &'static str {
        "nfts"
    }
}
// Migration set that provisions the `nfts` table for the Nft test state.
struct NftMigrations;
impl StateMigrations for NftMigrations {
    fn migrations(&self) -> &'static [&'static str] {
        &["CREATE TABLE IF NOT EXISTS nfts (
token_id INTEGER NOT NULL)"]
    }
}
108 |
/// Runs the user (state) migrations for the Nft test table ahead of the
/// state tests in this module.
pub async fn setup() {
    let bayc_contract =
        bayc_contract("BoredApeYachtClub", "06").add_state_migrations(NftMigrations);
    let repo_client = test_runner::new_repo().get_client().await;
    chaindexing::booting::run_user_migrations(&repo_client, &[bayc_contract]).await;
}
115 |
--------------------------------------------------------------------------------
/chaindexing/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "chaindexing"
3 | version = "0.1.77"
4 | authors = ["Chaindexing Contributors "]
5 | description = "Index any EVM chain and query in SQL"
6 | keywords = ["index", "multi-chain", "ethereum", "evm", "web3"]
7 | categories = ["encoding", "cryptography::cryptocurrencies", "asynchronous", "concurrency"]
8 | edition = "2021"
9 | license = "MIT OR Apache-2.0"
10 | readme = "../README.md"
11 | repository = "https://github.com/chaindexing/chaindexing-rs"
12 |
13 | [features]
14 | default = ["postgres"]
15 | postgres = ["tokio-postgres"]
16 |
17 | [dependencies]
18 | async-trait = "0.1"
19 | bb8 = "0.8"
20 | derive_more = "0.99"
21 | chrono = { version = "0.4", features = ["serde"] }
22 | diesel = { version = "2", features = ["postgres", "uuid", "chrono", "serde_json"] }
23 | diesel-async = { version = "0.4", features = ["bb8", "postgres"] }
24 | pin-project-lite = "0.2.14"
25 | ethers = "2.0"
26 | serde = { version = "1.0", features = ["derive"] }
27 | serde_json = "1"
28 | tokio-postgres = { version = "0.7", features = ["with-serde_json-1"], optional = true }
29 | tokio = { version = "1", features = ["full"] }
30 | uuid = { version = "1", features = ["v4", "serde"] }
31 | futures-core = { version = "0.3", features = ["alloc"] }
32 | futures-util = "0.3"
33 |
34 |
--------------------------------------------------------------------------------
/chaindexing/src/augmenting_std.rs:
--------------------------------------------------------------------------------
// Hidden re-exports consumed by chaindexing macros and user code (e.g.
// `#[chaindexing::augmenting_std::async_trait]`) so downstream crates don't
// need their own direct dependency on these crates.
#[doc(hidden)]
pub use async_trait::async_trait;

#[doc(hidden)]
pub use serde;
6 |
--------------------------------------------------------------------------------
/chaindexing/src/booting.rs:
--------------------------------------------------------------------------------
1 | use crate::{
2 | contracts, root, ChaindexingError, ChaindexingRepo, ChaindexingRepoClient, Config, Contract,
3 | ExecutesWithRawQuery, LoadsDataWithRawQuery, Migratable, RepoMigrations,
4 | };
5 |
6 | pub async fn setup_nodes(
7 | config: &Config,
8 | client: &ChaindexingRepoClient,
9 | ) {
10 | ChaindexingRepo::migrate(client, ChaindexingRepo::create_nodes_migration().to_vec()).await;
11 | ChaindexingRepo::prune_nodes(client, config.max_concurrent_node_count).await;
12 | }
13 |
14 | pub async fn setup<'a, S: Sync + Send + Clone>(
15 | Config {
16 | contracts,
17 | reset_count,
18 | reset_including_side_effects_count,
19 | reset_queries,
20 | ..
21 | }: &Config,
22 | client: &ChaindexingRepoClient,
23 | ) -> Result<(), ChaindexingError> {
24 | setup_root(client).await;
25 |
26 | maybe_reset(
27 | *reset_count,
28 | *reset_including_side_effects_count,
29 | reset_queries,
30 | contracts,
31 | client,
32 | )
33 | .await;
34 |
35 | run_internal_migrations(client).await;
36 | run_user_migrations(client, contracts).await;
37 |
38 | let contract_addresses: Vec<_> =
39 | contracts.clone().into_iter().flat_map(|c| c.addresses).collect();
40 | ChaindexingRepo::create_contract_addresses(client, &contract_addresses).await;
41 |
42 | Ok(())
43 | }
44 |
45 | /// Root migrations are immutable and should never really be dropped
46 | pub async fn setup_root(client: &ChaindexingRepoClient) {
47 | ChaindexingRepo::migrate(
48 | client,
49 | ChaindexingRepo::create_root_states_migration().to_vec(),
50 | )
51 | .await;
52 |
53 | ChaindexingRepo::migrate(
54 | client,
55 | ChaindexingRepo::create_contract_addresses_migration().to_vec(),
56 | )
57 | .await;
58 |
59 | if ChaindexingRepo::load_last_root_state(client).await.is_none() {
60 | ChaindexingRepo::append_root_state(client, &Default::default()).await;
61 | }
62 |
63 | ChaindexingRepo::prune_root_states(client, root::states::MAX_COUNT).await;
64 | }
65 |
66 | async fn maybe_reset(
67 | reset_count: u64,
68 | reset_including_side_effects_count: u64,
69 | reset_queries: &Vec,
70 | contracts: &[Contract],
71 | client: &ChaindexingRepoClient,
72 | ) {
73 | let mut root_state = ChaindexingRepo::load_last_root_state(client).await.unwrap();
74 |
75 | let should_reset_normally = reset_count > root_state.reset_count;
76 | let should_reset_including_side_effects =
77 | reset_including_side_effects_count > root_state.reset_including_side_effects_count;
78 |
79 | if should_reset_normally {
80 | reset(reset_queries, contracts, client).await;
81 |
82 | root_state.update_reset_count(reset_count);
83 | }
84 |
85 | if should_reset_including_side_effects {
86 | reset(reset_queries, contracts, client).await;
87 |
88 | ChaindexingRepo::migrate(
89 | client,
90 | ChaindexingRepo::zero_next_block_number_for_side_effects_migration().to_vec(),
91 | )
92 | .await;
93 |
94 | root_state.update_reset_including_side_effects_count(reset_including_side_effects_count);
95 | }
96 |
97 | let reset_happened = should_reset_normally || should_reset_including_side_effects;
98 | if reset_happened {
99 | ChaindexingRepo::append_root_state(client, &root_state).await;
100 | }
101 | }
102 |
103 | async fn reset(
104 | reset_queries: &Vec,
105 | contracts: &[Contract],
106 | client: &ChaindexingRepoClient,
107 | ) {
108 | reset_internal_migrations(client).await;
109 | reset_user_migrations(client, contracts).await;
110 | run_user_reset_queries(client, reset_queries).await;
111 | }
112 |
/// Applies chaindexing's internal (forward) migrations.
pub async fn run_internal_migrations(client: &ChaindexingRepoClient) {
    ChaindexingRepo::migrate(client, ChaindexingRepo::get_internal_migrations()).await;
}
/// Applies the drop/reset counterparts of the internal migrations.
async fn reset_internal_migrations(client: &ChaindexingRepoClient) {
    ChaindexingRepo::migrate(client, ChaindexingRepo::get_reset_internal_migrations()).await;
}
119 |
120 | pub async fn run_user_migrations(
121 | client: &ChaindexingRepoClient,
122 | contracts: &[Contract],
123 | ) {
124 | for state_migration in contracts::get_state_migrations(contracts) {
125 | ChaindexingRepo::migrate(client, state_migration.get_migrations()).await;
126 | }
127 | }
128 | async fn reset_user_migrations(
129 | client: &ChaindexingRepoClient,
130 | contracts: &[Contract],
131 | ) {
132 | for state_migration in contracts::get_state_migrations(contracts) {
133 | ChaindexingRepo::migrate(client, state_migration.get_reset_migrations()).await;
134 | }
135 | }
136 | async fn run_user_reset_queries(client: &ChaindexingRepoClient, reset_queries: &Vec) {
137 | for reset_query in reset_queries {
138 | ChaindexingRepo::execute(client, reset_query).await;
139 | }
140 | }
141 |
--------------------------------------------------------------------------------
/chaindexing/src/chain_reorg.rs:
--------------------------------------------------------------------------------
1 | use std::cmp::max;
2 | use std::collections::HashMap;
3 |
4 | use crate::diesel::schema::chaindexing_reorged_blocks;
5 | use crate::ChainId;
6 | use diesel::prelude::Insertable;
7 | use serde::Deserialize;
8 |
/// Tolerance for chain re-organization
#[derive(Clone, Debug)]
pub struct MinConfirmationCount {
    value: u8,
}

impl MinConfirmationCount {
    pub fn new(value: u8) -> Self {
        Self { value }
    }

    /// Walks `value` blocks back from `block_number`, clamping at zero and
    /// never going before `start_block_number`.
    pub fn deduct_from(&self, block_number: u64, start_block_number: u64) -> u64 {
        let candidate = max(0, (block_number as i64) - (self.value as i64));

        max(start_block_number, candidate as u64)
    }

    /// Whether `next_block_number` falls within the last `value` blocks of
    /// the chain head at `current_block_number`.
    pub fn is_in_confirmation_window(
        &self,
        next_block_number: u64,
        current_block_number: u64,
    ) -> bool {
        let confirmations = self.value as u64;
        // No window exists until the chain has advanced past `value` blocks;
        // the short-circuit also prevents u64 underflow below.
        confirmations < current_block_number
            && next_block_number >= current_block_number - confirmations
    }
}
38 |
/// Distinguishes the main indexing pass from the confirmation pass that
/// re-checks blocks within the re-org tolerance window.
#[derive(Clone)]
pub enum Execution<'a> {
    Main,
    Confirmation(&'a MinConfirmationCount),
}
44 |
45 | #[derive(Debug, Clone, PartialEq, Eq, Hash, Deserialize)]
46 | pub struct ReorgedBlock {
47 | pub id: i32,
48 | pub block_number: i64,
49 | pub chain_id: i64,
50 | handled_at: Option,
51 | }
52 |
53 | #[derive(Debug, Clone, Insertable)]
54 | #[diesel(table_name = chaindexing_reorged_blocks)]
55 | pub struct UnsavedReorgedBlock {
56 | pub block_number: i64,
57 | pub chain_id: i64,
58 | }
59 |
60 | impl UnsavedReorgedBlock {
61 | pub fn new(block_number: i64, chain_id: &ChainId) -> Self {
62 | Self {
63 | block_number,
64 | chain_id: *chain_id as i64,
65 | }
66 | }
67 | }
68 |
69 | pub struct ReorgedBlocks;
70 |
71 | impl ReorgedBlocks {
72 | pub fn only_earliest_per_chain(reorged_blocks: &[ReorgedBlock]) -> Vec<&ReorgedBlock> {
73 | reorged_blocks
74 | .iter()
75 | .fold(
76 | HashMap::::new(),
77 | |mut reorged_blocks_by_chain, reorged_block| {
78 | let ReorgedBlock { chain_id, .. } = reorged_block;
79 |
80 | if let Some(earliest_reorged_block) = reorged_blocks_by_chain.get(chain_id) {
81 | if reorged_block.block_number < earliest_reorged_block.block_number {
82 | reorged_blocks_by_chain.insert(*chain_id, reorged_block);
83 | }
84 | } else {
85 | reorged_blocks_by_chain.insert(reorged_block.chain_id, reorged_block);
86 | }
87 |
88 | reorged_blocks_by_chain
89 | },
90 | )
91 | .into_values()
92 | .collect()
93 | }
94 |
95 | pub fn get_ids<'a>(reorged_blocks: &'a [&'a ReorgedBlock]) -> Vec {
96 | reorged_blocks.iter().map(|r| r.id).collect()
97 | }
98 | }
99 |
--------------------------------------------------------------------------------
/chaindexing/src/chains.rs:
--------------------------------------------------------------------------------
/// Represents the network ID for an EVM Chain
/// For example, `ChainId::Mainnet`, `ChainId::Polygon`, etc.
pub type ChainId = ethers::types::Chain;

/// Represents an EVM chain network
#[derive(Clone, Debug)]
pub struct Chain {
    /// The network's chain id, e.g. `ChainId::Mainnet`.
    pub id: ChainId,
    /// JSON-RPC endpoint used to ingest this chain's events.
    pub json_rpc_url: String,
}
11 |
12 | impl Chain {
13 | /// Builds the chain network
14 | ///
15 | ///
16 | /// # Example
17 | /// ```
18 | /// use chaindexing::{Chain, ChainId};
19 | ///
20 | /// Chain::new(ChainId::Polygon, "https://polygon-mainnet.g.alchemy.com/v2/...");
21 | /// ```
22 | pub fn new(id: ChainId, json_rpc_url: &str) -> Self {
23 | Self {
24 | id,
25 | json_rpc_url: json_rpc_url.to_string(),
26 | }
27 | }
28 | }
29 |
--------------------------------------------------------------------------------
/chaindexing/src/config.rs:
--------------------------------------------------------------------------------
1 | use std::sync::Arc;
2 |
3 | use tokio::sync::Mutex;
4 |
5 | use crate::chain_reorg::MinConfirmationCount;
6 | use crate::chains::Chain;
7 | use crate::nodes::{self, NodeHeartbeat};
8 | use crate::pruning::PruningConfig;
9 | use crate::{ChaindexingRepo, Contract};
10 |
/// Raised when a `Config` is missing required components (see `Config::validate`).
pub enum ConfigError {
    NoContract,
    NoChain,
}

impl std::fmt::Debug for ConfigError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Map each variant to its user-facing message, then emit it once.
        let message = match self {
            ConfigError::NoContract => "At least one contract is required",
            ConfigError::NoChain => "At least one chain is required",
        };

        f.write_str(message)
    }
}
28 |
/// Used to configure managing a chaindexing's node heartbeat
/// to cut down JSON-RPC's (Alchemy, Infura, etc.) cost.
#[derive(Clone, Debug)]
pub struct OptimizationConfig {
    // Heartbeat used to decide whether a node is still actively in use.
    pub(crate) node_heartbeat: NodeHeartbeat,
    // Delay (seconds) before optimization kicks in; see `OptimizationConfig::new`.
    pub(crate) start_after_in_secs: u64,
}
36 |
37 | impl OptimizationConfig {
38 | /// Optimization starts after the seconds specified here.
39 | /// This is the typically the estimated time to complete initial indexing
40 | /// i.e. the estimated time in seconds for chaindexing to reach
41 | /// the current block for all chains being indexed.
42 | pub fn new(node_heartbeat: &NodeHeartbeat, start_after_in_secs: u64) -> Self {
43 | Self {
44 | node_heartbeat: node_heartbeat.clone(),
45 | start_after_in_secs,
46 | }
47 | }
48 | }
49 |
/// Configuration for indexing states
// NOTE(review): the generic parameter on this struct and several field types
// (`Vec`, `Option`, `Arc`, `Mutex` with no parameters) appear to have been
// lost in extraction — confirm against the crate's published source.
#[derive(Clone, Debug)]
pub struct Config {
    /// Chains to index; registered via `add_chain`.
    pub chains: Vec,
    /// Storage backend.
    pub repo: ChaindexingRepo,
    /// Contracts to index; registered via `add_contract`.
    pub contracts: Vec>,
    // Minimum confirmations used for reorg detection (default 40).
    pub(crate) min_confirmation_count: MinConfirmationCount,
    /// Blocks ingested/handled per batch (default 8_000).
    pub blocks_per_batch: u64,
    /// Interval (ms) between handler runs (default 4_000).
    pub handler_rate_ms: u64,
    /// Interval (ms) between ingestion runs (default 20_000).
    pub ingestion_rate_ms: u64,
    /// Number of chain batches processed concurrently (default 4).
    pub chain_concurrency: u32,
    // Falls back to `ingestion_rate_ms` when unset; see `get_node_election_rate_ms`.
    node_election_rate_ms: Option,
    /// Bump to restart indexing from scratch (side effects excluded).
    pub reset_count: u64,
    // Bump to restart indexing INCLUDING side effects — see
    // `reset_including_side_effects_dangerously`.
    pub(crate) reset_including_side_effects_count: u64,
    /// Extra queries executed on reset, for derived app states.
    pub reset_queries: Vec,
    /// Shared state made available to side-effect handlers.
    pub shared_state: Option>>,
    /// Upper bound of concurrently active nodes.
    pub max_concurrent_node_count: u16,
    /// When `Some`, heartbeat-based optimization is enabled.
    pub optimization_config: Option,
    // When `Some`, stale events get pruned; see `with_pruning`.
    pub(crate) pruning_config: Option,
}
70 |
// NOTE(review): the generic parameters on this `impl` (a shared-state type
// parameter in the published crate) appear lost in extraction.
impl Config {
    /// Creates a default configuration backed by `repo`. At least one chain
    /// and one contract must be added before use (see `validate`).
    pub fn new(repo: ChaindexingRepo) -> Self {
        Self {
            repo,
            chains: vec![],
            contracts: vec![],
            min_confirmation_count: MinConfirmationCount::new(40),
            blocks_per_batch: 8_000,
            handler_rate_ms: 4_000,
            ingestion_rate_ms: 20_000,
            chain_concurrency: 4,
            node_election_rate_ms: None,
            reset_count: 0,
            reset_including_side_effects_count: 0,
            reset_queries: vec![],
            shared_state: None,
            max_concurrent_node_count: nodes::DEFAULT_MAX_CONCURRENT_NODE_COUNT,
            optimization_config: None,
            pruning_config: None,
        }
    }

    /// Includes chain in config
    pub fn add_chain(mut self, chain: Chain) -> Self {
        self.chains.push(chain);

        self
    }

    /// Includes contract in config
    pub fn add_contract(mut self, contract: Contract) -> Self {
        self.contracts.push(contract);

        self
    }

    /// Allows managing derived app states (derived from indexed states)
    pub fn add_reset_query(mut self, reset_query: &str) -> Self {
        self.reset_queries.push(reset_query.to_string());

        self
    }

    /// Restarts indexing from scratch for EventHandlers. SideEffectHandlers
    /// will not run if they ran already
    pub fn reset(mut self, count: u64) -> Self {
        self.reset_count = count;

        self
    }

    /// Restarts indexing from scratch for all Handlers. SideEffectHandlers
    /// will RUN even if they ran already
    pub fn reset_including_side_effects_dangerously(mut self, count: u64) -> Self {
        self.reset_including_side_effects_count = count;

        self
    }

    /// Defines the initial state for side effect handlers
    pub fn with_initial_state(mut self, initial_state: SharedState) -> Self {
        self.shared_state = Some(Arc::new(Mutex::new(initial_state)));

        self
    }

    /// The minimum confirmation count for detecting chain-reorganizations or uncled blocks
    pub fn with_min_confirmation_count(mut self, min_confirmation_count: u8) -> Self {
        self.min_confirmation_count = MinConfirmationCount::new(min_confirmation_count);

        self
    }

    /// Advance config: How many blocks per batch should be ingested and handled.
    /// Default is 8_000
    pub fn with_blocks_per_batch(mut self, blocks_per_batch: u64) -> Self {
        self.blocks_per_batch = blocks_per_batch;

        self
    }

    /// Advance config: How often should the events handlers processes run.
    /// Default is 4_000
    pub fn with_handler_rate_ms(mut self, handler_rate_ms: u64) -> Self {
        self.handler_rate_ms = handler_rate_ms;

        self
    }

    /// Advance config: How often should the events ingester processes run.
    /// Default is 20_000
    pub fn with_ingestion_rate_ms(mut self, ingestion_rate_ms: u64) -> Self {
        self.ingestion_rate_ms = ingestion_rate_ms;

        self
    }

    /// Configures number of chain batches to be processed concurrently
    pub fn with_chain_concurrency(mut self, chain_concurrency: u32) -> Self {
        self.chain_concurrency = chain_concurrency;

        self
    }

    /// Overrides the node-election interval; when unset, the ingestion rate
    /// is used (see `get_node_election_rate_ms`).
    pub fn with_node_election_rate_ms(mut self, node_election_rate_ms: u64) -> Self {
        self.node_election_rate_ms = Some(node_election_rate_ms);

        self
    }

    /// Caps how many nodes may be concurrently active.
    pub fn with_max_concurrent_node_count(mut self, max_concurrent_node_count: u16) -> Self {
        self.max_concurrent_node_count = max_concurrent_node_count;

        self
    }

    /// Deletes stale events and related-internal data
    pub fn with_pruning(mut self) -> Self {
        self.pruning_config = Some(Default::default());

        self
    }

    /// Sets how many blocks away from the tip data may be pruned,
    /// enabling pruning if not already enabled.
    pub fn with_prune_n_blocks_away(mut self, prune_n_blocks_away: u64) -> Self {
        self.pruning_config = Some(PruningConfig {
            prune_n_blocks_away,
            ..self.pruning_config.unwrap_or_default()
        });

        self
    }

    /// Sets how often pruning runs, enabling pruning if not already enabled.
    pub fn with_prune_interval(mut self, prune_interval: u64) -> Self {
        self.pruning_config = Some(PruningConfig {
            prune_interval,
            ..self.pruning_config.unwrap_or_default()
        });

        self
    }

    /// This enables optimization for indexing with the CAVEAT that you have to
    /// manually keep chaindexing alive e.g. when a user enters certain pages
    /// in your DApp
    pub fn enable_optimization(mut self, optimization_config: &OptimizationConfig) -> Self {
        self.optimization_config = Some(optimization_config.clone());

        self
    }
    /// Returns `true` when `enable_optimization` was called.
    pub fn is_optimization_enabled(&self) -> bool {
        self.optimization_config.is_some()
    }

    // Defaults to the ingestion rate when no explicit election rate was set.
    pub(super) fn get_node_election_rate_ms(&self) -> u64 {
        self.node_election_rate_ms.unwrap_or(self.ingestion_rate_ms)
    }

    /// Ensures at least one contract and one chain were configured;
    /// contracts are checked first.
    pub(super) fn validate(&self) -> Result<(), ConfigError> {
        if self.contracts.is_empty() {
            Err(ConfigError::NoContract)
        } else if self.chains.is_empty() {
            Err(ConfigError::NoChain)
        } else {
            Ok(())
        }
    }
}
238 |
--------------------------------------------------------------------------------
/chaindexing/src/contracts.rs:
--------------------------------------------------------------------------------
1 | use std::fmt::Debug;
2 | use std::{collections::HashMap, str::FromStr, sync::Arc};
3 |
4 | use crate::diesel::schema::chaindexing_contract_addresses;
5 | use crate::handlers::PureHandler;
6 | use crate::states::StateMigrations;
7 | use crate::ChainId;
8 | use crate::{EventHandler, SideEffectHandler};
9 | use diesel::{Identifiable, Insertable, Queryable};
10 |
11 | use ethers::types::U64;
12 | use ethers::{
13 | abi::{Address, Event, HumanReadableParser},
14 | types::H256,
15 | };
16 | use serde::Deserialize;
17 |
/// An event's topic (`H256`), derived from the event's signature
/// (see `Contract::get_event_topics`).
pub type ContractEventTopic = H256;

#[derive(Debug, Clone)]
pub struct ContractEvent {
    /// The human-readable ABI this event was parsed from.
    pub abi: String,
    /// The parsed `ethers` event definition.
    pub value: Event,
}
25 |
26 | impl ContractEvent {
27 | pub fn new(abi: &str) -> Self {
28 | Self {
29 | abi: abi.to_string(),
30 | value: HumanReadableParser::parse_event(abi).unwrap(),
31 | }
32 | }
33 | }
34 |
/// Human Readable ABI defined for ingesting events.
/// For example, `event Transfer(address indexed from, address indexed to, uint256 indexed tokenId)`
pub type EventAbi = &'static str;

/// Represents the template/specification/interface for a given contract.
// NOTE(review): the generic parameters on this struct and its handler-map
// field types appear to have been lost in extraction — confirm against the
// published crate.
#[derive(Clone)]
pub struct Contract {
    /// Deployed addresses (per chain) this spec applies to.
    pub addresses: Vec,
    pub name: String,
    /// Pure (deterministic) event handlers, keyed by event ABI.
    pub pure_handlers: HashMap>,
    /// Side-effect handlers, keyed by event ABI.
    pub side_effect_handlers: HashMap>>,
    /// Migrations for the states this contract indexes.
    pub state_migrations: Vec>,
}
48 |
49 | impl Contract {
50 | /// Builds the contract's template/spec/interface.
51 | ///
52 | ///
53 | /// # Example
54 | /// ```
55 | /// use chaindexing::Contract;
56 | ///
57 | /// Contract::<()>::new("ERC20");
58 | /// ```
59 | pub fn new(name: &str) -> Self {
60 | Self {
61 | addresses: vec![],
62 | state_migrations: vec![],
63 | name: name.to_string(),
64 | pure_handlers: HashMap::new(),
65 | side_effect_handlers: HashMap::new(),
66 | }
67 | }
68 |
69 | /// Adds a contract address to a contract
70 | pub fn add_address(
71 | mut self,
72 | address: &str,
73 | chain_id: &ChainId,
74 | start_block_number: u64,
75 | ) -> Self {
76 | self.addresses.push(UnsavedContractAddress::new(
77 | &self.name,
78 | address,
79 | chain_id,
80 | start_block_number,
81 | ));
82 |
83 | self
84 | }
85 |
86 | /// Adds an event handler
87 | pub fn add_event_handler(mut self, handler: impl EventHandler + 'static) -> Self {
88 | self.pure_handlers.insert(handler.abi(), Arc::new(handler));
89 |
90 | self
91 | }
92 |
93 | /// Adds a side-effect handler
94 | pub fn add_side_effect_handler(
95 | mut self,
96 | handler: impl SideEffectHandler + 'static,
97 | ) -> Self {
98 | self.side_effect_handlers.insert(handler.abi(), Arc::new(handler));
99 |
100 | self
101 | }
102 |
103 | /// Adds state migrations for the contract states being indexed
104 | pub fn add_state_migrations(mut self, state_migration: impl StateMigrations + 'static) -> Self {
105 | self.state_migrations.push(Arc::new(state_migration));
106 |
107 | self
108 | }
109 |
110 | pub(crate) fn get_event_abis(&self) -> Vec {
111 | let mut event_abis: Vec<_> = self.pure_handlers.clone().into_keys().collect();
112 | let side_effect_abis: Vec<_> = self.pure_handlers.clone().into_keys().collect();
113 |
114 | event_abis.extend(side_effect_abis);
115 | event_abis.dedup();
116 |
117 | event_abis
118 | }
119 |
120 | pub(crate) fn get_event_topics(&self) -> Vec {
121 | self.get_event_abis()
122 | .iter()
123 | .map(|abi| HumanReadableParser::parse_event(abi).unwrap().signature())
124 | .collect()
125 | }
126 |
127 | pub(crate) fn build_events(&self) -> Vec {
128 | self.get_event_abis().iter().map(|abi| ContractEvent::new(abi)).collect()
129 | }
130 | }
131 |
// NOTE(review): the generic parameters on this `impl` appear lost in extraction.
impl Debug for Contract {
    // Only `name` and `addresses` are included in debug output; the handler
    // maps and state migrations are omitted.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("Contract")
            .field("name", &self.name)
            .field("addresses", &self.addresses)
            .finish()
    }
}
140 |
/// Collects the state migrations of every given contract into one list.
// NOTE(review): generic parameters in this signature appear lost in extraction.
pub fn get_state_migrations(
    contracts: &[Contract],
) -> Vec> {
    contracts.iter().flat_map(|c| c.state_migrations.clone()).collect()
}
146 |
147 | pub fn get_pure_handlers(
148 | contracts: &[Contract],
149 | ) -> HashMap> {
150 | contracts.iter().fold(HashMap::new(), |mut handlers_by_event_abi, contract| {
151 | contract.pure_handlers.iter().for_each(|(event_abi, handler)| {
152 | handlers_by_event_abi.insert(event_abi, handler.clone());
153 | });
154 | handlers_by_event_abi
155 | })
156 | }
157 |
158 | pub fn get_side_effect_handlers(
159 | contracts: &[Contract],
160 | ) -> HashMap>> {
161 | contracts.iter().fold(HashMap::new(), |mut handlers_by_event_abi, contract| {
162 | contract.side_effect_handlers.iter().for_each(|(event_abi, handler)| {
163 | handlers_by_event_abi.insert(event_abi, handler.clone());
164 | });
165 | handlers_by_event_abi
166 | })
167 | }
168 |
169 | pub fn group_event_topics_by_names(
170 | contracts: &[Contract],
171 | ) -> HashMap> {
172 | contracts.iter().fold(HashMap::new(), |mut topics_by_contract_name, contract| {
173 | topics_by_contract_name.insert(contract.name.clone(), contract.get_event_topics());
174 |
175 | topics_by_contract_name
176 | })
177 | }
178 |
179 | pub fn group_events_by_topics(
180 | contracts: &[Contract],
181 | ) -> HashMap {
182 | contracts
183 | .iter()
184 | .flat_map(|c| c.build_events())
185 | .map(|e| (e.value.signature(), e))
186 | .collect()
187 | }
188 |
/// Insertable (not-yet-persisted) contract address; `id` and the remaining
/// progress cursors are assigned by the database.
#[derive(Debug, Clone, PartialEq, Insertable)]
#[diesel(table_name = chaindexing_contract_addresses)]
pub struct UnsavedContractAddress {
    pub contract_name: String,
    pub address: String,
    pub chain_id: i64,
    pub start_block_number: i64,
    // Starts equal to `start_block_number`; see `UnsavedContractAddress::new`.
    next_block_number_to_ingest_from: i64,
}
198 |
199 | impl UnsavedContractAddress {
200 | pub fn new(
201 | contract_name: &str,
202 | address: &str,
203 | chain_id: &ChainId,
204 | start_block_number: u64,
205 | ) -> Self {
206 | let start_block_number = start_block_number as i64;
207 |
208 | UnsavedContractAddress {
209 | contract_name: contract_name.to_string(),
210 | address: address.to_lowercase().to_string(),
211 | chain_id: *chain_id as i64,
212 | start_block_number,
213 | next_block_number_to_ingest_from: start_block_number,
214 | }
215 | }
216 | }
217 |
// N/B: The order has to match ./schema.rs to stop diesel from mixing up fields
/// Helps manage subscription of ingesting and handling events per contract address
#[derive(Debug, Clone, PartialEq, Queryable, Identifiable, Deserialize)]
#[diesel(table_name = chaindexing_contract_addresses)]
#[diesel(primary_key(id))]
pub struct ContractAddress {
    pub id: i64,
    pub chain_id: i64,
    /// Ingestion cursor: next block to fetch logs from.
    pub next_block_number_to_ingest_from: i64,
    /// Handling cursor: next block whose events still need handling.
    pub next_block_number_to_handle_from: i64,
    /// Side-effect cursor: side-effect handlers only run for events at or
    /// above this block (see `handle_events::run`).
    pub next_block_number_for_side_effects: i64,
    pub start_block_number: i64,
    /// Lowercased contract address (see `UnsavedContractAddress::new`).
    pub address: String,
    pub contract_name: String,
}
233 |
234 | impl ContractAddress {
235 | fn get_chain_id(&self) -> ChainId {
236 | U64::from(self.chain_id).try_into().unwrap()
237 | }
238 |
239 | pub fn group_contract_addresses_by_address_and_chain_id(
240 | contract_addresses: &[ContractAddress],
241 | ) -> HashMap<(Address, ChainId), &ContractAddress> {
242 | contract_addresses.iter().fold(
243 | HashMap::new(),
244 | |mut contracts_by_addresses, contract_address @ ContractAddress { address, .. }| {
245 | contracts_by_addresses.insert(
246 | (
247 | Address::from_str(address.as_str()).unwrap(),
248 | contract_address.get_chain_id(),
249 | ),
250 | contract_address,
251 | );
252 |
253 | contracts_by_addresses
254 | },
255 | )
256 | }
257 | }
258 |
--------------------------------------------------------------------------------
/chaindexing/src/deferred_futures.rs:
--------------------------------------------------------------------------------
1 | use futures_core::Future;
2 | use futures_util::future::join_all;
3 | use std::{pin::Pin, sync::Arc};
4 | use tokio::sync::Mutex;
5 |
// A boxed, pinned, sendable future queued for deferred execution.
// NOTE(review): the type parameters here (`Pin<Box<dyn Future<...>>>`-shaped)
// appear to have been lost in extraction — confirm against the published crate.
type DeferredFuture<'a> = Pin + 'a + Send>>;

/// A cloneable, shared queue of futures whose execution is deferred until
/// `consume` is called. Clones share the same underlying queue.
#[derive(Clone)]
pub struct DeferredFutures<'a> {
    futures: Arc>>>,
}
12 |
impl<'a> Default for DeferredFutures<'a> {
    /// Equivalent to `DeferredFutures::new`.
    fn default() -> Self {
        Self::new()
    }
}
18 |
impl<'a> DeferredFutures<'a> {
    /// Creates an empty queue.
    pub fn new() -> Self {
        Self {
            futures: Arc::new(Mutex::new(Vec::new())),
        }
    }
    /// Queues `future` to run on the next `consume` call.
    // NOTE(review): the `Future`'s `Output` binding appears lost in extraction.
    pub async fn add<'b: 'a, F>(&self, future: F)
    where
        F: Future + Send + 'b,
    {
        let mut futures = self.futures.lock().await;
        futures.push(Box::pin(future));
    }
    /// Awaits all queued futures concurrently, then clears the queue.
    /// The lock is held for the whole run, so concurrent `add` calls block
    /// until consumption finishes.
    pub async fn consume(&self) {
        let mut futures = self.futures.lock().await;

        join_all(futures.iter_mut()).await;

        *futures = Vec::new();
    }
}
40 |
--------------------------------------------------------------------------------
/chaindexing/src/diesel.rs:
--------------------------------------------------------------------------------
/// Diesel table definitions for chaindexing's internal tables.
pub mod schema {
    // @generated automatically by Diesel CLI.

    diesel::table! {
        chaindexing_nodes(id) {
            id -> Int4,
            last_active_at -> Int8,
            inserted_at -> Int8,
        }
    }

    diesel::table! {
        chaindexing_contract_addresses (id) {
            id -> Int8,
            chain_id -> Int8,
            next_block_number_to_ingest_from -> Int8,
            next_block_number_to_handle_from -> Int8,
            next_block_number_for_side_effects -> Int8,
            start_block_number -> Int8,
            address -> VarChar,
            contract_name -> VarChar,
        }
    }

    diesel::table! {
        chaindexing_events (id) {
            id -> Uuid,
            chain_id -> Int8,
            contract_address -> VarChar,
            contract_name -> VarChar,
            abi -> Text,
            parameters -> Json,
            topics -> Json,
            block_hash -> VarChar,
            block_number -> Int8,
            block_timestamp -> Int8,
            transaction_hash -> VarChar,
            transaction_index -> Int4,
            log_index -> Int4,
            removed -> Bool,
        }
    }

    diesel::table! {
        chaindexing_reorged_blocks (id) {
            id -> Int4,
            block_number -> Int8,
            chain_id -> Int8,
            // NOTE(review): `Nullable`'s SQL type parameter appears lost in
            // extraction — confirm the timestamp type against the migrations.
            handled_at -> Nullable,
        }
    }

    // NOTE(review): only these two tables are registered for same-query use;
    // confirm omitting the nodes/reorged-blocks tables is intentional.
    diesel::allow_tables_to_appear_in_same_query!(
        chaindexing_contract_addresses,
        chaindexing_events,
    );
}
58 |
--------------------------------------------------------------------------------
/chaindexing/src/events.rs:
--------------------------------------------------------------------------------
1 | mod event;
2 |
3 | pub use event::{Event, EventParam, PartialEvent};
4 |
5 | use std::collections::HashMap;
6 |
7 | use crate::{contracts, ChainId, Contract, ContractAddress};
8 | use ethers::types::{Block, Log, TxHash, U64};
9 |
/// Maps raw provider `logs` into chaindexing `Event`s, resolving each log's
/// owning contract address (by address + chain id) and its block's timestamp.
///
/// # Panics
/// The unwraps assume the ingester fetched consistent data: every log's
/// address must appear in `contract_addresses`, its `topics[0]` must match a
/// registered event, and its `block_number` must be present (mined log) and
/// found in `blocks_by_number`.
// NOTE(review): generic parameters in this signature appear lost in
// extraction (`HashMap`, `Block`, return `Vec`) — confirm against the crate.
pub fn get(
    logs: &[Log],
    contracts: &[Contract],
    contract_addresses: &[ContractAddress],
    chain_id: &ChainId,
    blocks_by_number: &HashMap>,
) -> Vec {
    let events_by_topics = contracts::group_events_by_topics(contracts);
    let contract_addresses_by_address =
        ContractAddress::group_contract_addresses_by_address_and_chain_id(contract_addresses);

    logs.iter()
        .map(
            |log @ Log {
                 topics,
                 address,
                 block_number,
                 ..
             }| {
                let contract_address =
                    contract_addresses_by_address.get(&(*address, *chain_id)).unwrap();
                let block = blocks_by_number.get(&block_number.unwrap()).unwrap();

                Event::new(
                    log,
                    // topic0 identifies the event type.
                    events_by_topics.get(&topics[0]).unwrap(),
                    chain_id,
                    &contract_address.contract_name,
                    // Narrowed to i64 for storage.
                    block.timestamp.as_u64() as i64,
                )
            },
        )
        .collect()
}
44 |
--------------------------------------------------------------------------------
/chaindexing/src/handlers.rs:
--------------------------------------------------------------------------------
1 | use std::cmp::max;
2 | use std::fmt::Debug;
3 | use std::{sync::Arc, time::Duration};
4 |
5 | mod handle_events;
6 | mod handler_context;
7 | mod maybe_handle_chain_reorg;
8 | mod pure_handler;
9 | mod side_effect_handler;
10 |
11 | pub use handler_context::HandlerContext;
12 | pub use pure_handler::{PureHandler, PureHandlerContext};
13 | pub use side_effect_handler::{SideEffectHandler, SideEffectHandlerContext};
14 |
15 | use tokio::{sync::Mutex, time::interval};
16 |
17 | use crate::deferred_futures::DeferredFutures;
18 | use crate::nodes::NodeTask;
19 | use crate::Config;
20 | use crate::{contracts, states, HasRawQueryClient};
21 |
/// Spawns the event-handling node task.
///
/// One subtask is spawned per chunk of chain ids (see `get_chunked_chain_ids`),
/// each looping over `handle_events::run` on a `handler_rate_ms` cadence. The
/// parent subtask then loops every `2 * handler_rate_ms` ms, handling chain
/// reorgs and flushing deferred multi-chain-state mutations.
// NOTE(review): generic parameters appear lost in extraction (`Config` takes
// a shared-state type parameter in the published crate).
pub async fn start(config: &Config) -> NodeTask {
    let node_task = NodeTask::new();
    let config = config.clone();

    node_task
        .add_subtask(tokio::spawn({
            let node_task = node_task.clone();

            // MultiChainStates are indexed in an order-agnostic fashion, so no need for txn client
            let repo_client_for_mcs = Arc::new(Mutex::new(config.repo.get_client().await));
            let deferred_mutations_for_mcs = DeferredFutures::new();

            async move {
                for chain_ids in get_chunked_chain_ids(&config) {
                    let config = config.clone();
                    let repo_client_for_mcs = repo_client_for_mcs.clone();
                    let deferred_mutations_for_mcs = deferred_mutations_for_mcs.clone();

                    node_task
                        .clone()
                        .add_subtask(tokio::spawn(async move {
                            let mut interval =
                                interval(Duration::from_millis(config.handler_rate_ms));

                            // Each chunk gets its own repo client and handler maps.
                            let repo_client = Arc::new(Mutex::new(config.repo.get_client().await));
                            let pure_handlers = contracts::get_pure_handlers(&config.contracts);
                            let side_effect_handlers =
                                contracts::get_side_effect_handlers(&config.contracts);

                            loop {
                                handle_events::run(
                                    &pure_handlers,
                                    &side_effect_handlers,
                                    (&chain_ids, config.blocks_per_batch),
                                    (&repo_client, &repo_client_for_mcs),
                                    &deferred_mutations_for_mcs,
                                    &config.shared_state,
                                )
                                .await;

                                interval.tick().await;
                            }
                        }))
                        .await;
                }

                let mut repo_client = config.repo.get_client().await;

                let state_migrations = contracts::get_state_migrations(&config.contracts);
                let state_table_names = states::get_all_table_names(&state_migrations);

                // Reorg handling runs at twice the handler interval.
                let mut interval = interval(Duration::from_millis(2 * config.handler_rate_ms));

                loop {
                    maybe_handle_chain_reorg::run(&mut repo_client, &state_table_names).await;

                    deferred_mutations_for_mcs.consume().await;

                    interval.tick().await;
                }
            }
        }))
        .await;

    node_task
}
88 |
89 | fn get_chunked_chain_ids(
90 | config: &Config,
91 | ) -> Vec> {
92 | let chain_ids: Vec<_> = config.chains.iter().map(|c| c.id as u64).collect();
93 | let chain_ids_count = chain_ids.len();
94 | let chunk_size = max(chain_ids_count / config.chain_concurrency as usize, 1);
95 |
96 | chain_ids.chunks(chunk_size).map(|c| c.to_vec()).collect()
97 | }
98 |
--------------------------------------------------------------------------------
/chaindexing/src/handlers/handle_events.rs:
--------------------------------------------------------------------------------
1 | use std::fmt::Debug;
2 | use std::{collections::HashMap, sync::Arc};
3 |
4 | use futures_util::StreamExt;
5 | use tokio::sync::Mutex;
6 |
7 | use crate::deferred_futures::DeferredFutures;
8 | use crate::streams::ContractAddressesStream;
9 | use crate::{ChaindexingRepo, ChaindexingRepoClientMutex};
10 | use crate::{EventAbi, ExecutesWithRawQuery, HasRawQueryClient, LoadsDataWithRawQuery};
11 |
12 | use super::pure_handler::{PureHandler, PureHandlerContext};
13 | use super::side_effect_handler::{SideEffectHandler, SideEffectHandlerContext};
14 |
/// Handles one batch of events for every contract address on the given
/// chains: pure handlers run for every loaded event inside a repo
/// transaction, side-effect handlers only for events at or above the
/// address's side-effect cursor, and both cursors are advanced before the
/// transaction commits.
// NOTE(review): generic parameters in this signature appear lost in
// extraction (handler maps, `Option`/`Arc`/`Mutex`) — confirm against the crate.
pub async fn run<'a, S: Send + Sync + Clone + Debug>(
    pure_handlers: &HashMap>,
    side_effect_handlers: &HashMap>>,
    (chain_ids, blocks_per_batch): (&[u64], u64),
    (repo_client, repo_client_for_mcs): (&ChaindexingRepoClientMutex, &ChaindexingRepoClientMutex),
    deferred_mutations_for_mcs: &DeferredFutures<'a>,
    shared_state: &Option>>,
) {
    for chain_id in chain_ids {
        let mut contract_addresses_stream =
            ContractAddressesStream::new(repo_client, *chain_id as i64).with_chunk_size(200);

        while let Some(contract_addresses) = contract_addresses_stream.next().await {
            for contract_address in contract_addresses {
                let from_block_number = contract_address.next_block_number_to_handle_from as u64;

                let client = repo_client.clone();
                let mut client = client.lock().await;

                // return ordered by block_number and log_index
                let events = ChaindexingRepo::load_events(
                    &client,
                    *chain_id,
                    &contract_address.address,
                    from_block_number,
                    blocks_per_batch,
                )
                .await;

                // ChainStates which include ContractState have to be handled orderly
                let txn_client = ChaindexingRepo::get_txn_client(&mut client).await;

                for event in &events {
                    {
                        if let Some(handler) = pure_handlers.get(event.get_abi()) {
                            let handler_context = PureHandlerContext::new(
                                event,
                                &txn_client,
                                repo_client_for_mcs,
                                deferred_mutations_for_mcs,
                            );

                            handler.handle_event(handler_context).await;
                        }
                    }

                    {
                        // Side effects are skipped below the cursor so they
                        // never re-run for already-processed blocks.
                        if event.block_number >= contract_address.next_block_number_for_side_effects
                        {
                            if let Some(handler) = side_effect_handlers.get(event.get_abi()) {
                                let handler_context =
                                    SideEffectHandlerContext::new(event, &txn_client, shared_state);

                                handler.handle_event(handler_context).await;
                            }
                        }
                    }
                }

                if let Some(last_event) = events.last() {
                    // Resume after the last handled block next run.
                    let next_block_number_to_handle_from = last_event.block_number as u64 + 1;

                    ChaindexingRepo::update_next_block_number_to_handle_from(
                        &txn_client,
                        &contract_address.address,
                        *chain_id,
                        next_block_number_to_handle_from,
                    )
                    .await;

                    // The side-effect cursor only ever moves forward.
                    if next_block_number_to_handle_from
                        > contract_address.next_block_number_for_side_effects as u64
                    {
                        ChaindexingRepo::update_next_block_number_for_side_effects(
                            &txn_client,
                            &contract_address.address,
                            *chain_id,
                            next_block_number_to_handle_from,
                        )
                        .await;
                    }
                }

                ChaindexingRepo::commit_txns(txn_client).await;
            }
        }
    }
}
103 |
--------------------------------------------------------------------------------
/chaindexing/src/handlers/handler_context.rs:
--------------------------------------------------------------------------------
1 | use crate::{ChaindexingRepoTxnClient, Event};
2 |
/// Common interface shared by pure and side-effect handler contexts:
/// access to the triggering event and a transactional repo client.
pub trait HandlerContext<'a>: Send + Sync {
    fn get_event(&self) -> &Event;
    fn get_client(&self) -> &ChaindexingRepoTxnClient<'a>;
}
7 |
--------------------------------------------------------------------------------
/chaindexing/src/handlers/maybe_handle_chain_reorg.rs:
--------------------------------------------------------------------------------
1 | use crate::chain_reorg::{ReorgedBlock, ReorgedBlocks};
2 | use crate::{states, ChaindexingRepo, LoadsDataWithRawQuery};
3 | use crate::{ChaindexingRepoClient, ExecutesWithRawQuery, HasRawQueryClient};
4 |
/// Backtracks indexed states and handling cursors for any unhandled reorged
/// blocks, then marks them handled — all within a single transaction.
// NOTE(review): `table_names`'s element type appears lost in extraction
// (`&Vec` with no parameter).
pub async fn run(repo_client: &mut ChaindexingRepoClient, table_names: &Vec) {
    let reorged_blocks = ChaindexingRepo::load_unhandled_reorged_blocks(repo_client).await;

    if !reorged_blocks.is_empty() {
        let repo_txn_client = ChaindexingRepo::get_txn_client(repo_client).await;

        // Only the earliest reorged block per chain needs handling —
        // backtracking to it also covers any later reorg on that chain.
        let reorged_blocks = ReorgedBlocks::only_earliest_per_chain(&reorged_blocks);

        for ReorgedBlock {
            block_number,
            chain_id,
            ..
        } in &reorged_blocks
        {
            states::backtrack_states(table_names, *chain_id, *block_number, &repo_txn_client).await;
            ChaindexingRepo::update_next_block_numbers_to_handle_from(
                &repo_txn_client,
                *chain_id as u64,
                *block_number as u64,
            )
            .await
        }

        let reorged_block_ids = ReorgedBlocks::get_ids(&reorged_blocks);
        ChaindexingRepo::update_reorged_blocks_as_handled(&repo_txn_client, &reorged_block_ids)
            .await;

        ChaindexingRepo::commit_txns(repo_txn_client).await;
    }
}
35 |
--------------------------------------------------------------------------------
/chaindexing/src/handlers/pure_handler.rs:
--------------------------------------------------------------------------------
1 | use std::sync::Arc;
2 |
3 | use tokio::sync::Mutex;
4 |
5 | use crate::deferred_futures::DeferredFutures;
6 | use crate::events::Event;
7 | use crate::{ChaindexingRepoClient, ChaindexingRepoTxnClient, EventParam};
8 |
9 | use super::handler_context::HandlerContext;
10 |
/// Pure handlers do not contain any side effects. They are simple reducers
/// that derive or index states deterministically.
#[crate::augmenting_std::async_trait]
pub trait PureHandler: Send + Sync {
    /// The human-readable ABI of the event being handled.
    /// For example, Uniswap's PoolCreated event's abi is:
    /// `PoolCreated(address indexed token0, address indexed token1, uint24 indexed fee, int24 tickSpacing, address pool)`.
    /// The chain explorer's event section can also be used to infer this.
    fn abi(&self) -> &'static str;
    /// Handles one event. Events arrive ordered by block number and log
    /// index, and handlers may be re-run (e.g. after resets), so the
    /// implementation must stay deterministic.
    async fn handle_event<'a, 'b>(&self, context: PureHandlerContext<'a, 'b>);
}
22 |
/// Event's context in a pure event handler
#[derive(Clone)]
pub struct PureHandlerContext<'a, 'b> {
    /// The event being handled.
    pub event: Event,
    pub(crate) repo_client: &'a ChaindexingRepoTxnClient<'a>,
    // Separate non-transactional client used for multi-chain states.
    // NOTE(review): `Arc`/`Mutex` type parameters appear lost in extraction.
    pub(crate) repo_client_for_mcs: Arc>,
    // Multi-chain-state mutations deferred until the reorg-handling loop flushes them.
    pub(crate) deferred_mutations_for_mcs: DeferredFutures<'b>,
}
31 |
32 | impl<'a, 'b> PureHandlerContext<'a, 'b> {
33 | pub fn new(
34 | event: &Event,
35 | repo_client: &'a ChaindexingRepoTxnClient<'a>,
36 | repo_client_for_mcs: &Arc>,
37 | deferred_mutations_for_mcs: &DeferredFutures<'b>,
38 | ) -> Self {
39 | Self {
40 | event: event.clone(),
41 | repo_client,
42 | repo_client_for_mcs: repo_client_for_mcs.clone(),
43 | deferred_mutations_for_mcs: deferred_mutations_for_mcs.clone(),
44 | }
45 | }
46 |
47 | pub fn get_event_params(&self) -> EventParam {
48 | self.event.get_params()
49 | }
50 | }
51 |
// Exposes the event and the transactional client through the shared
// `HandlerContext` interface.
impl<'a, 'b> HandlerContext<'a> for PureHandlerContext<'a, 'b> {
    fn get_event(&self) -> &Event {
        &self.event
    }

    fn get_client(&self) -> &ChaindexingRepoTxnClient<'a> {
        self.repo_client
    }
}
61 |
--------------------------------------------------------------------------------
/chaindexing/src/handlers/side_effect_handler.rs:
--------------------------------------------------------------------------------
1 | use std::fmt::Debug;
2 | use std::sync::Arc;
3 |
4 | use tokio::sync::Mutex;
5 |
6 | use crate::events::Event;
7 | use crate::{ChaindexingRepoTxnClient, EventParam};
8 |
9 | use super::handler_context::HandlerContext;
10 |
11 | /// SideEffectHandlers are event handlers that help handle side-effects for events.
12 | /// This is useful for handling events only ONCE and can rely on a non-deterministic
13 | /// shared state. Some use-cases are notifications, bridging etc. Chaindexing ensures
14 | /// that the side-effect handlers are called once immutably regardless of resets.
15 | /// However, one can dangerously reset including side effects with the `reset_including_side_effects`
16 | /// exposed in the Config API.
#[crate::augmenting_std::async_trait]
pub trait SideEffectHandler: Send + Sync {
    /// State shared across side-effect handlers. Must be cheap to clone and
    /// `Debug` for diagnostics.
    type SharedState: Send + Sync + Clone + Debug;

    /// The human-readable ABI of the event being handled.
    /// For example, Uniswap's PoolCreated event's abi is:
    /// `PoolCreated(address indexed token0, address indexed token1, uint24 indexed fee, int24 tickSpacing, address pool)`.
    /// The chain explorer's event section can also be used to infer this.
    fn abi(&self) -> &'static str;

    /// Handles a single event. Per the trait docs above, side effects
    /// (notifications, bridging, etc.) belong here rather than in pure handlers.
    async fn handle_event<'a>(&self, context: SideEffectHandlerContext<'a, Self::SharedState>);
}
28 |
29 | /// Event's context in a side effect handler
#[derive(Clone)]
pub struct SideEffectHandlerContext<'a, SharedState: Sync + Send + Clone> {
    /// The event currently being handled (owned copy).
    pub event: Event,
    /// Transaction-scoped repo client used for event-bound writes.
    pub(crate) repo_client: &'a ChaindexingRepoTxnClient<'a>,
    // NOTE(review): generic parameters appear stripped by extraction here;
    // presumably `Option<Arc<Mutex<SharedState>>>` — confirm against upstream.
    shared_state: Option>>,
}
36 |
impl<'a, SharedState: Sync + Send + Clone> SideEffectHandlerContext<'a, SharedState> {
    /// Builds a context for one event: clones the event and the (optional)
    /// shared-state handle; borrows the transaction-scoped repo client.
    pub fn new(
        event: &Event,
        repo_client: &'a ChaindexingRepoTxnClient<'a>,
        // NOTE(review): type garbled by extraction; presumably
        // `&Option<Arc<Mutex<SharedState>>>` — confirm against upstream.
        shared_state: &Option>>,
    ) -> Self {
        Self {
            event: event.clone(),
            repo_client,
            shared_state: shared_state.clone(),
        }
    }

    /// Returns a clone of the shared state.
    ///
    /// # Panics
    /// Panics (`unwrap`) if no shared state was configured for this handler.
    pub async fn get_shared_state(&self) -> SharedState {
        let shared_state = self.shared_state.clone().unwrap();
        let shared_state = shared_state.lock().await;
        shared_state.clone()
    }

    /// Returns the decoded parameters of the event being handled.
    pub fn get_event_params(&self) -> EventParam {
        self.event.get_params()
    }
}
60 |
impl<'a, SharedState: Sync + Send + Clone> HandlerContext<'a>
    for SideEffectHandlerContext<'a, SharedState>
{
    // Exposes the event being handled to generic helpers.
    fn get_event(&self) -> &Event {
        &self.event
    }

    // Exposes the transaction-scoped repo client.
    fn get_client(&self) -> &ChaindexingRepoTxnClient<'a> {
        self.repo_client
    }
}
72 |
--------------------------------------------------------------------------------
/chaindexing/src/ingester.rs:
--------------------------------------------------------------------------------
1 | mod error;
2 | mod filters;
3 | mod ingest_events;
4 | mod maybe_handle_chain_reorg;
5 | mod provider;
6 |
7 | pub use error::IngesterError;
8 | pub use provider::{Provider, ProviderError};
9 |
10 | use std::cmp::max;
11 | use std::collections::HashMap;
12 | use std::sync::Arc;
13 | use std::time::Duration;
14 |
15 | use chrono::Utc;
16 | use futures_util::StreamExt;
17 | use tokio::sync::Mutex;
18 | use tokio::time::interval;
19 |
20 | use crate::contracts;
21 | use crate::nodes::NodeTask;
22 | use crate::pruning::PruningConfig;
23 | use crate::states;
24 | use crate::streams::ContractAddressesStream;
25 | use crate::Chain;
26 | use crate::ChainId;
27 | use crate::Config;
28 | use crate::Contract;
29 | use crate::ContractAddress;
30 | use crate::{ChaindexingRepo, ChaindexingRepoClient, ChaindexingRepoConn};
31 | use crate::{ExecutesWithRawQuery, HasRawQueryClient, Repo};
32 |
/// Starts the ingester node task: one subtask per chunk of chains (see
/// `get_chunked_chains`). Each subtask loops forever, ingesting events for
/// every chain in its chunk and then waiting one `ingestion_rate_ms` tick.
pub async fn start(config: &Config) -> NodeTask {
    let node_task = NodeTask::new();

    for chains in get_chunked_chains(config) {
        let config = config.clone();

        node_task
            .add_subtask(tokio::spawn(async move {
                let mut interval = interval(Duration::from_millis(config.ingestion_rate_ms));
                let mut last_pruned_at_per_chain_id = HashMap::new();

                loop {
                    for chain in chains.iter() {
                        // NOTE(review): provider, repo client, and pool are
                        // re-created on every pass — confirm this is intentional
                        // rather than hoisting them out of the loop.
                        let provider = provider::get(&chain.json_rpc_url);
                        let repo_client = Arc::new(Mutex::new(config.repo.get_client().await));
                        let pool = config.repo.get_pool(1).await;
                        let conn = ChaindexingRepo::get_conn(&pool).await;
                        let conn = Arc::new(Mutex::new(conn));

                        ingest_for_chain(
                            &chain.id,
                            provider,
                            conn.clone(),
                            &repo_client,
                            &config,
                            &mut last_pruned_at_per_chain_id,
                        )
                        .await
                        // Any ingestion error aborts this subtask.
                        .unwrap();
                    }

                    interval.tick().await;
                }
            }))
            .await;
    }

    node_task
}
72 |
/// Splits the configured chains into chunks for concurrent ingestion.
///
/// Chunk size is `len / chain_concurrency` (integer division), clamped to at
/// least 1. Note the rounding can produce more chunks than
/// `chain_concurrency` (e.g. 5 chains at concurrency 2 → chunks of 2, 2, 1).
pub fn get_chunked_chains(config: &Config) -> Vec> {
    let chains: Vec<_> = config.chains.clone();
    let chunk_size = max(chains.len() / config.chain_concurrency as usize, 1);

    chains.chunks(chunk_size).map(|c| c.to_vec()).collect()
}
79 |
/// Runs one ingestion pass for a single chain: streams its contract addresses
/// in chunks, ingests new events for each chunk, re-scans the confirmation
/// window for reorgs, and finally prunes old data when due.
pub async fn ingest_for_chain<'a, S: Send + Sync + Clone>(
    chain_id: &ChainId,
    provider: Arc,
    conn: Arc>>,
    repo_client: &Arc>,
    config @ Config {
        contracts,
        pruning_config,
        ..
    }: &Config,
    last_pruned_at_per_chain_id: &mut HashMap,
) -> Result<(), IngesterError> {
    let current_block_number = provider::fetch_current_block_number(&provider).await;
    // Stream addresses in chunks of 5 to bound memory and provider load.
    let mut contract_addresses_stream =
        ContractAddressesStream::new(repo_client, *chain_id as i64).with_chunk_size(5);

    while let Some(contract_addresses) = contract_addresses_stream.next().await {
        // Skip addresses already caught up with the chain head.
        let contract_addresses =
            filter_uningested_contract_addresses(&contract_addresses, current_block_number);

        let mut conn = conn.lock().await;
        let repo_client = &*repo_client.lock().await;

        ingest_events::run(
            &mut conn,
            repo_client,
            contract_addresses.clone(),
            &provider,
            chain_id,
            current_block_number,
            config,
        )
        .await?;

        // Detect and repair chain reorgs within the confirmation window.
        maybe_handle_chain_reorg::run(
            &mut conn,
            contract_addresses,
            &provider,
            chain_id,
            current_block_number,
            config,
        )
        .await?;
    }

    maybe_prune(
        pruning_config,
        last_pruned_at_per_chain_id,
        contracts,
        *chain_id as u64,
        current_block_number,
        &*repo_client.lock().await,
    )
    .await;

    Ok(())
}
137 |
138 | async fn maybe_prune(
139 | pruning_config: &Option,
140 | last_pruned_at_per_chain_id: &mut HashMap,
141 | contracts: &[Contract],
142 | chain_id: u64,
143 | current_block_number: u64,
144 | repo_client: &ChaindexingRepoClient,
145 | ) {
146 | if let Some(pruning_config @ PruningConfig { prune_interval, .. }) = pruning_config {
147 | let now = Utc::now().timestamp() as u64;
148 | let last_pruned_at = last_pruned_at_per_chain_id.get(&chain_id).unwrap_or(&now);
149 | if now - *last_pruned_at >= *prune_interval {
150 | let min_pruning_block_number =
151 | pruning_config.get_min_block_number(current_block_number);
152 |
153 | ChaindexingRepo::prune_events(repo_client, min_pruning_block_number, chain_id).await;
154 |
155 | let state_migrations = contracts::get_state_migrations(contracts);
156 | let state_table_names = states::get_all_table_names(&state_migrations);
157 | states::prune_state_versions(
158 | &state_table_names,
159 | repo_client,
160 | min_pruning_block_number,
161 | chain_id,
162 | )
163 | .await;
164 | }
165 | last_pruned_at_per_chain_id.insert(chain_id, Utc::now().timestamp() as u64);
166 | }
167 | }
168 |
/// Keeps only contract addresses whose next block to ingest is at or below
/// the chain head; addresses already past the head have nothing new to fetch.
fn filter_uningested_contract_addresses(
    contract_addresses: &[ContractAddress],
    current_block_number: u64,
) -> Vec {
    contract_addresses
        .iter()
        .filter(|ca| current_block_number >= ca.next_block_number_to_ingest_from as u64)
        .cloned()
        .collect()
}
179 |
--------------------------------------------------------------------------------
/chaindexing/src/ingester/error.rs:
--------------------------------------------------------------------------------
1 | use crate::RepoError;
2 |
#[derive(Debug)]
pub enum IngesterError {
    /// The repo/database connection could not be established or was lost.
    RepoConnectionError,
    /// Any other repo failure, carrying the underlying error message.
    GenericError(String),
}
8 |
// Maps low-level repo errors into ingester-level errors.
// NOTE(review): the source type parameter (`From<RepoError>`) appears stripped
// by extraction; `value: RepoError` below implies it — confirm upstream.
impl From for IngesterError {
    fn from(value: RepoError) -> Self {
        match value {
            RepoError::NotConnected => IngesterError::RepoConnectionError,
            RepoError::Unknown(error) => IngesterError::GenericError(error),
        }
    }
}
17 |
--------------------------------------------------------------------------------
/chaindexing/src/ingester/filters.rs:
--------------------------------------------------------------------------------
1 | use std::collections::HashMap;
2 |
3 | use ethers::types::{Address, Filter as EthersFilter};
4 | use std::cmp::min;
5 |
6 | use crate::chain_reorg::Execution;
7 | use crate::contracts;
8 | use crate::contracts::Contract;
9 | use crate::contracts::ContractEventTopic;
10 | use crate::ContractAddress;
11 |
/// Builds one log filter per contract address that has registered event
/// topics, using `execution` to pick each address's block range.
pub fn get(
    contract_addresses: &[ContractAddress],
    contracts: &[Contract],
    current_block_number: u64,
    blocks_per_batch: u64,
    execution: &Execution,
) -> Vec {
    let topics_by_contract_name = contracts::group_event_topics_by_names(contracts);

    contract_addresses
        .iter()
        .filter_map(|contract_address| {
            // Addresses whose contract has no known topics yield no filter;
            // `maybe_new` may also yield none (no eligible block range).
            topics_by_contract_name.get(contract_address.contract_name.as_str()).and_then(
                |topics| {
                    Filter::maybe_new(
                        contract_address,
                        topics,
                        current_block_number,
                        blocks_per_batch,
                        execution,
                    )
                },
            )
        })
        .collect()
}
38 |
39 | pub fn group_by_contract_address_id(filters: &[Filter]) -> HashMap> {
40 | let empty_filter_group = vec![];
41 |
42 | filters.iter().fold(
43 | HashMap::new(),
44 | |mut filters_by_contract_address_id, filter| {
45 | let mut filter_group = filters_by_contract_address_id
46 | .get(&filter.contract_address_id)
47 | .unwrap_or(&empty_filter_group)
48 | .to_vec();
49 |
50 | filter_group.push(filter.clone());
51 |
52 | filters_by_contract_address_id.insert(filter.contract_address_id, filter_group);
53 |
54 | filters_by_contract_address_id
55 | },
56 | )
57 | }
58 |
59 | pub fn get_latest(filters: &Vec) -> Option {
60 | let mut filters = filters.to_owned();
61 | filters.sort_by_key(|f| f.value.get_to_block());
62 |
63 | filters.last().cloned()
64 | }
65 |
/// A per-contract-address log filter plus the metadata needed to map fetched
/// logs back to their contract address.
#[derive(Clone, Debug)]
pub struct Filter {
    /// DB id of the contract address this filter targets.
    pub contract_address_id: i64,
    /// Hex string of the contract address.
    pub address: String,
    /// The underlying ethers filter (address, topic0, block range).
    pub value: EthersFilter,
}
72 |
impl Filter {
    /// Builds a filter for one contract address, or `None` when there is no
    /// eligible block range for the given execution mode.
    ///
    /// - `Execution::Main`: from the next uningested block up to
    ///   `blocks_per_batch` ahead, clamped to the chain head.
    /// - `Execution::Confirmation`: only while inside the confirmation
    ///   window; re-scans from `min_confirmation_count` blocks back (never
    ///   before the contract's start block) to detect reorgs.
    fn maybe_new(
        contract_address: &ContractAddress,
        topics: &[ContractEventTopic],
        current_block_number: u64,
        blocks_per_batch: u64,
        execution: &Execution,
    ) -> Option {
        let ContractAddress {
            id: contract_address_id,
            next_block_number_to_ingest_from,
            start_block_number,
            address,
            ..
        } = contract_address;

        let next_block_number_to_ingest_from = *next_block_number_to_ingest_from as u64;

        match execution {
            Execution::Main => Some((
                next_block_number_to_ingest_from,
                min(
                    next_block_number_to_ingest_from + blocks_per_batch,
                    current_block_number,
                ),
            )),
            Execution::Confirmation(min_confirmation_count) => {
                // TODO: Move logic to higher level
                if min_confirmation_count.is_in_confirmation_window(
                    next_block_number_to_ingest_from,
                    current_block_number,
                ) {
                    Some((
                        min_confirmation_count.deduct_from(
                            next_block_number_to_ingest_from,
                            *start_block_number as u64,
                        ),
                        next_block_number_to_ingest_from + blocks_per_batch,
                    ))
                } else {
                    None
                }
            }
        }
        // Turn the (from, to) range into a concrete ethers filter.
        // NOTE(review): `address.parse()...unwrap()` panics on a malformed
        // address string — presumably validated upstream; confirm.
        .map(|(from_block_number, to_block_number)| Filter {
            contract_address_id: *contract_address_id,
            address: address.to_string(),
            value: EthersFilter::new()
                .address(address.parse::().unwrap())
                .topic0(topics.to_vec())
                .from_block(from_block_number)
                .to_block(to_block_number),
        })
    }
}
128 |
--------------------------------------------------------------------------------
/chaindexing/src/ingester/ingest_events.rs:
--------------------------------------------------------------------------------
1 | use std::collections::HashMap;
2 | use std::sync::Arc;
3 |
4 | use futures_util::FutureExt;
5 |
6 | use super::filters::{self, Filter};
7 | use super::provider::{self, Provider};
8 | use super::IngesterError;
9 |
10 | use crate::chain_reorg::Execution;
11 | use crate::Config;
12 | use crate::{events, ChainId};
13 | use crate::{
14 | ChaindexingRepo, ChaindexingRepoClient, ChaindexingRepoConn, ContractAddress,
15 | LoadsDataWithRawQuery, Repo,
16 | };
17 |
/// Fetches logs for the given contract addresses' next block ranges, converts
/// them to events, and persists the events together with the advanced
/// `next_block_number_to_ingest_from` cursors in a single transaction.
pub async fn run<'a, S: Send + Sync + Clone>(
    conn: &mut ChaindexingRepoConn<'a>,
    repo_client: &ChaindexingRepoClient,
    contract_addresses: Vec,
    provider: &Arc,
    chain_id: &ChainId,
    current_block_number: u64,
    Config {
        contracts,
        blocks_per_batch,
        ..
    }: &Config,
) -> Result<(), IngesterError> {
    let filters = filters::get(
        &contract_addresses,
        contracts,
        current_block_number,
        *blocks_per_batch,
        &Execution::Main,
    );

    // Drop single-block filters whose block was already ingested, to avoid
    // duplicating events for caught-up addresses.
    let filters = remove_already_ingested_filters(&filters, &contract_addresses, repo_client).await;

    if !filters.is_empty() {
        let logs = provider::fetch_logs(provider, &filters).await;
        let blocks_by_tx_hash = provider::fetch_blocks_by_number(provider, &logs).await;
        let events = events::get(
            &logs,
            contracts,
            &contract_addresses,
            chain_id,
            &blocks_by_tx_hash,
        );
        let contract_addresses = contract_addresses.clone();

        // Events and cursor updates must land atomically so a crash cannot
        // skip or double-ingest a block range.
        ChaindexingRepo::run_in_transaction(conn, move |conn| {
            async move {
                // NOTE(review): `events.clone()` looks redundant — `&events`
                // should suffice here.
                ChaindexingRepo::create_events(conn, &events.clone()).await;

                update_next_block_numbers_to_ingest_from(conn, &contract_addresses, &filters).await;

                Ok(())
            }
            .boxed()
        })
        .await?;
    }

    Ok(())
}
68 |
/// Removes filters whose single-block range was already ingested.
///
/// A filter with `from_block == to_block` means the address is caught up with
/// the head; if the latest stored event for that address is already at that
/// block, fetching again would re-ingest the same events.
async fn remove_already_ingested_filters(
    filters: &Vec,
    contract_addresses: &[ContractAddress],
    repo_client: &ChaindexingRepoClient,
) -> Vec {
    let current_block_filters: Vec<_> = filters
        .iter()
        .filter(|f| f.value.get_from_block() == f.value.get_to_block())
        .collect();

    if current_block_filters.is_empty() {
        filters.to_owned()
    } else {
        let addresses: Vec<_> = contract_addresses.iter().map(|c| c.address.clone()).collect();

        // Latest stored event per contract address.
        let latest_ingested_events =
            ChaindexingRepo::load_latest_events(repo_client, &addresses).await;
        let latest_ingested_events =
            latest_ingested_events
                .iter()
                .fold(HashMap::new(), |mut events_by_address, event| {
                    events_by_address.insert(&event.contract_address, event);

                    events_by_address
                });

        // Single-block filters whose target block is already ingested,
        // keyed by contract address id for the final exclusion pass.
        let already_ingested_filters = current_block_filters
            .iter()
            .filter(|filter| match latest_ingested_events.get(&filter.address) {
                Some(latest_event) => {
                    latest_event.block_number as u64
                        == filter.value.get_to_block().unwrap().as_u64()
                }
                None => false,
            })
            .fold(HashMap::new(), |mut stale_current_block_filters, filter| {
                stale_current_block_filters.insert(filter.contract_address_id, filter);

                stale_current_block_filters
            });

        filters
            .iter()
            .filter(|f| !already_ingested_filters.contains_key(&f.contract_address_id))
            .cloned()
            .collect::>()
    }
}
117 |
/// Advances each contract address's ingestion cursor to one past the highest
/// `to_block` among the filters just fetched for it.
async fn update_next_block_numbers_to_ingest_from<'a>(
    conn: &mut ChaindexingRepoConn<'a>,
    contract_addresses: &[ContractAddress],
    filters: &[Filter],
) {
    let filters_by_contract_address_id = filters::group_by_contract_address_id(filters);

    // Pair each address with its filters; addresses with no filters are skipped.
    for (contract_address, filters) in contract_addresses
        .iter()
        .filter_map(|ca| filters_by_contract_address_id.get(&ca.id).map(|f| (ca, f)))
    {
        if let Some(latest_filter) = filters::get_latest(filters) {
            let next_block_number_to_ingest_from = latest_filter.value.get_to_block().unwrap() + 1;

            ChaindexingRepo::update_next_block_number_to_ingest_from(
                conn,
                contract_address,
                next_block_number_to_ingest_from.as_u64() as i64,
            )
            .await
        }
    }
}
141 |
--------------------------------------------------------------------------------
/chaindexing/src/ingester/maybe_handle_chain_reorg.rs:
--------------------------------------------------------------------------------
1 | use std::collections::HashSet;
2 | use std::sync::Arc;
3 |
4 | use futures_util::FutureExt;
5 | use std::cmp::min;
6 |
7 | use crate::chain_reorg::{Execution, UnsavedReorgedBlock};
8 | use crate::events::{self, Event};
9 | use crate::Config;
10 | use crate::{ChainId, ChaindexingRepo, ChaindexingRepoConn, ContractAddress, Repo};
11 |
12 | use super::filters::{self, Filter};
13 | use super::Provider;
14 | use super::{provider, IngesterError};
15 |
/// Re-fetches the confirmation window for each contract address and, if the
/// provider's events differ from what was already ingested, records a reorged
/// block and swaps the affected events in one transaction.
pub async fn run<'a, S: Send + Sync + Clone>(
    conn: &mut ChaindexingRepoConn<'a>,
    contract_addresses: Vec,
    provider: &Arc,
    chain_id: &ChainId,
    current_block_number: u64,
    Config {
        contracts,
        min_confirmation_count,
        blocks_per_batch,
        ..
    }: &Config,
) -> Result<(), IngesterError> {
    // Confirmation-mode filters cover the window that may still reorg.
    let filters = filters::get(
        &contract_addresses,
        contracts,
        current_block_number,
        *blocks_per_batch,
        &Execution::Confirmation(min_confirmation_count),
    );

    if !filters.is_empty() {
        let already_ingested_events = get_already_ingested_events(conn, &filters).await;
        let logs = provider::fetch_logs(provider, &filters).await;
        let blocks_by_number = provider::fetch_blocks_by_number(provider, &logs).await;

        let provider_events = events::get(
            &logs,
            contracts,
            &contract_addresses,
            chain_id,
            &blocks_by_number,
        );

        // A non-empty diff between stored and provider events means a reorg.
        if let Some(added_and_removed_events) =
            get_provider_added_and_removed_events(&already_ingested_events, &provider_events)
        {
            handle_chain_reorg(conn, chain_id, added_and_removed_events).await?;
        }
    }

    Ok(())
}
59 |
/// Loads, per filter, the events previously ingested for that filter's
/// address within the filter's block range.
///
/// # Panics
/// `unwrap`s the filters' from/to blocks — filters built by `filters::get`
/// always set both.
async fn get_already_ingested_events<'a>(
    conn: &mut ChaindexingRepoConn<'a>,
    filters: &Vec,
) -> Vec {
    let mut already_ingested_events = vec![];
    for filter in filters {
        let from_block = filter.value.get_from_block().unwrap().as_u64();
        let to_block = filter.value.get_to_block().unwrap().as_u64();

        let mut events =
            ChaindexingRepo::get_events(conn, filter.address.to_owned(), from_block, to_block)
                .await;
        already_ingested_events.append(&mut events);
    }

    already_ingested_events
}
77 |
/// Applies a detected reorg atomically: records the reorged block (at the
/// earliest affected block number), deletes events the provider no longer
/// reports, and inserts the newly reported ones.
async fn handle_chain_reorg<'a>(
    conn: &mut ChaindexingRepoConn<'a>,
    chain_id: &ChainId,
    (added_events, removed_events): (Vec, Vec),
) -> Result<(), IngesterError> {
    let earliest_block_number = get_earliest_block_number((&added_events, &removed_events));
    let new_reorged_block = UnsavedReorgedBlock::new(earliest_block_number, chain_id);

    // All three writes must be atomic so downstream handlers see a
    // consistent view of the reorged range.
    ChaindexingRepo::run_in_transaction(conn, move |conn| {
        async move {
            ChaindexingRepo::create_reorged_block(conn, &new_reorged_block).await;

            let event_ids: Vec<_> = removed_events.iter().map(|e| e.id).collect();
            ChaindexingRepo::delete_events_by_ids(conn, &event_ids).await;

            ChaindexingRepo::create_events(conn, &added_events).await;

            Ok(())
        }
        .boxed()
    })
    .await?;

    Ok(())
}
103 |
/// Diffs provider events against already-ingested events.
///
/// Returns `None` when the two sets match (no reorg); otherwise
/// `Some((added, removed))` — events only the provider reports, and events
/// only the database holds.
fn get_provider_added_and_removed_events(
    already_ingested_events: &[Event],
    provider_events: &[Event],
) -> Option<(Vec, Vec)> {
    // Sets for O(1) membership checks in both directions.
    let already_ingested_events_set: HashSet<_> = already_ingested_events.iter().cloned().collect();
    let provider_events_set: HashSet<_> = provider_events.iter().cloned().collect();

    let added_events: Vec<_> = provider_events
        .iter()
        .filter(|e| !already_ingested_events_set.contains(e))
        .cloned()
        .collect();

    let removed_events: Vec<_> = already_ingested_events
        .iter()
        .filter(|e| !provider_events_set.contains(e))
        .cloned()
        .collect();

    if added_events.is_empty() && removed_events.is_empty() {
        None
    } else {
        Some((added_events, removed_events))
    }
}
129 |
/// Returns the smallest block number across both event lists.
///
/// # Panics
/// The `(None, None)` arm is `unreachable!` — callers only invoke this after
/// detecting at least one added or removed event.
fn get_earliest_block_number((added_events, removed_events): (&Vec, &Vec)) -> i64 {
    let earliest_added_event = added_events.iter().min_by_key(|e| e.block_number);
    let earliest_removed_event = removed_events.iter().min_by_key(|e| e.block_number);

    match (earliest_added_event, earliest_removed_event) {
        (Some(event), None) | (None, Some(event)) => event.block_number,
        (Some(earliest_added), Some(earliest_removed)) => {
            min(earliest_added.block_number, earliest_removed.block_number)
        }
        _ => unreachable!("Added Events or Removed Events must have at least one entry"),
    }
}
142 |
--------------------------------------------------------------------------------
/chaindexing/src/ingester/provider.rs:
--------------------------------------------------------------------------------
1 | use std::collections::HashMap;
2 | use std::sync::Arc;
3 | use std::time::Duration;
4 |
5 | use ethers::prelude::Middleware;
6 | use ethers::prelude::*;
7 | use ethers::providers::{Http, Provider as EthersProvider, ProviderError as EthersProviderError};
8 | use ethers::types::{Filter as EthersFilter, Log};
9 | use futures_util::future::try_join_all;
10 | use tokio::time::sleep;
11 |
12 | use super::filters::Filter;
13 |
14 | pub type ProviderError = EthersProviderError;
15 |
#[crate::augmenting_std::async_trait]
pub trait Provider: Clone + Sync + Send {
    /// Returns the chain's current head block number.
    async fn get_block_number(&self) -> Result;
    /// Fetches logs matching `filter`.
    async fn get_logs(&self, filter: &EthersFilter) -> Result, ProviderError>;

    /// Fetches a single block by its number.
    async fn get_block(&self, block_number: U64) -> Result, ProviderError>;
    /// Fetches the blocks referenced by `logs`, keyed by block number.
    /// Blocks are requested `CHUNK_SIZE` at a time to bound provider load.
    async fn get_blocks_by_number(
        &self,
        logs: &Vec,
    ) -> Result>, ProviderError> {
        let mut logs = logs.to_owned();
        // NOTE(review): `dedup_by_key` removes only *consecutive* duplicates,
        // so this assumes `logs` arrive ordered by block number — confirm.
        logs.dedup_by_key(|log| log.block_number);

        const CHUNK_SIZE: usize = 4;
        let chunked_logs: Vec<_> = logs.chunks(CHUNK_SIZE).collect();

        let mut blocks = vec![];
        for chunked_log in chunked_logs {
            blocks.extend(
                try_join_all(
                    chunked_log
                        .iter()
                        // `unwrap`: logs fetched from the provider carry a
                        // block number (pending logs would not).
                        .map(|Log { block_number, .. }| self.get_block(block_number.unwrap())),
                )
                .await?,
            );
        }

        let mut blocks_by_number = HashMap::new();
        for block @ Block { number, .. } in blocks {
            blocks_by_number.insert(number.unwrap(), block);
        }

        Ok(blocks_by_number)
    }
}
52 |
// Delegates the Provider trait to ethers' HTTP middleware.
#[crate::augmenting_std::async_trait]
impl Provider for EthersProvider {
    async fn get_block_number(&self) -> Result {
        Middleware::get_block_number(&self).await
    }

    async fn get_logs(&self, filter: &EthersFilter) -> Result, ProviderError> {
        Middleware::get_logs(&self, filter).await
    }

    async fn get_block(&self, block_number: U64) -> Result, ProviderError> {
        // NOTE(review): `unwrap` panics if the node returns no block for this
        // number (e.g. a lagging replica) — consider surfacing an error instead.
        Ok(Middleware::get_block(&self, block_number).await?.unwrap())
    }
}
67 |
/// Builds a shared HTTP JSON-RPC provider for the given URL.
///
/// # Panics
/// Panics if `json_rpc_url` cannot be parsed into a provider URL.
pub fn get(json_rpc_url: &str) -> Arc {
    Arc::new(EthersProvider::::try_from(json_rpc_url).unwrap())
}
71 |
/// Polls the provider for the current head block number, retrying forever
/// with exponential backoff on provider errors.
pub async fn fetch_current_block_number(provider: &Arc) -> u64 {
    let mut maybe_current_block_number = None;
    let mut retries_so_far = 0;

    while maybe_current_block_number.is_none() {
        match provider.get_block_number().await {
            Ok(current_block_number) => {
                maybe_current_block_number = Some(current_block_number.as_u64())
            }
            Err(provider_error) => {
                eprintln!("Provider Error: {}", provider_error);

                backoff(retries_so_far).await;
                retries_so_far += 1;
            }
        }
    }

    // Safe: the loop only exits once a value was set.
    maybe_current_block_number.unwrap()
}
92 |
/// Fetches logs for all filters concurrently and flattens the results,
/// retrying forever with exponential backoff if any request fails.
pub async fn fetch_logs(provider: &Arc, filters: &[Filter]) -> Vec {
    let mut maybe_logs = None;
    let mut retries_so_far = 0;

    while maybe_logs.is_none() {
        // All-or-nothing: one failing filter retries the whole batch.
        match try_join_all(filters.iter().map(|f| provider.get_logs(&f.value))).await {
            Ok(logs_per_filter) => {
                let logs = logs_per_filter.into_iter().flatten().collect();

                maybe_logs = Some(logs)
            }
            Err(provider_error) => {
                eprintln!("Provider Error: {}", provider_error);

                backoff(retries_so_far).await;
                retries_so_far += 1;
            }
        }
    }

    // Safe: the loop only exits once a value was set.
    maybe_logs.unwrap()
}
115 |
/// Fetches the blocks referenced by `logs`, keyed by block number, retrying
/// forever with exponential backoff on provider errors.
pub async fn fetch_blocks_by_number(
    provider: &Arc,
    logs: &Vec,
) -> HashMap> {
    let mut maybe_blocks_by_number = None;
    let mut retries_so_far = 0;

    while maybe_blocks_by_number.is_none() {
        match provider.get_blocks_by_number(logs).await {
            // NOTE(review): binding is misnamed — `get_blocks_by_number` keys
            // by block number, not tx hash.
            Ok(blocks_by_tx_hash) => maybe_blocks_by_number = Some(blocks_by_tx_hash),
            Err(provider_error) => {
                eprintln!("Provider Error: {}", provider_error);

                backoff(retries_so_far).await;
                retries_so_far += 1;
            }
        }
    }

    // Safe: the loop only exits once a value was set.
    maybe_blocks_by_number.unwrap()
}
137 |
138 | async fn backoff(retries_so_far: u32) {
139 | sleep(Duration::from_secs(2u64.pow(retries_so_far))).await;
140 | }
141 |
--------------------------------------------------------------------------------
/chaindexing/src/lib.rs:
--------------------------------------------------------------------------------
1 | // TODO: Add back
2 | // #![warn(
3 | // missing_debug_implementations,
4 | // missing_docs,
5 | // rust_2018_idioms,
6 | // unreachable_pub
7 | // )]
8 |
9 | //! # Chaindexing
10 | //! Index any EVM chain and query in SQL.
11 | //!
12 | //! View working examples here: .
13 | mod chain_reorg;
14 | mod chains;
15 | mod config;
16 | mod contracts;
17 | mod diesel;
18 | mod handlers;
19 | mod nodes;
20 | mod pruning;
21 | mod repos;
22 | mod root;
23 |
24 | /// Augmenting modules for standard library to support Chaindexing's operations
25 | pub mod augmenting_std;
26 |
27 | pub use chains::{Chain, ChainId};
28 | pub use config::{Config, OptimizationConfig};
29 | pub use contracts::{Contract, ContractAddress, EventAbi};
30 | pub use events::{Event, EventParam};
31 | pub use handlers::{
32 | PureHandler as EventHandler, PureHandlerContext as EventContext, SideEffectHandler,
33 | SideEffectHandlerContext as SideEffectContext,
34 | };
35 | pub use nodes::NodeHeartbeat as Heartbeat;
36 |
37 | pub use ethers::types::{I256, U256};
38 | use tokio::sync::Mutex;
39 |
40 | /// Houses traits and structs for implementing states that can be indexed.
41 | pub mod states;
42 |
43 | /// Hexadecimal representation of addresses (such as contract addresses)
44 | pub type Address = ethers::types::Address;
45 | /// Represents bytes
46 | pub type Bytes = Vec;
47 | #[cfg(feature = "postgres")]
48 | pub use repos::PostgresRepo;
49 |
50 | #[doc(hidden)]
51 | pub mod booting;
52 | #[doc(hidden)]
53 | pub mod deferred_futures;
54 | #[doc(hidden)]
55 | pub mod events;
56 | #[doc(hidden)]
57 | pub mod ingester;
58 | #[doc(hidden)]
59 | pub use contracts::{ContractEvent, UnsavedContractAddress};
60 | #[doc(hidden)]
61 | pub use ingester::Provider as IngesterProvider;
62 | #[doc(hidden)]
63 | pub use repos::*;
64 |
65 | #[doc(hidden)]
66 | #[cfg(feature = "postgres")]
67 | pub use repos::{PostgresRepoConn, PostgresRepoPool};
68 |
69 | #[cfg(feature = "postgres")]
70 | #[doc(hidden)]
71 | pub type ChaindexingRepo = PostgresRepo;
72 |
73 | #[cfg(feature = "postgres")]
74 | #[doc(hidden)]
75 | pub type ChaindexingRepoPool = PostgresRepoPool;
76 |
77 | #[cfg(feature = "postgres")]
78 | #[doc(hidden)]
79 | pub type ChaindexingRepoConn<'a> = PostgresRepoConn<'a>;
80 |
81 | #[cfg(feature = "postgres")]
82 | #[doc(hidden)]
83 | pub type ChaindexingRepoClient = PostgresRepoClient;
84 |
85 | #[cfg(feature = "postgres")]
86 | #[doc(hidden)]
87 | pub type ChaindexingRepoTxnClient<'a> = PostgresRepoTxnClient<'a>;
88 |
89 | #[cfg(feature = "postgres")]
90 | #[doc(hidden)]
91 | pub use repos::PostgresRepoAsyncConnection as ChaindexingRepoAsyncConnection;
92 |
93 | use std::fmt::Debug;
94 | use std::sync::Arc;
95 | use std::time::Duration;
96 | use tokio::time;
97 |
98 | use config::ConfigError;
99 | use nodes::NodeTasks;
100 |
101 | use crate::nodes::{NodeTask, NodeTasksRunner};
102 |
103 | pub(crate) type ChaindexingRepoClientMutex = Arc>;
104 |
105 | /// Errors from mis-configurations, database connections, internal errors, etc.
pub enum ChaindexingError {
    /// Invalid or incomplete user-provided configuration; see `ConfigError`.
    Config(ConfigError),
}
109 |
// Lets `?` lift config validation errors into `ChaindexingError`.
// NOTE(review): the source type parameter (`From<ConfigError>`) appears
// stripped by extraction; `value: ConfigError` below implies it.
impl From for ChaindexingError {
    fn from(value: ConfigError) -> Self {
        ChaindexingError::Config(value)
    }
}
115 |
116 | impl Debug for ChaindexingError {
117 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
118 | match self {
119 | ChaindexingError::Config(config_error) => {
120 | write!(f, "Config Error: {:?}", config_error)
121 | }
122 | }
123 | }
124 | }
125 |
126 | /// Starts processes for ingesting events and indexing states as configured.
pub async fn index_states(
    config: &Config,
) -> Result<(), ChaindexingError> {
    config.validate()?;

    // Register this node and give non-leader nodes one election period to
    // stand down before running setup/migrations.
    let client = config.repo.get_client().await;
    booting::setup_nodes(config, &client).await;
    let current_node = ChaindexingRepo::create_and_load_new_node(&client).await;
    wait_for_non_leader_nodes_to_abort(config.get_node_election_rate_ms()).await;

    booting::setup(config, &client).await?;

    // The orchestration loop runs detached; errors inside it are not
    // surfaced through this function's return value.
    let config = config.clone();
    tokio::spawn(async move {
        let mut interval =
            time::interval(Duration::from_millis(config.get_node_election_rate_ms()));

        let pool = config.repo.get_pool(1).await;
        let mut conn = ChaindexingRepo::get_conn(&pool).await;
        let conn = &mut conn;

        // NOTE(review): "¤t_node" below is HTML-entity garbling of
        // "&current_node" from extraction — left byte-identical here.
        let mut node_tasks = NodeTasks::new(¤t_node);

        loop {
            // Keep node active first to guarantee that at least this node is active before election
            ChaindexingRepo::keep_node_active(conn, ¤t_node).await;
            let active_nodes =
                ChaindexingRepo::get_active_nodes(conn, config.get_node_election_rate_ms()).await;

            node_tasks
                .orchestrate(
                    &config.optimization_config,
                    &active_nodes,
                    &get_tasks_runner(&config),
                )
                .await;

            interval.tick().await;
        }
    });

    Ok(())
}
170 |
171 | /// Includes runtime-discovered contract addresses for indexing.
172 | ///
173 | /// # Arguments
174 | ///
175 | /// * `event_context` - context where the contract was discovered.
176 | /// N/B: Indexing for this contract starts from this point onwards
177 | /// * `name` - name of the contract as defined in the config
178 | /// * `address` - address of discovered contract
179 | ///
180 | /// # Example
181 | ///
182 | /// ```ignore
183 | /// // In an EventHandler...
184 | /// chaindexing::include_contract(&context, "UniswapV3Pool", &pool_contract_address)
185 | /// .await;
186 | /// // Includes a new UniswapV3Pool contract:{pool_contract_address} for indexing...
187 | /// ```
pub async fn include_contract<'a, C: handlers::HandlerContext<'a>>(
    event_context: &C,
    contract_name: &str,
    address: &str,
) {
    let event = event_context.get_event();
    // Index the new contract from the block where it was discovered onwards.
    let chain_id = event.get_chain_id();
    let start_block_number = event.get_block_number();

    let contract_address =
        UnsavedContractAddress::new(contract_name, address, &chain_id, start_block_number);

    // Written through the event's transaction client, so inclusion is atomic
    // with the handler that discovered the contract.
    ChaindexingRepo::create_contract_address(event_context.get_client(), &contract_address).await;
}
202 |
// Gives previously-running (non-leader) nodes one election period to notice
// the new node and abort their tasks before this node proceeds with setup.
async fn wait_for_non_leader_nodes_to_abort(node_election_rate_ms: u64) {
    time::sleep(Duration::from_millis(node_election_rate_ms)).await;
}
206 |
/// Builds the `NodeTasksRunner` used during orchestration: it starts the
/// ingester and handler tasks from the given config.
fn get_tasks_runner(
    config: &Config,
) -> impl NodeTasksRunner + '_ {
    // Local struct so the returned runner can borrow `config` for `'_`.
    struct ChaindexingNodeTasksRunner<'a, S: Send + Sync + Clone + Debug + 'static> {
        config: &'a Config,
    }
    #[crate::augmenting_std::async_trait]
    impl<'a, S: Send + Sync + Clone + Debug + 'static> NodeTasksRunner
        for ChaindexingNodeTasksRunner<'a, S>
    {
        async fn run(&self) -> Vec {
            let ingester = ingester::start(self.config).await;
            let handlers = handlers::start(self.config).await;

            vec![ingester, handlers]
        }
    }
    ChaindexingNodeTasksRunner { config }
}
226 |
/// Convenience re-exports of the most commonly used Chaindexing types and
/// traits, for a single `use chaindexing::prelude::*;`.
pub mod prelude {
    pub use crate::augmenting_std::{async_trait, serde};
    pub use crate::chains::{Chain, ChainId};
    pub use crate::config::{Config, OptimizationConfig};
    pub use crate::contracts::{Contract, ContractAddress, EventAbi};
    pub use crate::events::{Event, EventParam};
    pub use crate::handlers::{
        PureHandler as EventHandler, PureHandlerContext as EventContext, SideEffectHandler,
        SideEffectHandlerContext as SideEffectContext,
    };
    pub use crate::nodes::NodeHeartbeat as Heartbeat;
    pub use crate::states::{
        ChainState, ContractState, Filters, MultiChainState, StateMigrations, Updates,
    };
    pub use crate::Address;
    pub use ethers::types::{I256, U256};
}
244 |
--------------------------------------------------------------------------------
/chaindexing/src/nodes.rs:
--------------------------------------------------------------------------------
1 | /// Nodes are Chaindexing instances in a distributed environment
2 | /// Responsible for managing the core tasks of each node including
3 | /// keeping each node alive programmatically, resolving
4 | /// indexing configuration conflicts, etc.
5 | mod node;
6 | mod node_heartbeat;
7 | mod node_task;
8 | mod node_tasks;
9 | mod node_tasks_runner;
10 |
11 | pub use node::Node;
12 | pub use node_heartbeat::NodeHeartbeat;
13 | pub use node_task::NodeTask;
14 | pub use node_tasks::NodeTasks;
15 | pub use node_tasks_runner::NodeTasksRunner;
16 |
17 | pub const DEFAULT_MAX_CONCURRENT_NODE_COUNT: u16 = 50;
18 |
--------------------------------------------------------------------------------
/chaindexing/src/nodes/node.rs:
--------------------------------------------------------------------------------
1 | use diesel::{prelude::Insertable, Queryable};
2 | use serde::Deserialize;
3 | use std::fmt::Debug;
4 |
5 | use crate::diesel::schema::chaindexing_nodes;
6 |
/// A chaindexing instance registered in the `chaindexing_nodes` table,
/// used for liveness tracking and leader election.
#[derive(Debug, Deserialize, Clone, PartialEq, Eq, Insertable, Queryable)]
#[diesel(table_name = chaindexing_nodes)]
pub struct Node {
    pub id: i32,
    // Unix seconds of the node's last keep-alive (see keep_node_active).
    last_active_at: i64,
    // Unix seconds at which this node's row was created.
    inserted_at: i64,
}
14 |
15 | impl Node {
16 | pub fn get_min_active_at_in_secs(node_election_rate_ms: u64) -> i64 {
17 | let now_ms = chrono::Utc::now().timestamp_millis();
18 |
19 | // Not active if not kept active at least 2 elections away
20 | (now_ms - (2 * node_election_rate_ms) as i64) / 1_000
21 | }
22 |
23 | pub fn is_leader(&self, leader: &Node) -> bool {
24 | self.id == leader.id
25 | }
26 | }
27 |
28 | pub fn elect_leader(nodes: &[Node]) -> &Node {
29 | let mut nodes_iter = nodes.iter();
30 | let mut leader: Option<&Node> = nodes_iter.next();
31 |
32 | for node in nodes_iter {
33 | if node.inserted_at > leader.unwrap().inserted_at {
34 | leader = Some(node);
35 | }
36 | }
37 |
38 | leader.unwrap()
39 | }
40 |
--------------------------------------------------------------------------------
/chaindexing/src/nodes/node_heartbeat.rs:
--------------------------------------------------------------------------------
1 | use chrono::Utc;
2 | use std::fmt::Debug;
3 | use std::sync::Arc;
4 | use tokio::sync::Mutex;
5 |
/// A chaindexing node's heartbeat.
/// In a distributed environment, this is useful for managing the indexer's
/// processes manually. A popular motivation to do is to reduce RPC's cost.
#[derive(Clone, Debug)]
pub struct NodeHeartbeat {
    /// Both in milliseconds
    // Shared behind Arc so every clone observes the same keep-alive time.
    last_keep_alive_at: Arc>,
    active_grace_period: u32,
}
15 |
16 | impl NodeHeartbeat {
17 | /// * `active_grace_period_ms` - how long should the Node wait
18 | /// till it goes inactive
19 | pub fn new(active_grace_period_ms: u32) -> Self {
20 | Self {
21 | last_keep_alive_at: Arc::new(Mutex::new(Self::now())),
22 | active_grace_period: active_grace_period_ms,
23 | }
24 | }
25 | /// Keeps your chaindexing node alive
26 | pub async fn keep_alive(&self) {
27 | let mut last_keep_alive_at = self.last_keep_alive_at.lock().await;
28 | *last_keep_alive_at = Self::now();
29 | }
30 |
31 | fn now() -> u64 {
32 | Utc::now().timestamp_millis() as u64
33 | }
34 |
35 | pub async fn is_stale(&self) -> bool {
36 | !self.is_recent().await
37 | }
38 | pub async fn is_recent(&self) -> bool {
39 | let last_keep_alive_at = *self.last_keep_alive_at.lock().await;
40 | let min_last_at = Self::now() - (self.active_grace_period as u64);
41 |
42 | last_keep_alive_at > min_last_at
43 | }
44 | }
45 |
--------------------------------------------------------------------------------
/chaindexing/src/nodes/node_task.rs:
--------------------------------------------------------------------------------
1 | use std::sync::Arc;
2 | use tokio::sync::Mutex;
3 |
// A unit of node work composed of abortable tokio subtasks.
#[derive(Clone, Debug)]
pub struct NodeTask {
    // Shared so that clones of this NodeTask manage the same subtasks.
    subtasks: Arc>>>,
}
8 |
9 | impl Default for NodeTask {
10 | fn default() -> Self {
11 | Self::new()
12 | }
13 | }
14 |
15 | impl NodeTask {
16 | pub fn new() -> Self {
17 | NodeTask {
18 | subtasks: Arc::new(Mutex::new(Vec::new())),
19 | }
20 | }
21 | pub async fn add_subtask(&self, task: tokio::task::JoinHandle<()>) {
22 | let mut subtasks = self.subtasks.lock().await;
23 | subtasks.push(task);
24 | }
25 | pub async fn stop(&self) {
26 | let subtasks = self.subtasks.lock().await;
27 | for subtask in subtasks.iter() {
28 | subtask.abort();
29 | }
30 | }
31 | }
32 |
--------------------------------------------------------------------------------
/chaindexing/src/nodes/node_tasks.rs:
--------------------------------------------------------------------------------
1 | use crate::OptimizationConfig;
2 |
3 | use chrono::Utc;
4 | use std::fmt::Debug;
5 |
6 | use super::node::{self, Node};
7 | use super::node_tasks_runner::NodeTasksRunner;
8 | use super::NodeTask;
9 |
/// Lifecycle of a node's task set, driven by `NodeTasks::orchestrate`.
#[derive(PartialEq, Debug)]
enum NodeTasksState {
    /// Initial state of tasks are Idle.
    /// In this state, no NodeTask is running because nothing has happened yet.
    Idle,
    /// All NodeTasks are running when Active.
    Active,
    /// All NodeTasks are NOT running.
    /// However, when there is a recent KeepNodeActiveRequest, they get reactivated.
    InActive,
    /// All NodeTasks are NOT running.
    /// If there is a recent KeepNodeActiveRequest, it stays aborted.
    /// Only non-leader Nodes self-abort.
    Aborted,
}
25 |
/// Manages the lifecycle of all tasks belonging to one node.
pub struct NodeTasks<'a> {
    // The node these tasks run on; compared against the elected leader.
    current_node: &'a Node,
    state: NodeTasksState,
    tasks: Vec,
    // Unix seconds at construction; used for the optimization warm-up.
    started_at_in_secs: u64,
    /// Not used currently. In V2, We will populate NodeTasksErrors here
    pub errors: Vec,
}
34 |
impl<'a> NodeTasks<'a> {
    /// Creates an Idle task set for `current_node`; nothing runs yet.
    pub fn new(current_node: &'a Node) -> Self {
        Self {
            current_node,
            state: NodeTasksState::Idle,
            started_at_in_secs: Self::now_in_secs(),
            tasks: vec![],
            errors: vec![],
        }
    }

    /// Advances the task-state machine one step; call periodically.
    ///
    /// Only the elected leader runs tasks. A leader with an optimization
    /// config is suspended (InActive) when its heartbeat goes stale after
    /// the warm-up window, and resumed when the heartbeat is recent again.
    /// A node that lost leadership while Active aborts its tasks.
    pub async fn orchestrate(
        &mut self,
        optimization_config: &Option,
        active_nodes: &[Node],
        tasks_runner: &impl NodeTasksRunner,
    ) {
        let leader_node = node::elect_leader(active_nodes);

        if self.current_node.is_leader(leader_node) {
            match self.state {
                // First activation, or re-activation after regaining
                // leadership.
                NodeTasksState::Idle | NodeTasksState::Aborted => {
                    self.make_active(tasks_runner).await;
                }

                NodeTasksState::Active => {
                    if let Some(OptimizationConfig {
                        node_heartbeat,
                        start_after_in_secs,
                    }) = optimization_config
                    {
                        // Only optimize after the warm-up window so a
                        // freshly started node isn't suspended right away.
                        if node_heartbeat.is_stale().await
                            && self.started_n_seconds_ago(*start_after_in_secs)
                        {
                            self.make_inactive().await;
                        }
                    }
                }

                NodeTasksState::InActive => {
                    if let Some(OptimizationConfig { node_heartbeat, .. }) = optimization_config {
                        if node_heartbeat.is_recent().await {
                            self.make_active(tasks_runner).await;
                        }
                    }
                }
            }
        } else if self.state == NodeTasksState::Active {
            // Lost leadership: stop running tasks on this node.
            self.abort().await;
        }
    }

    // Starts fresh tasks via the runner and marks the set Active.
    async fn make_active(&mut self, tasks_runner: &impl NodeTasksRunner) {
        self.tasks = tasks_runner.run().await;
        self.state = NodeTasksState::Active;
    }
    // Stops tasks but allows reactivation on a recent heartbeat.
    async fn make_inactive(&mut self) {
        self.stop().await;
        self.state = NodeTasksState::InActive;
    }
    // Stops tasks after losing leadership.
    async fn abort(&mut self) {
        self.stop().await;
        self.state = NodeTasksState::Aborted;
    }
    // Aborts every subtask of every running task.
    async fn stop(&mut self) {
        for task in &self.tasks {
            task.stop().await;
        }
    }

    /// Whether at least `n_seconds` have elapsed since construction.
    pub fn started_n_seconds_ago(&self, n_seconds: u64) -> bool {
        Self::now_in_secs() - self.started_at_in_secs >= n_seconds
    }

    // Wall-clock time in whole seconds since the Unix epoch.
    fn now_in_secs() -> u64 {
        Utc::now().timestamp() as u64
    }
}
113 |
--------------------------------------------------------------------------------
/chaindexing/src/nodes/node_tasks_runner.rs:
--------------------------------------------------------------------------------
1 | use super::node_task::NodeTask;
2 |
/// Supplies the tasks a node should run once it becomes the active leader.
#[crate::augmenting_std::async_trait]
pub trait NodeTasksRunner {
    /// Starts and returns the node's long-lived tasks.
    async fn run(&self) -> Vec;
}
7 |
--------------------------------------------------------------------------------
/chaindexing/src/pruning.rs:
--------------------------------------------------------------------------------
/// Configuration for pruning stale indexed data.
#[derive(Clone, Debug)]
pub struct PruningConfig {
    /// Retains events whose block number is within this many blocks
    /// of the current block. Unit in blocks.
    pub prune_n_blocks_away: u64,
    /// Advance option for how often stale data gets pruned.
    /// Unit in seconds.
    pub prune_interval: u64,
}

impl Default for PruningConfig {
    fn default() -> Self {
        Self {
            prune_n_blocks_away: 30 * 1_000, // Blocks in the last 30 days ish
            prune_interval: 30 * 24 * 60 * 60, // 30 days,
        }
    }
}

impl PruningConfig {
    /// Returns the smallest block number that should be retained, i.e.
    /// `current_block_number - prune_n_blocks_away`, clamped at zero.
    ///
    /// When the chain is still younger than the pruning window, nothing
    /// is old enough to prune, so 0 is returned (retain everything).
    pub fn get_min_block_number(&self, current_block_number: u64) -> u64 {
        // saturating_sub avoids u64 underflow and yields 0 (retain all)
        // instead of pruning below the current block.
        current_block_number.saturating_sub(self.prune_n_blocks_away)
    }
}
29 |
--------------------------------------------------------------------------------
/chaindexing/src/repos.rs:
--------------------------------------------------------------------------------
1 | #[cfg(feature = "postgres")]
2 | mod postgres_repo;
3 |
4 | #[doc(hidden)]
5 | #[cfg(feature = "postgres")]
6 | pub use postgres_repo::{
7 | Conn as PostgresRepoConn, Pool as PostgresRepoPool, PostgresRepo, PostgresRepoAsyncConnection,
8 | PostgresRepoClient, PostgresRepoTxnClient,
9 | };
10 |
11 | mod repo;
12 |
13 | #[doc(hidden)]
14 | pub use repo::{ExecutesWithRawQuery, HasRawQueryClient, Repo, RepoError};
15 |
16 | #[doc(hidden)]
17 | pub(crate) use repo::{LoadsDataWithRawQuery, Migratable, RepoMigrations, SQLikeMigrations};
18 |
19 | #[doc(hidden)]
20 | pub mod streams;
21 |
--------------------------------------------------------------------------------
/chaindexing/src/repos/postgres_repo.rs:
--------------------------------------------------------------------------------
1 | mod migrations;
2 | mod raw_queries;
3 |
4 | use crate::chain_reorg::UnsavedReorgedBlock;
5 |
6 | use crate::{contracts::ContractAddress, events::Event, nodes::Node};
7 | use diesel_async::RunQueryDsl;
8 |
9 | use diesel::{
10 | delete,
11 | result::{DatabaseErrorKind, Error as DieselError},
12 | ExpressionMethods, QueryDsl,
13 | };
14 | use diesel_async::{pooled_connection::AsyncDieselConnectionManager, AsyncPgConnection};
15 | use futures_core::future::BoxFuture;
16 | use uuid::Uuid;
17 |
18 | use super::repo::{Repo, RepoError};
19 |
20 | pub type Conn<'a> = bb8::PooledConnection<'a, AsyncDieselConnectionManager>;
21 | pub type Pool = bb8::Pool>;
22 |
23 | pub use diesel_async::{
24 | scoped_futures::ScopedFutureExt as PostgresRepoTransactionExt,
25 | AsyncConnection as PostgresRepoAsyncConnection,
26 | };
27 |
28 | pub use raw_queries::{PostgresRepoClient, PostgresRepoTxnClient};
29 |
// Maps diesel errors into the repo error type: a closed connection is
// surfaced distinctly so callers can react; everything else is Unknown.
impl From for RepoError {
    fn from(value: DieselError) -> Self {
        match value {
            DieselError::DatabaseError(DatabaseErrorKind::ClosedConnection, _info) => {
                RepoError::NotConnected
            }
            any_other_error => RepoError::Unknown(any_other_error.to_string()),
        }
    }
}
40 |
/// Repo for Postgres databases
#[derive(Clone, Debug)]
pub struct PostgresRepo {
    // Postgres connection URL used to build pools and clients.
    url: String,
}
46 |
47 | type PgPooledConn<'a> = bb8::PooledConnection<'a, AsyncDieselConnectionManager>;
48 |
49 | impl PostgresRepo {
50 | pub fn new(url: &str) -> Self {
51 | Self {
52 | url: url.to_string(),
53 | }
54 | }
55 | }
56 |
#[crate::augmenting_std::async_trait]
impl Repo for PostgresRepo {
    type Conn<'a> = PgPooledConn<'a>;
    type Pool = bb8::Pool>;

    /// Builds a bb8 pool holding at most `max_size` connections.
    async fn get_pool(&self, max_size: u32) -> Pool {
        let manager = AsyncDieselConnectionManager::::new(&self.url);

        bb8::Pool::builder().max_size(max_size).build(manager).await.unwrap()
    }

    /// Checks a connection out of the pool; panics when unavailable.
    async fn get_conn<'a>(pool: &'a Pool) -> Conn<'a> {
        pool.get().await.unwrap()
    }

    /// Runs `repo_ops` inside a single database transaction, rolling
    /// back when it returns an error.
    async fn run_in_transaction<'a, F>(conn: &mut Conn<'a>, repo_ops: F) -> Result<(), RepoError>
    where
        F: for<'b> FnOnce(&'b mut Conn<'a>) -> BoxFuture<'b, Result<(), RepoError>>
            + Send
            + Sync
            + 'a,
    {
        conn.transaction::<(), RepoError, _>(|transaction_conn| {
            async move { (repo_ops)(transaction_conn).await }.scope_boxed()
        })
        .await
    }

    /// Bulk-inserts ingested events.
    async fn create_events<'a>(conn: &mut Conn<'a>, events: &[Event]) {
        use crate::diesel::schema::chaindexing_events::dsl::*;

        diesel::insert_into(chaindexing_events)
            .values(events)
            .execute(conn)
            .await
            .unwrap();
    }
    /// Loads every stored event, unfiltered.
    async fn get_all_events<'a>(conn: &mut Conn<'a>) -> Vec {
        use crate::diesel::schema::chaindexing_events::dsl::*;

        chaindexing_events.load(conn).await.unwrap()
    }
    /// Loads events for `address` within the block range `[from, to]`.
    async fn get_events<'a>(
        conn: &mut Self::Conn<'a>,
        address: String,
        from: u64,
        to: u64,
    ) -> Vec {
        use crate::diesel::schema::chaindexing_events::dsl::*;

        // Addresses are stored lowercased, so normalize before filtering.
        chaindexing_events
            .filter(contract_address.eq(address.to_lowercase()))
            .filter(block_number.between(from as i64, to as i64))
            .load(conn)
            .await
            .unwrap()
    }
    /// Deletes events by primary key (e.g. invalidated by a reorg).
    async fn delete_events_by_ids<'a>(conn: &mut Self::Conn<'a>, ids: &[Uuid]) {
        use crate::diesel::schema::chaindexing_events::dsl::*;

        delete(chaindexing_events).filter(id.eq_any(ids)).execute(conn).await.unwrap();
    }

    /// Persists where ingestion should resume for a contract address.
    async fn update_next_block_number_to_ingest_from<'a>(
        conn: &mut Self::Conn<'a>,
        contract_address: &ContractAddress,
        block_number: i64,
    ) {
        use crate::diesel::schema::chaindexing_contract_addresses::dsl::*;

        diesel::update(chaindexing_contract_addresses)
            .filter(id.eq(contract_address.id))
            .set(next_block_number_to_ingest_from.eq(block_number))
            .execute(conn)
            .await
            .unwrap();
    }

    /// Records a detected chain reorg for later handling.
    async fn create_reorged_block<'a>(
        conn: &mut Self::Conn<'a>,
        reorged_block: &UnsavedReorgedBlock,
    ) {
        use crate::diesel::schema::chaindexing_reorged_blocks::dsl::*;

        diesel::insert_into(chaindexing_reorged_blocks)
            .values(reorged_block)
            .execute(conn)
            .await
            .unwrap();
    }

    /// Returns nodes kept alive within the window defined by
    /// `Node::get_min_active_at_in_secs`.
    async fn get_active_nodes<'a>(
        conn: &mut Self::Conn<'a>,
        node_election_rate_ms: u64,
    ) -> Vec {
        use crate::diesel::schema::chaindexing_nodes::dsl::*;

        chaindexing_nodes
            .filter(last_active_at.gt(Node::get_min_active_at_in_secs(node_election_rate_ms)))
            .load(conn)
            .await
            .unwrap()
    }
    /// Refreshes `node`'s `last_active_at` to now (unix seconds).
    async fn keep_node_active<'a>(conn: &mut Self::Conn<'a>, node: &Node) {
        use crate::diesel::schema::chaindexing_nodes::dsl::*;

        let now = chrono::offset::Utc::now().timestamp();

        diesel::update(chaindexing_nodes)
            .filter(id.eq(node.id))
            .set(last_active_at.eq(now))
            .execute(conn)
            .await
            .unwrap();
    }
}
173 |
--------------------------------------------------------------------------------
/chaindexing/src/repos/postgres_repo/migrations.rs:
--------------------------------------------------------------------------------
1 | use crate::{Migratable, PostgresRepo, RepoMigrations, SQLikeMigrations};
2 |
// Wires PostgresRepo to the shared SQL-like migration statements;
// every method is pure delegation to SQLikeMigrations.
impl RepoMigrations for PostgresRepo {
    fn create_nodes_migration() -> &'static [&'static str] {
        SQLikeMigrations::create_nodes()
    }

    fn create_contract_addresses_migration() -> &'static [&'static str] {
        SQLikeMigrations::create_contract_addresses()
    }
    fn restart_ingest_and_handlers_next_block_numbers_migration() -> &'static [&'static str] {
        SQLikeMigrations::restart_ingest_and_handlers_next_block_numbers()
    }
    fn zero_next_block_number_for_side_effects_migration() -> &'static [&'static str] {
        SQLikeMigrations::zero_next_block_number_for_side_effects()
    }

    fn create_events_migration() -> &'static [&'static str] {
        SQLikeMigrations::create_events()
    }
    fn drop_events_migration() -> &'static [&'static str] {
        SQLikeMigrations::drop_events()
    }

    fn create_reorged_blocks_migration() -> &'static [&'static str] {
        SQLikeMigrations::create_reorged_blocks()
    }
    fn drop_reorged_blocks_migration() -> &'static [&'static str] {
        SQLikeMigrations::drop_reorged_blocks()
    }

    fn create_root_states_migration() -> &'static [&'static str] {
        SQLikeMigrations::create_root_states()
    }
}

// Uses Migratable's default `migrate` implementation as-is.
impl Migratable for PostgresRepo {}
38 |
--------------------------------------------------------------------------------
/chaindexing/src/repos/repo.rs:
--------------------------------------------------------------------------------
1 | use derive_more::Display;
2 | use std::fmt::Debug;
3 | use uuid::Uuid;
4 |
5 | use futures_core::future::BoxFuture;
6 | use serde::de::DeserializeOwned;
7 |
8 | use crate::chain_reorg::{ReorgedBlock, UnsavedReorgedBlock};
9 | use crate::root;
10 | use crate::{
11 | contracts::UnsavedContractAddress,
12 | events::{Event, PartialEvent},
13 | nodes::Node,
14 | ContractAddress,
15 | };
16 |
/// Errors from interacting the configured SQL database
#[derive(Debug, Display)]
pub enum RepoError {
    /// The database connection was closed or never established.
    NotConnected,
    /// Any other database error, carrying its display message.
    Unknown(String),
}
23 |
/// Core persistence interface for chaindexing's typed queries.
/// Implementors must also provide raw-query and migration support.
#[crate::augmenting_std::async_trait]
pub trait Repo:
    Sync + Send + Migratable + ExecutesWithRawQuery + LoadsDataWithRawQuery + Clone + Debug
{
    type Pool;
    type Conn<'a>;

    /// Builds a connection pool holding at most `max_size` connections.
    async fn get_pool(&self, max_size: u32) -> Self::Pool;
    /// Checks a connection out of `pool`.
    async fn get_conn<'a>(pool: &'a Self::Pool) -> Self::Conn<'a>;

    /// Runs `repo_ops` within a single database transaction.
    async fn run_in_transaction<'a, F>(
        conn: &mut Self::Conn<'a>,
        repo_ops: F,
    ) -> Result<(), RepoError>
    where
        F: for<'b> FnOnce(&'b mut Self::Conn<'a>) -> BoxFuture<'b, Result<(), RepoError>>
            + Send
            + Sync
            + 'a;

    /// Bulk-inserts ingested events.
    async fn create_events<'a>(conn: &mut Self::Conn<'a>, events: &[Event]);
    /// Loads every stored event.
    async fn get_all_events<'a>(conn: &mut Self::Conn<'a>) -> Vec;
    /// Loads events for `address` within the block range `[from, to]`.
    async fn get_events<'a>(
        conn: &mut Self::Conn<'a>,
        address: String,
        from: u64,
        to: u64,
    ) -> Vec;
    /// Deletes events by primary key.
    async fn delete_events_by_ids<'a>(conn: &mut Self::Conn<'a>, ids: &[Uuid]);

    /// Persists where ingestion should resume for a contract address.
    async fn update_next_block_number_to_ingest_from<'a>(
        conn: &mut Self::Conn<'a>,
        contract_address: &ContractAddress,
        block_number: i64,
    );

    /// Records a detected chain reorg.
    async fn create_reorged_block<'a>(
        conn: &mut Self::Conn<'a>,
        reorged_block: &UnsavedReorgedBlock,
    );

    /// Lists nodes considered active for the given election rate.
    async fn get_active_nodes<'a>(
        conn: &mut Self::Conn<'a>,
        node_election_rate_ms: u64,
    ) -> Vec;
    /// Refreshes `node`'s liveness timestamp.
    async fn keep_node_active<'a>(conn: &mut Self::Conn<'a>, node: &Node);
}
71 |
/// Provides raw-SQL clients: a plain one and a transaction-scoped one.
#[crate::augmenting_std::async_trait]
pub trait HasRawQueryClient {
    type RawQueryClient: Send + Sync;
    type RawQueryTxnClient<'a>: Send + Sync;

    /// Returns a client for running raw queries.
    async fn get_client(&self) -> Self::RawQueryClient;
    /// Returns a client scoped to a transaction on `client`
    /// (commit via `ExecutesWithRawQuery::commit_txns`).
    async fn get_txn_client<'a>(
        client: &'a mut Self::RawQueryClient,
    ) -> Self::RawQueryTxnClient<'a>;
}
82 |
/// Write-path operations executed via raw SQL.
#[crate::augmenting_std::async_trait]
pub trait ExecutesWithRawQuery: HasRawQueryClient {
    /// Executes `query`, discarding any result.
    async fn execute(client: &Self::RawQueryClient, query: &str);
    /// Executes `query` inside the client's transaction.
    async fn execute_in_txn<'a>(client: &Self::RawQueryTxnClient<'a>, query: &str);
    /// Commits the transaction, consuming its client.
    async fn commit_txns<'a>(client: Self::RawQueryTxnClient<'a>);

    /// Inserts one contract address within a transaction.
    async fn create_contract_address<'a>(
        client: &Self::RawQueryTxnClient<'a>,
        contract_address: &UnsavedContractAddress,
    );

    /// Bulk-inserts contract addresses.
    async fn create_contract_addresses(
        client: &Self::RawQueryClient,
        contract_addresses: &[UnsavedContractAddress],
    );

    /// Persists where handling should resume for one contract address.
    async fn update_next_block_number_to_handle_from<'a>(
        client: &Self::RawQueryTxnClient<'a>,
        address: &str,
        chain_id: u64,
        block_number: u64,
    );

    /// Persists where handling should resume for all addresses on a chain.
    async fn update_next_block_numbers_to_handle_from<'a>(
        client: &Self::RawQueryTxnClient<'a>,
        chain_id: u64,
        block_number: u64,
    );

    /// Persists where side-effect handling should resume for an address.
    async fn update_next_block_number_for_side_effects<'a>(
        client: &Self::RawQueryTxnClient<'a>,
        address: &str,
        chain_id: u64,
        block_number: u64,
    );

    /// Marks the given reorged blocks as handled.
    async fn update_reorged_blocks_as_handled<'a>(
        client: &Self::RawQueryTxnClient<'a>,
        reorged_block_ids: &[i32],
    );

    /// Appends a new root state snapshot.
    async fn append_root_state(client: &Self::RawQueryClient, new_root_state: &root::State);
    /// Deletes events on `chain_id` below `min_block_number`.
    async fn prune_events(client: &Self::RawQueryClient, min_block_number: u64, chain_id: u64);
    /// Trims the nodes table down to `retain_size` rows.
    async fn prune_nodes(client: &Self::RawQueryClient, retain_size: u16);
    /// Trims root states down to `retain_size` rows.
    async fn prune_root_states(client: &Self::RawQueryClient, retain_size: u64);
}
129 |
/// Read-path operations that deserialize rows from raw SQL queries.
#[crate::augmenting_std::async_trait]
pub trait LoadsDataWithRawQuery: HasRawQueryClient {
    /// Inserts a fresh node row and returns it.
    async fn create_and_load_new_node(client: &Self::RawQueryClient) -> Node;
    /// Loads the most recent root state, if any exists.
    async fn load_last_root_state(client: &Self::RawQueryClient) -> Option;
    /// Loads the latest events for the given contract addresses.
    async fn load_latest_events(
        client: &Self::RawQueryClient,
        addresses: &[String],
    ) -> Vec;
    /// Loads reorged blocks that have not been handled yet.
    async fn load_unhandled_reorged_blocks(client: &Self::RawQueryClient) -> Vec;

    /// Loads up to `limit` events for an address starting at
    /// `from_block_number`.
    async fn load_events(
        client: &Self::RawQueryClient,
        chain_id: u64,
        contract_address: &str,
        from_block_number: u64,
        limit: u64,
    ) -> Vec;

    /// Runs `query` and deserializes at most one row.
    async fn load_data(
        client: &Self::RawQueryClient,
        query: &str,
    ) -> Option;
    /// Like `load_data`, inside the client's transaction.
    async fn load_data_in_txn<'a, Data: Send + DeserializeOwned>(
        client: &Self::RawQueryTxnClient<'a>,
        query: &str,
    ) -> Option;
    /// Runs `query` and deserializes every row.
    async fn load_data_list(
        conn: &Self::RawQueryClient,
        query: &str,
    ) -> Vec;
    /// Like `load_data_list`, inside the client's transaction.
    async fn load_data_list_in_txn<'a, Data: Send + DeserializeOwned>(
        conn: &Self::RawQueryTxnClient<'a>,
        query: &str,
    ) -> Vec;
}
165 |
/// Per-backend migration statements plus helpers bundling them.
pub trait RepoMigrations: Migratable {
    fn create_root_states_migration() -> &'static [&'static str];

    fn create_nodes_migration() -> &'static [&'static str];

    fn create_contract_addresses_migration() -> &'static [&'static str];
    fn restart_ingest_and_handlers_next_block_numbers_migration() -> &'static [&'static str];
    fn zero_next_block_number_for_side_effects_migration() -> &'static [&'static str];

    fn create_events_migration() -> &'static [&'static str];
    fn drop_events_migration() -> &'static [&'static str];

    fn create_reorged_blocks_migration() -> &'static [&'static str];
    fn drop_reorged_blocks_migration() -> &'static [&'static str];

    /// Migrations that set up the internal events/reorg tables.
    fn get_internal_migrations() -> Vec<&'static str> {
        [
            Self::create_events_migration(),
            Self::create_reorged_blocks_migration(),
        ]
        .concat()
    }

    /// Migrations that reset internal tables: drop events and reorged
    /// blocks, then rewind the per-contract block-number cursors.
    fn get_reset_internal_migrations() -> Vec<&'static str> {
        [
            Self::drop_events_migration(),
            Self::drop_reorged_blocks_migration(),
            Self::restart_ingest_and_handlers_next_block_numbers_migration(),
        ]
        .concat()
    }
}
198 |
/// Applies migration statements sequentially via the raw-query client.
#[crate::augmenting_std::async_trait]
pub trait Migratable: ExecutesWithRawQuery + Sync + Send {
    async fn migrate(client: &Self::RawQueryClient, migrations: Vec + Send + Sync>)
    where
        Self: Sized,
    {
        // Statements run one at a time, in the given order.
        for migration in migrations {
            Self::execute(client, migration.as_ref()).await;
        }
    }
}
210 |
/// Shared, SQL-flavored migration statements usable by SQL backends.
pub struct SQLikeMigrations;

impl SQLikeMigrations {
    /// Table tracking indexer-wide reset counters.
    pub fn create_root_states() -> &'static [&'static str] {
        &["CREATE TABLE IF NOT EXISTS chaindexing_root_states (
                id BIGSERIAL PRIMARY KEY,
                reset_count BIGINT NOT NULL,
                reset_including_side_effects_count BIGINT NOT NULL
        )"]
    }

    /// Table registering nodes for liveness and leader election.
    pub fn create_nodes() -> &'static [&'static str] {
        &["CREATE TABLE IF NOT EXISTS chaindexing_nodes (
                id SERIAL PRIMARY KEY,
                last_active_at BIGINT DEFAULT EXTRACT(EPOCH FROM CURRENT_TIMESTAMP)::BIGINT,
                inserted_at BIGINT DEFAULT EXTRACT(EPOCH FROM CURRENT_TIMESTAMP)::BIGINT
        )"]
    }

    /// Contract-address table plus its unique (chain_id, address) index.
    pub fn create_contract_addresses() -> &'static [&'static str] {
        &[
            "CREATE TABLE IF NOT EXISTS chaindexing_contract_addresses (
                id BIGSERIAL PRIMARY KEY,
                address VARCHAR NOT NULL,
                contract_name VARCHAR NOT NULL,
                chain_id BIGINT NOT NULL,
                start_block_number BIGINT NOT NULL,
                next_block_number_to_ingest_from BIGINT NOT NULL,
                next_block_number_to_handle_from BIGINT NOT NULL,
                next_block_number_for_side_effects BIGINT DEFAULT 0
            )",
            "CREATE UNIQUE INDEX IF NOT EXISTS chaindexing_contract_addresses_chain_address_index
        ON chaindexing_contract_addresses(chain_id, address)",
        ]
    }
    /// Rewinds ingest/handle cursors back to each start block.
    pub fn restart_ingest_and_handlers_next_block_numbers() -> &'static [&'static str] {
        &["UPDATE chaindexing_contract_addresses
        SET next_block_number_to_handle_from = start_block_number, next_block_number_to_ingest_from = start_block_number"]
    }
    /// Resets the side-effects cursor for every contract address.
    pub fn zero_next_block_number_for_side_effects() -> &'static [&'static str] {
        &["UPDATE chaindexing_contract_addresses SET next_block_number_for_side_effects = 0"]
    }

    /// Events table plus lookup indexes on (chain, contract, block, log)
    /// and on abi.
    pub fn create_events() -> &'static [&'static str] {
        &[
            "CREATE TABLE IF NOT EXISTS chaindexing_events (
                id uuid PRIMARY KEY,
                chain_id BIGINT NOT NULL,
                contract_address VARCHAR NOT NULL,
                contract_name VARCHAR NOT NULL,
                abi TEXT NOT NULL,
                parameters JSON NOT NULL,
                topics JSON NOT NULL,
                block_hash VARCHAR NOT NULL,
                block_number BIGINT NOT NULL,
                block_timestamp BIGINT NOT NULL,
                transaction_hash VARCHAR NOT NULL,
                transaction_index INTEGER NOT NULL,
                log_index INTEGER NOT NULL,
                removed BOOLEAN NOT NULL,
                inserted_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
            )",
            "CREATE INDEX IF NOT EXISTS chaindexing_events_chain_contract_block_log_index
            ON chaindexing_events(chain_id,contract_address,block_number,log_index)",
            "CREATE INDEX IF NOT EXISTS chaindexing_events_abi
            ON chaindexing_events(abi)",
        ]
    }
    pub fn drop_events() -> &'static [&'static str] {
        &["DROP TABLE IF EXISTS chaindexing_events"]
    }

    /// Table recording chain reorgs awaiting handling.
    pub fn create_reorged_blocks() -> &'static [&'static str] {
        &["CREATE TABLE IF NOT EXISTS chaindexing_reorged_blocks (
                id SERIAL PRIMARY KEY,
                chain_id BIGINT NOT NULL,
                block_number BIGINT NOT NULL,
                handled_at BIGINT,
                inserted_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
        )"]
    }
    pub fn drop_reorged_blocks() -> &'static [&'static str] {
        &["DROP TABLE IF EXISTS chaindexing_reorged_blocks"]
    }
}
296 |
--------------------------------------------------------------------------------
/chaindexing/src/repos/streams.rs:
--------------------------------------------------------------------------------
1 | // TODO: Rewrite after migrating to tokio-postgres
2 |
3 | use std::{
4 | future::Future,
5 | pin::Pin,
6 | sync::Arc,
7 | task::{Context, Poll},
8 | };
9 |
10 | use futures_util::Stream;
11 | use pin_project_lite::pin_project;
12 |
13 | use futures_util::FutureExt;
14 | use serde::Deserialize;
15 | use tokio::sync::Mutex;
16 |
17 | use crate::{ChaindexingRepo, ChaindexingRepoClient, ContractAddress, LoadsDataWithRawQuery};
18 |
19 | type DataStream = Vec;
20 |
// States of the chunked-fetch state machine driving the stream.
enum ContractAddressesStreamState {
    // Need to compute the id range to stream over.
    GetFromAndTo,
    // Awaiting the (min id, max id) query future.
    PollFromAndToFuture(Pin + Send>>),
    // Ready to build the next chunk query for the carried (from, to).
    GetDataStreamFuture((i64, i64)),
    // Awaiting a chunk; carries (future, next_from, to).
    PollDataStreamFuture((Pin + Send>>, i64, i64)),
}
27 |
pin_project!(
    /// Streams contract addresses of one chain in id-ordered chunks.
    pub struct ContractAddressesStream {
        chain_id_: i64,
        // Optional overrides for the id range; resolved lazily when None.
        from: Option,
        to: Option,
        chunk_size: i64,
        client: Arc>,
        state: ContractAddressesStreamState,
    }
);
38 |
impl ContractAddressesStream {
    /// Creates a stream over the contract addresses of `chain_id_`.
    /// Rows are fetched in chunks of 500 ids by default.
    pub fn new(client: &Arc>, chain_id_: i64) -> Self {
        Self {
            chain_id_,
            from: None,
            to: None,
            chunk_size: 500,
            client: client.clone(),
            state: ContractAddressesStreamState::GetFromAndTo,
        }
    }
    /// Overrides the number of ids covered per fetched chunk.
    pub fn with_chunk_size(mut self, chunk_size: i64) -> Self {
        self.chunk_size = chunk_size;
        self
    }
}
55 |
impl Stream for ContractAddressesStream {
    type Item = DataStream;

    /// Drives a four-state machine: resolve the (min, max) id range for
    /// the chain, then repeatedly fetch `chunk_size`-wide id windows
    /// until `from` passes `to`.
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> {
        let this = self.project();
        let chain_id_ = *this.chain_id_;
        let from = *this.from;
        let to = *this.to;

        match this.state {
            ContractAddressesStreamState::GetFromAndTo => {
                let client = this.client.clone();

                // Build a future resolving the id bounds; caller-provided
                // `from`/`to` overrides short-circuit the queries.
                *this.state = ContractAddressesStreamState::PollFromAndToFuture(
                    async move {
                        let client = client.lock().await;

                        #[derive(Deserialize)]
                        struct MinOrMax {
                            min: Option,
                            max: Option,
                        }

                        let from = match from {
                            Some(from) => from,
                            None => {
                                let query = format!(
                                    "
                    SELECT MIN(id) FROM chaindexing_contract_addresses
                    WHERE chain_id = {chain_id_}"
                                );

                                let min_or_max: Option =
                                    ChaindexingRepo::load_data(&client, &query).await;

                                min_or_max.and_then(|mm| mm.min).unwrap_or(0)
                            }
                        };

                        let to = match to {
                            Some(to) => to,
                            None => {
                                let query = format!(
                                    "
                    SELECT MAX(id) FROM chaindexing_contract_addresses
                    WHERE chain_id = {chain_id_}"
                                );

                                let min_or_max: Option =
                                    ChaindexingRepo::load_data(&client, &query).await;

                                min_or_max.and_then(|mm| mm.max).unwrap_or(0)
                            }
                        };

                        (from, to)
                    }
                    .boxed(),
                );

                // Re-schedule immediately so the new state gets polled.
                cx.waker().wake_by_ref();
                Poll::Pending
            }
            ContractAddressesStreamState::PollFromAndToFuture(from_and_to_future) => {
                let (from, to): (i64, i64) =
                    futures_util::ready!(from_and_to_future.as_mut().poll(cx));

                *this.state = ContractAddressesStreamState::GetDataStreamFuture((from, to));

                cx.waker().wake_by_ref();

                Poll::Pending
            }
            ContractAddressesStreamState::GetDataStreamFuture((from, to)) => {
                let client = this.client.clone();
                let from = *from;
                let to = *to;

                // Window start passed the upper bound: stream exhausted.
                if from > to {
                    Poll::Ready(None)
                } else {
                    let chunk_limit = from + *this.chunk_size;

                    // NOTE(review): BETWEEN is inclusive and the next
                    // window also starts at chunk_limit, so the row with
                    // id == chunk_limit may be yielded twice — verify.
                    let data_stream_future = async move {
                        let client = client.lock().await;

                        let query = format!(
                            "
                    SELECT * FROM chaindexing_contract_addresses
                    WHERE chain_id = {chain_id_} AND id BETWEEN {from} AND {chunk_limit}
                    "
                        );

                        let addresses: Vec =
                            ChaindexingRepo::load_data_list(&client, &query).await;

                        addresses
                    }
                    .boxed();

                    *this.state = ContractAddressesStreamState::PollDataStreamFuture((
                        data_stream_future,
                        chunk_limit,
                        to,
                    ));

                    cx.waker().wake_by_ref();

                    Poll::Pending
                }
            }
            ContractAddressesStreamState::PollDataStreamFuture((
                data_stream_future,
                next_from,
                to,
            )) => {
                let streamed_data = futures_util::ready!(data_stream_future.as_mut().poll(cx));

                // Advance the window and yield this chunk.
                *this.state = ContractAddressesStreamState::GetDataStreamFuture((*next_from, *to));

                cx.waker().wake_by_ref();

                Poll::Ready(Some(streamed_data))
            }
        }
    }
}
183 |
--------------------------------------------------------------------------------
/chaindexing/src/root.rs:
--------------------------------------------------------------------------------
1 | pub mod states {
2 | use serde::Deserialize;
3 |
4 | pub const MAX_COUNT: u64 = 1_000;
5 |
6 | #[derive(Clone, Deserialize)]
7 | pub struct State {
8 | pub reset_count: u64,
9 | pub reset_including_side_effects_count: u64,
10 | }
11 |
12 | impl Default for State {
13 | fn default() -> Self {
14 | Self::new()
15 | }
16 | }
17 |
18 | impl State {
19 | pub fn new() -> Self {
20 | Self {
21 | reset_count: 0,
22 | reset_including_side_effects_count: 0,
23 | }
24 | }
25 |
26 | pub fn update_reset_count(&mut self, count: u64) {
27 | self.reset_count = count;
28 | }
29 | pub fn update_reset_including_side_effects_count(&mut self, count: u64) {
30 | self.reset_including_side_effects_count = count;
31 | }
32 | }
33 | }
34 |
35 | pub use states::State;
36 |
--------------------------------------------------------------------------------
/chaindexing/src/states.rs:
--------------------------------------------------------------------------------
1 | //! # States
2 | //! Any struct that can be serialized and deserialized while implementing
3 | //! any state type, such as ContractState, ChainState, MultiChainState etc.
4 | //! is a valid Chaindexing State
5 | //!
6 | //! ## Example
7 | //!
8 | //! ```rust,no_run
9 | //! use chaindexing::states::{ContractState, StateMigrations};
//! use serde::{Deserialize, Serialize};
//!
//! #[derive(Clone, Debug, Serialize, Deserialize)]
//! pub struct Nft {
//!     pub token_id: u32,
//!     pub owner_address: String,
//! }
//!
//! impl ContractState for Nft {
//!     fn table_name() -> &'static str {
//!         "nfts"
//!     }
//! }
//!
//! pub struct NftMigrations;
//!
//! impl StateMigrations for NftMigrations {
//!     fn migrations(&self) -> &'static [&'static str] {
//!         &["CREATE TABLE IF NOT EXISTS nfts (
//!             token_id INTEGER NOT NULL,
//!             owner_address TEXT NOT NULL
//!         )"]
//!     }
//! }
//! ```
35 | pub use migrations::StateMigrations;
36 |
37 | use std::collections::HashMap;
38 | use std::sync::Arc;
39 |
40 | mod migrations;
41 | mod state_versions;
42 | mod state_views;
43 |
44 | mod chain_state;
45 | mod contract_state;
46 | mod filters;
47 | mod multi_chain_state;
48 | mod state;
49 | mod updates;
50 |
51 | pub use filters::Filters;
52 | pub use updates::Updates;
53 |
54 | use crate::{
55 | ChaindexingRepo, ChaindexingRepoClient, ChaindexingRepoTxnClient, ExecutesWithRawQuery,
56 | };
57 |
58 | pub use chain_state::ChainState;
59 | pub use contract_state::ContractState;
60 | pub use multi_chain_state::MultiChainState;
61 |
62 | use state_versions::{StateVersion, StateVersions, STATE_VERSIONS_TABLE_PREFIX};
63 | use state_views::StateViews;
64 |
65 | pub(crate) async fn backtrack_states<'a>(
66 | table_names: &Vec,
67 | chain_id: i64,
68 | block_number: i64,
69 | client: &ChaindexingRepoTxnClient<'a>,
70 | ) {
71 | for table_name in table_names {
72 | let state_versions = StateVersions::get(block_number, chain_id, table_name, client).await;
73 |
74 | let state_version_ids = StateVersions::get_ids(&state_versions);
75 | StateVersions::delete_by_ids(&state_version_ids, table_name, client).await;
76 |
77 | let state_version_group_ids = StateVersions::get_group_ids(&state_versions);
78 | StateViews::refresh(&state_version_group_ids, table_name, client).await;
79 | }
80 | }
81 |
82 | pub(crate) async fn prune_state_versions(
83 | table_names: &Vec,
84 | client: &ChaindexingRepoClient,
85 | min_block_number: u64,
86 | chain_id: u64,
87 | ) {
88 | for table_name in table_names {
89 | let state_version_table_name = StateVersion::table_name(table_name);
90 |
91 | ChaindexingRepo::execute(
92 | client,
93 | &format!(
94 | "
95 | DELETE FROM {state_version_table_name}
96 | WHERE block_number < {min_block_number}
97 | AND chain_id = {chain_id}
98 | "
99 | ),
100 | )
101 | .await;
102 | }
103 | }
104 |
105 | pub(crate) fn get_all_table_names(state_migrations: &[Arc]) -> Vec {
106 | state_migrations
107 | .iter()
108 | .flat_map(|state_migration| state_migration.get_table_names())
109 | .collect()
110 | }
111 |
/// Splits a state view into parallel column-name and value lists for building
/// an INSERT. Values are wrapped in single quotes for direct SQL interpolation.
/// Iteration order follows the HashMap, so columns/values stay index-aligned
/// but are otherwise unordered.
pub(crate) fn to_columns_and_values(state: &HashMap<String, String>) -> (Vec<String>, Vec<String>) {
    state
        .iter()
        .map(|(column, value)| (column.to_string(), format!("'{value}'")))
        .unzip()
}
123 |
/// Renders a filter map as a SQL `AND`-joined predicate string, e.g.
/// `a = 'x' AND b = 'y'`. Order follows HashMap iteration (unordered).
/// An empty map yields an empty string.
pub(crate) fn to_and_filters<K: ToString, V: ToString>(state: &HashMap<K, V>) -> String {
    state
        .iter()
        .map(|(column, value)| {
            let column = column.to_string();
            let value = value.to_string();
            format!("{column} = '{value}'")
        })
        .collect::<Vec<_>>()
        .join(" AND ")
}
137 |
138 | pub(crate) fn serde_map_to_string_map(
139 | serde_map: &HashMap, serde_json::Value>,
140 | ) -> HashMap {
141 | serde_map.iter().fold(HashMap::new(), |mut map, (key, value)| {
142 | if !value.is_null() {
143 | if value.is_object() {
144 | map.insert(key.as_ref().to_owned(), value.to_string());
145 | } else {
146 | map.insert(key.as_ref().to_owned(), value.to_string().replace('\"', ""));
147 | }
148 | }
149 |
150 | map
151 | })
152 | }
153 |
--------------------------------------------------------------------------------
/chaindexing/src/states/chain_state.rs:
--------------------------------------------------------------------------------
1 | use std::collections::HashMap;
2 | use std::fmt::Debug;
3 |
4 | use crate::handlers::{HandlerContext, PureHandlerContext};
5 | use crate::{ChaindexingRepoTxnClient, Event};
6 |
7 | use super::filters::Filters;
8 | use super::state;
9 | use super::state::read_many;
10 | use super::state_versions::StateVersion;
11 | use super::state_views::StateView;
12 | use super::updates::Updates;
13 | use serde::de::DeserializeOwned;
14 | use serde::Serialize;
15 |
16 | /// States derived from different contracts within a chain
17 | #[crate::augmenting_std::async_trait]
18 | pub trait ChainState: DeserializeOwned + Serialize + Clone + Debug + Sync + Send + 'static {
19 | /// Table of the state as specified in StateMigrations
20 | fn table_name() -> &'static str;
21 |
22 | /// Inserts state in the state's table
23 | async fn create<'a, 'b>(&self, context: &PureHandlerContext<'a, 'b>) {
24 | state::create(Self::table_name(), &state::to_view(self), context).await;
25 | }
26 |
27 | /// Returns a single state matching filters. Panics if there are multiple.
28 | async fn read_one<'a, C: HandlerContext<'a>>(filters: &Filters, context: &C) -> Option {
29 | Self::read_many(filters, context).await.first().cloned()
30 | }
31 |
32 | /// Returns states matching filters
33 | async fn read_many<'a, C: HandlerContext<'a>>(filters: &Filters, context: &C) -> Vec {
34 | read_many(filters, context, Self::table_name()).await
35 | }
36 |
37 | /// Updates state with the specified updates
38 | async fn update<'a, 'b>(&self, updates: &Updates, context: &PureHandlerContext<'a, 'b>) {
39 | let event = &context.event;
40 | let client = context.repo_client;
41 |
42 | let table_name = Self::table_name();
43 | let state_view = self.to_complete_view(table_name, client, event).await;
44 |
45 | let latest_state_version =
46 | StateVersion::update(&state_view, &updates.values, table_name, event, client).await;
47 | StateView::refresh(&latest_state_version, table_name, client).await;
48 | }
49 |
50 | /// Deletes state from the state's table
51 | async fn delete<'a, 'b>(&self, context: &PureHandlerContext<'a, 'b>) {
52 | let event = &context.event;
53 | let client = context.repo_client;
54 |
55 | let table_name = Self::table_name();
56 | let state_view = self.to_complete_view(table_name, client, event).await;
57 |
58 | let latest_state_version =
59 | StateVersion::delete(&state_view, table_name, event, client).await;
60 | StateView::refresh(&latest_state_version, table_name, client).await;
61 | }
62 |
63 | fn to_view(&self) -> HashMap {
64 | state::to_view(self)
65 | }
66 |
67 | async fn to_complete_view<'a>(
68 | &self,
69 | table_name: &str,
70 | client: &ChaindexingRepoTxnClient<'a>,
71 | event: &Event,
72 | ) -> HashMap {
73 | let mut state_view = self.to_view();
74 | state_view.insert("chain_id".to_string(), event.chain_id.to_string());
75 | StateView::get_complete(&self.to_view(), table_name, client).await
76 | }
77 | }
78 |
--------------------------------------------------------------------------------
/chaindexing/src/states/contract_state.rs:
--------------------------------------------------------------------------------
1 | use std::collections::HashMap;
2 | use std::fmt::Debug;
3 |
4 | use crate::handlers::{HandlerContext, PureHandlerContext};
5 | use crate::{ChaindexingRepoTxnClient, Event};
6 |
7 | use super::filters::Filters;
8 | use super::state;
9 | use super::state::read_many;
10 | use super::state_versions::StateVersion;
11 | use super::state_views::StateView;
12 | use super::updates::Updates;
13 | use serde::de::DeserializeOwned;
14 | use serde::Serialize;
15 |
16 | /// States derived from a contract
17 | #[crate::augmenting_std::async_trait]
18 | pub trait ContractState:
19 | DeserializeOwned + Serialize + Clone + Debug + Sync + Send + 'static
20 | {
21 | /// Table of the state as specified in StateMigrations
22 | fn table_name() -> &'static str;
23 |
24 | /// Inserts state in the state's table
25 | async fn create<'a, 'b>(&self, context: &PureHandlerContext<'a, 'b>) {
26 | state::create(Self::table_name(), &state::to_view(self), context).await;
27 | }
28 |
29 | /// Returns a single state matching filters. Panics if there are multiple.
30 | async fn read_one<'a, C: HandlerContext<'a>>(filters: &Filters, context: &C) -> Option {
31 | Self::read_many(filters, context).await.first().cloned()
32 | }
33 |
34 | /// Returns states matching filters
35 | async fn read_many<'a, C: HandlerContext<'a>>(filters: &Filters, context: &C) -> Vec {
36 | read_many(filters, context, Self::table_name()).await
37 | }
38 |
39 | /// Updates state with the specified updates
40 | async fn update<'a, 'b>(&self, updates: &Updates, context: &PureHandlerContext<'a, 'b>) {
41 | let event = &context.event;
42 | let client = context.repo_client;
43 |
44 | let table_name = Self::table_name();
45 | let state_view = self.to_complete_view(table_name, client, event).await;
46 |
47 | let latest_state_version =
48 | StateVersion::update(&state_view, &updates.values, table_name, event, client).await;
49 | StateView::refresh(&latest_state_version, table_name, client).await;
50 | }
51 |
52 | /// Deletes state from the state's table
53 | async fn delete<'a, 'b>(&self, context: &PureHandlerContext<'a, 'b>) {
54 | let event = &context.event;
55 | let client = context.repo_client;
56 |
57 | let table_name = Self::table_name();
58 | let state_view = self.to_complete_view(table_name, client, event).await;
59 |
60 | let latest_state_version =
61 | StateVersion::delete(&state_view, table_name, event, client).await;
62 | StateView::refresh(&latest_state_version, table_name, client).await;
63 | }
64 |
65 | fn to_view(&self) -> HashMap {
66 | state::to_view(self)
67 | }
68 |
69 | async fn to_complete_view<'a>(
70 | &self,
71 | table_name: &str,
72 | client: &ChaindexingRepoTxnClient<'a>,
73 | event: &Event,
74 | ) -> HashMap {
75 | let mut state_view = self.to_view();
76 | state_view.insert("chain_id".to_string(), event.chain_id.to_string());
77 | state_view.insert(
78 | "contract_address".to_string(),
79 | event.contract_address.to_owned(),
80 | );
81 | StateView::get_complete(&state_view, table_name, client).await
82 | }
83 | }
84 |
--------------------------------------------------------------------------------
/chaindexing/src/states/filters.rs:
--------------------------------------------------------------------------------
1 | use std::{collections::HashMap, fmt::Debug};
2 |
3 | use crate::Event;
4 |
/// Scope within which filters are implicitly narrowed when querying states.
#[derive(Clone, Debug)]
enum FiltersContext {
    // Scoped to one contract on one chain (injects chain_id + contract_address).
    Contract,
    // Scoped to one chain (injects chain_id only).
    Chain,
    // No implicit scoping; only the explicit filters apply.
    MultiChain,
}
11 |
12 | /// Represents a set of filters used for querying data.
13 | #[derive(Clone, Debug)]
14 | pub struct Filters {
15 | values: HashMap, // A map of filter field names to their values.
16 | context: FiltersContext, // The context in which the filters are applied.
17 | }
18 |
19 | impl Filters {
20 | /// Creates a new Filters instance with a single filter.
21 | ///
22 | /// # Arguments
23 | ///
24 | /// * `field` - The field name of the filter.
25 | /// * `value` - The value of the filter.
26 | ///
27 | /// # Example
28 | ///
29 | /// ```ignore
30 | /// let filters = Filters::new("token_id", token_id);
31 | /// Nft::read_one(&filters, &context);
32 | /// ```
33 | pub fn new(field: impl ToString, value: impl ToString) -> Self {
34 | Self {
35 | values: HashMap::from([(field.to_string(), value.to_string())]),
36 | context: FiltersContext::Contract,
37 | }
38 | }
39 |
40 | /// Adds a new filter to the existing set of filters by moving the
41 | /// original filters
42 | ///
43 | /// # Arguments
44 | ///
45 | /// * `field` - The field name of the filter.
46 | /// * `value` - The value of the filter.
47 | ///
48 | /// # Example
49 | ///
50 | /// ```ignore
51 | /// Filters::new("address", address).add("token_id", token_id); // filters is moved
52 | /// ```
53 | pub fn add(mut self, field: impl ToString, value: impl ToString) -> Self {
54 | self.add_mut(field, value);
55 | self
56 | }
57 |
58 | /// Adds a new filter to the existing set of filters without moving
59 | /// the original filters
60 | ///
61 | /// # Arguments
62 | ///
63 | /// * `field` - The field name of the filter.
64 | /// * `value` - The value of the filter.
65 | ///
66 | /// # Example
67 | ///
68 | /// ```ignore
69 | /// let mut filters = Filters::new("address", address);
70 | ///
71 | /// filters.add_mut("token_id", token_id); // filters not moved
72 | /// ```
73 | pub fn add_mut(&mut self, field: impl ToString, value: impl ToString) {
74 | self.values.insert(field.to_string(), value.to_string());
75 | }
76 |
77 | /// Sets the context of the filters to Contract
78 | pub fn within_contract(mut self) -> Self {
79 | self.context = FiltersContext::Contract;
80 | self
81 | }
82 |
83 | /// Sets the context of the filters to Chain
84 | pub fn within_chain(mut self) -> Self {
85 | self.context = FiltersContext::Chain;
86 | self
87 | }
88 |
89 | /// Sets the context of the filters to MultiChain
90 | pub fn within_multi_chain(mut self) -> Self {
91 | self.context = FiltersContext::MultiChain;
92 | self
93 | }
94 | pub(super) fn get(&self, event: &Event) -> HashMap {
95 | let mut filters = self.values.clone();
96 |
97 | match self.context {
98 | FiltersContext::Contract => {
99 | filters.insert("chain_id".to_string(), event.chain_id.to_string());
100 | filters.insert(
101 | "contract_address".to_string(),
102 | event.contract_address.to_owned(),
103 | );
104 | }
105 | FiltersContext::Chain => {
106 | filters.insert("chain_id".to_string(), event.chain_id.to_string());
107 | }
108 | FiltersContext::MultiChain => {}
109 | }
110 |
111 | filters
112 | }
113 | }
114 |
--------------------------------------------------------------------------------
/chaindexing/src/states/multi_chain_state.rs:
--------------------------------------------------------------------------------
1 | use std::collections::HashMap;
2 | use std::fmt::Debug;
3 |
4 | use crate::handlers::{HandlerContext, PureHandlerContext};
5 | use crate::ChaindexingRepoTxnClient;
6 |
7 | use super::filters::Filters;
8 | use super::state::{self, read_many};
9 | use super::state_versions::StateVersion;
10 | use super::state_views::StateView;
11 | use super::updates::Updates;
12 | use serde::de::DeserializeOwned;
13 | use serde::Serialize;
14 |
15 | /// States derived from different contracts across different chains
16 | /// N/B: Indexing MultiChainStates must be Order-Agnostic
17 | #[crate::augmenting_std::async_trait]
18 | pub trait MultiChainState:
19 | DeserializeOwned + Serialize + Clone + Debug + Sync + Send + 'static
20 | {
21 | /// Table of the state as specified in StateMigrations
22 | fn table_name() -> &'static str;
23 |
24 | /// Inserts state in the state's table
25 | async fn create<'a, 'b>(&self, context: &PureHandlerContext<'a, 'b>) {
26 | state::create(Self::table_name(), &state::to_view(self), context).await;
27 | }
28 |
29 | /// Returns a single state matching filters. Panics if there are multiple.
30 | async fn read_one<'a, C: HandlerContext<'a>>(filters: &Filters, context: &C) -> Option {
31 | Self::read_many(filters, context).await.first().cloned()
32 | }
33 |
34 | /// Returns states matching filters
35 | async fn read_many<'a, C: HandlerContext<'a>>(filters: &Filters, context: &C) -> Vec {
36 | read_many(filters, context, Self::table_name()).await
37 | }
38 |
39 | /// Updates state with the specified updates
40 | async fn update<'a, 'b>(&self, updates: &Updates, context: &PureHandlerContext<'a, 'b>) {
41 | let event = context.event.clone();
42 | let client = context.repo_client;
43 | let table_name = Self::table_name();
44 | let state_view = self.to_complete_view(table_name, client).await;
45 | let updates = updates.clone();
46 | let client = context.repo_client_for_mcs.clone();
47 |
48 | context
49 | .deferred_mutations_for_mcs
50 | .add(async move {
51 | let mut client = client.lock().await;
52 |
53 | let latest_state_version = StateVersion::update_without_txn(
54 | &state_view,
55 | &updates.values,
56 | table_name,
57 | &event,
58 | &mut client,
59 | )
60 | .await;
61 | StateView::refresh_without_txn(&latest_state_version, table_name, &client).await;
62 | })
63 | .await;
64 | }
65 |
66 | /// Deletes state from the state's table
67 | async fn delete<'a, 'b>(&self, context: &PureHandlerContext<'a, 'b>) {
68 | let event = context.event.clone();
69 | let client = context.repo_client;
70 | let table_name = Self::table_name();
71 | let state_view = self.to_complete_view(table_name, client).await;
72 | let client = context.repo_client_for_mcs.clone();
73 |
74 | context
75 | .deferred_mutations_for_mcs
76 | .add(async move {
77 | let client = client.lock().await;
78 |
79 | let latest_state_version =
80 | StateVersion::delete_without_txn(&state_view, table_name, &event, &client)
81 | .await;
82 | StateView::refresh_without_txn(&latest_state_version, table_name, &client).await;
83 | })
84 | .await;
85 | }
86 |
87 | fn to_view(&self) -> HashMap {
88 | state::to_view(self)
89 | }
90 |
91 | async fn to_complete_view<'a>(
92 | &self,
93 | table_name: &str,
94 | client: &ChaindexingRepoTxnClient<'a>,
95 | ) -> HashMap {
96 | StateView::get_complete(&self.to_view(), table_name, client).await
97 | }
98 | }
99 |
--------------------------------------------------------------------------------
/chaindexing/src/states/state.rs:
--------------------------------------------------------------------------------
1 | use std::collections::HashMap;
2 |
3 | use crate::handlers::{HandlerContext, PureHandlerContext};
4 | use crate::{ChaindexingRepo, LoadsDataWithRawQuery};
5 |
6 | use super::filters::Filters;
7 | use super::state_versions::StateVersion;
8 | use super::state_views::StateView;
9 | use super::{serde_map_to_string_map, to_and_filters};
10 | use serde::de::DeserializeOwned;
11 | use serde::Serialize;
12 |
13 | pub fn to_view(value: &T) -> HashMap
14 | where
15 | T: Serialize,
16 | {
17 | let state: serde_json::Value = serde_json::to_value(value).unwrap();
18 |
19 | let map: HashMap = serde_json::from_value(state).unwrap();
20 |
21 | serde_map_to_string_map(&map)
22 | }
23 |
24 | pub async fn read_many<'a, C: HandlerContext<'a>, T: Send + DeserializeOwned>(
25 | filters: &Filters,
26 | context: &C,
27 | table_name: &str,
28 | ) -> Vec {
29 | let client = context.get_client();
30 |
31 | let query = format!(
32 | "SELECT * FROM {table_name}
33 | WHERE {filters}",
34 | table_name = table_name,
35 | filters = to_and_filters(&filters.get(context.get_event())),
36 | );
37 |
38 | ChaindexingRepo::load_data_list_in_txn(client, &query).await
39 | }
40 |
41 | pub async fn create<'a, 'b>(
42 | table_name: &str,
43 | state_view: &HashMap,
44 | context: &PureHandlerContext<'a, 'b>,
45 | ) {
46 | let event = &context.event;
47 | let client = context.repo_client;
48 |
49 | let latest_state_version = StateVersion::create(state_view, table_name, event, client).await;
50 | StateView::refresh(&latest_state_version, table_name, client).await;
51 | }
52 |
--------------------------------------------------------------------------------
/chaindexing/src/states/state_versions.rs:
--------------------------------------------------------------------------------
1 | use std::collections::HashMap;
2 |
3 | use crate::{
4 | ChaindexingRepo, ChaindexingRepoTxnClient, ExecutesWithRawQuery, LoadsDataWithRawQuery,
5 | };
6 | use crate::{ChaindexingRepoClient, Event};
7 |
8 | use super::{serde_map_to_string_map, to_columns_and_values};
9 |
// Prefix used to derive a per-state-table version-table name.
pub const STATE_VERSIONS_TABLE_PREFIX: &str = "chaindexing_state_versions_for_";
// Columns that exist only on version rows.
// NOTE(review): presumably stripped when materializing state views — confirm.
pub const STATE_VERSIONS_UNIQUE_FIELDS: [&str; 2] =
    ["state_version_id", "state_version_is_deleted"];

// Namespace for bulk operations over state-version rows.
pub struct StateVersions;
15 |
16 | impl StateVersions {
17 | pub async fn get<'a>(
18 | from_block_number: i64,
19 | chain_id: i64,
20 | state_table_name: &str,
21 | client: &ChaindexingRepoTxnClient<'a>,
22 | ) -> Vec> {
23 | let query = format!(
24 | "SELECT * FROM {table_name}
25 | WHERE chain_id = {chain_id}
26 | AND block_number >= {from_block_number}",
27 | table_name = StateVersion::table_name(state_table_name),
28 | );
29 |
30 | ChaindexingRepo::load_data_list_in_txn::>(client, &query)
31 | .await
32 | .iter()
33 | .map(serde_map_to_string_map)
34 | .collect()
35 | }
36 |
37 | pub fn get_ids(state_versions: &[HashMap]) -> Vec {
38 | state_versions
39 | .iter()
40 | .map(|state_version| state_version.get("state_version_id").unwrap())
41 | .cloned()
42 | .collect()
43 | }
44 |
45 | pub fn get_group_ids(state_versions: &[HashMap]) -> Vec {
46 | state_versions
47 | .iter()
48 | .map(|state_version| state_version.get("state_version_group_id").unwrap())
49 | .cloned()
50 | .collect()
51 | }
52 |
53 | pub async fn delete_by_ids<'a>(
54 | ids: &[String],
55 | state_table_name: &str,
56 | client: &ChaindexingRepoTxnClient<'a>,
57 | ) {
58 | let query = format!(
59 | "DELETE FROM {table_name}
60 | WHERE state_version_id IN ({ids})",
61 | table_name = StateVersion::table_name(state_table_name),
62 | ids = ids.join(",")
63 | );
64 |
65 | ChaindexingRepo::execute_in_txn(client, &query).await;
66 | }
67 |
68 | pub async fn get_latest<'a>(
69 | group_ids: &[String],
70 | state_table_name: &str,
71 | client: &ChaindexingRepoTxnClient<'a>,
72 | ) -> Vec> {
73 | let query = format!(
74 | "SELECT DISTINCT ON (state_version_group_id) * FROM {table_name}
75 | WHERE state_version_group_id IN ({group_ids})
76 | ORDER BY state_version_group_id, block_number, log_index DESC",
77 | table_name = StateVersion::table_name(state_table_name),
78 | group_ids = group_ids.iter().map(|id| format!("'{id}'")).collect::>().join(",")
79 | );
80 |
81 | ChaindexingRepo::load_data_list_in_txn::>(client, &query)
82 | .await
83 | .iter()
84 | .map(serde_map_to_string_map)
85 | .collect()
86 | }
87 | }
88 |
// Namespace for operations on individual state versions and version-table naming.
pub struct StateVersion;
90 |
91 | impl StateVersion {
92 | pub fn table_name(state_table_name: &str) -> String {
93 | format!("{STATE_VERSIONS_TABLE_PREFIX}{state_table_name}")
94 | }
95 |
96 | pub fn was_deleted(state_version: &HashMap) -> bool {
97 | state_version.get("state_version_is_deleted").unwrap() == "true"
98 | }
99 |
100 | pub fn get_group_id(state_version: &HashMap) -> String {
101 | state_version.get("state_version_group_id").unwrap().to_owned()
102 | }
103 |
104 | pub async fn create<'a>(
105 | state: &HashMap,
106 | state_table_name: &str,
107 | event: &Event,
108 | client: &ChaindexingRepoTxnClient<'a>,
109 | ) -> HashMap {
110 | let mut state_version = state.clone();
111 | state_version.insert(
112 | "state_version_group_id".to_owned(),
113 | uuid::Uuid::new_v4().to_string(),
114 | );
115 |
116 | Self::append(&state_version, state_table_name, event, client).await
117 | }
118 |
119 | pub async fn update<'a>(
120 | state: &HashMap,
121 | updates: &HashMap,
122 | state_table_name: &str,
123 | event: &Event,
124 | client: &ChaindexingRepoTxnClient<'a>,
125 | ) -> HashMap {
126 | let mut state_version = state.clone();
127 | state_version.extend(updates.clone());
128 | Self::append(&state_version, state_table_name, event, client).await
129 | }
130 | pub async fn update_without_txn<'b>(
131 | state: &HashMap,
132 | updates: &HashMap,
133 | state_table_name: &str,
134 | event: &Event,
135 | client: &'b mut ChaindexingRepoClient,
136 | ) -> HashMap