├── .github
│   └── workflows
│       └── ci.yml
├── .gitignore
├── CHANGELOG.md
├── Cargo.toml
├── LICENSE-APACHE
├── LICENSE-MIT
├── README.md
├── examples
│   ├── README.md
│   ├── actions.rs
│   ├── nft_indexer.rs
│   ├── simple.rs
│   ├── with_context.rs
│   └── with_context_parent_tx_cache.rs
├── lake-context-derive
│   ├── Cargo.toml
│   ├── README.md
│   └── src
│       └── lib.rs
├── lake-framework
│   ├── Cargo.toml
│   ├── README.md
│   ├── blocks
│   │   └── 000000879765
│   │       ├── block.json
│   │       └── shard_0.json
│   └── src
│       ├── lib.rs
│       ├── s3_fetchers.rs
│       ├── streamer.rs
│       └── types.rs
├── lake-parent-transaction-cache
│   ├── Cargo.toml
│   ├── README.md
│   └── src
│       └── lib.rs
├── lake-primitives
│   ├── Cargo.toml
│   ├── README.md
│   └── src
│       ├── lib.rs
│       └── types
│           ├── actions.rs
│           ├── block.rs
│           ├── delegate_actions.rs
│           ├── events.rs
│           ├── impl_actions.rs
│           ├── mod.rs
│           ├── receipts.rs
│           ├── state_changes.rs
│           └── transactions.rs
├── release-plz.toml
└── rust-toolchain

/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | name: Rust
2 | 
3 | on:
4 |   push:
5 |     branches:
6 |       - main
7 |   pull_request:
8 |     branches:
9 |       - main
10 | 
11 | env:
12 |   CARGO_TERM_COLOR: always
13 | 
14 | jobs:
15 |   check:
16 | 
17 |     runs-on: ubuntu-latest
18 | 
19 |     steps:
20 |     - uses: actions/checkout@v2
21 |     - name: Run check
22 |       run: cargo check
23 | 
24 |   test:
25 | 
26 |     runs-on: ubuntu-latest
27 | 
28 |     steps:
29 |     - uses: actions/checkout@v2
30 |     - name: Run tests
31 |       run: cargo test
32 | 
33 |   rustfmt:
34 |     name: rustfmt
35 |     runs-on: ubuntu-24.04
36 |     steps:
37 |       - name: Checkout repository
38 |         uses: actions/checkout@v2
39 |       - name: Install Rust
40 |         uses: actions-rs/toolchain@v1
41 |         with:
42 |           toolchain: stable
43 |           override: true
44 |           profile: minimal
45 |           components: rustfmt
46 |       - name: Check formatting
47 |         run: |
48 |           cargo fmt -- --check
49 | 
50 |   rustclippy:
51 |     name: rustclippy
52 |     runs-on: ubuntu-24.04
53 |     steps:
54 |       - name: Checkout repository
55 |         uses: actions/checkout@v2
56 |       - name: Install Rust
57 |         uses: actions-rs/toolchain@v1
58 |         with:
59 |           toolchain: stable
60 |           override: true
61 |           profile: minimal
62 |           components: clippy
63 |       - name: Clippy check
64 |         run: |
65 |           cargo clippy
66 | 
67 |   release-plz:
68 |     name: release-plz
69 |     runs-on: ubuntu-latest
70 |     needs: [check, rustclippy, rustfmt, test]
71 |     if: github.ref == 'refs/heads/main' # Specify the branch condition
72 |     steps:
73 |       - name: Checkout repository
74 |         uses: actions/checkout@v3
75 |         with:
76 |           fetch-depth: 0
77 |           token: ${{ secrets.CUSTOM_GITHUB_TOKEN }}
78 |       - name: Install Rust toolchain
79 |         uses: dtolnay/rust-toolchain@stable
80 |       - name: Run release-plz
81 |         uses: MarcoIeni/release-plz-action@v0.5
82 |         env:
83 |           # https://marcoieni.github.io/release-plz/github/trigger.html
84 |           GITHUB_TOKEN: ${{ secrets.CUSTOM_GITHUB_TOKEN }}
85 |           CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }}
86 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | /target
2 | Cargo.lock
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # Changelog
2 | 
3 | All notable changes to this project will be documented in this file.
4 | 
5 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
6 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
7 | 
8 | ## [Unreleased](https://github.com/near/near-lake-framework/compare/v0.7.2...HEAD)
9 | - Simpler start boilerplate, simpler structures to deal with!
10 | - Upgrade to the latest AWS SDK version (*since beta.3*)
11 | 
12 | ### Breaking changes
13 | 
14 | This version introduces a different, much-simplified concept of Lake Framework usage, and thus it brings breaking changes.
15 | 
16 | We introduce the `near-lake-primitives` crate with simplified primitive structures (e.g. `Block`, `Transaction`, `StateChange`, etc.), which the Lake Framework now relies on heavily.
17 | 
18 | And some other changes:
19 | 
20 | - `LakeConfig` is renamed to just `Lake`. This is because, since this update, `Lake` accepts the **indexing function** from the user and runs the streamer implicitly, shortening and simplifying the start boilerplate to something like this:
21 | ```rust
22 | fn main() -> anyhow::Result<()> {
23 |     // Lake Framework start boilerplate
24 |     near_lake_framework::LakeBuilder::default()
25 |         .mainnet()
26 |         .start_block_height(80504433)
27 |         .build()?
28 |         .run(handle_block) // user-defined asynchronous function that handles each block
29 | }
30 | ```
31 | 
32 | Please note that your main function isn't required to be asynchronous anymore! That is now handled by the Lake Framework under the hood.
33 | 
34 | ## [0.7.2](https://github.com/near/near-lake-framework/compare/v0.7.1...0.7.2)
35 | 
36 | - Upgrade the near primitives crate to `0.17.0`
37 | - Upgrade `tokio` to the latest version (`1.28.2`)
38 | 
39 | ## [0.7.1](https://github.com/near/near-lake-framework/compare/v0.7.0...0.7.1)
40 | 
41 | - Refactor `s3_fetchers` to allow testing
42 | - Fix the `betanet` default region (the corresponding bucket is in a different region)
43 | 
44 | ## [0.7.0](https://github.com/near/near-lake-framework/compare/v0.6.1...0.7.0)
45 | 
46 | - Add support for Meta Transactions [NEP-366](https://github.com/near/NEPs/blob/master/neps/nep-0366.md) by upgrading `near-indexer-primitives` to `0.16`
47 | - Add a helper function for connecting to the `betanet` lake
48 | 
49 | ### Breaking change
50 | 
51 | - The `Delegate` action has been introduced in `near-primitives::views::ActionView`; this should be handled everywhere you handle `ActionView`
52 | 
53 | ## [0.6.1](https://github.com/near/near-lake-framework/compare/v0.6.0...0.6.1)
54 | 
55 | - Fix possible silent stops of the streamer (fire logs and return errors where necessary)
56 | - Fix the issue where the streamer was always 1 block behind
57 | - Renamed a few internal methods to reflect what they do
58 | - Added debug and error logs in a few places
59 | - Introduced a `LakeError` enum using `thiserror` (#42), but it is not exposed yet to avoid breaking changes to the framework (for now; it will be done in `0.7.0`)
60 | - Added proper error handling in a few places
61 | - Updated the versions of the AWS crate dependencies
62 | 
63 | ## [0.6.0](https://github.com/near/near-lake-framework/compare/v0.5.2...v0.6.0)
64 | 
65 | - Upgrade the underlying dependency `near-indexer-primitives` to versions between 0.15 and 0.16
66 | 
67 | ### Breaking change
68 | 
69 | `near-indexer-primitives` reflects some breaking changes in the data types. Some of the fields that were previously
a base64-encoded `String` are now a raw `Vec<u8>`:
71 | 
72 | - `views::ActionView::FunctionCall.args`
73 | - `views::QueryResponseKind::ViewState`
74 | - `views::ExecutionStatusView::SuccessValue`
75 | 
76 | **Refer to this [`nearcore` commit](https://github.com/near/nearcore/commit/8e9be9fff4d520993c81b0e3738c0f223a9538c0) to find all the changes of this kind.**
77 | 
78 | ## [0.5.2](https://github.com/near/near-lake-framework/compare/v0.5.1...v0.5.2)
79 | 
80 | - Fixed the bug, introduced in 0.5.1, that caused a lag of 100 blocks
81 | 
82 | ## [0.5.1](https://github.com/near/near-lake-framework/compare/v0.5.0...v0.5.1)
83 | 
84 | - Avoid spiky latency by preloading streaming block heights
85 | 
86 | ## [0.5.0](https://github.com/near/near-lake-framework/compare/v0.4.1...v0.5.0) - 2022-06-16
87 | 
88 | - Cleaned up unused dependencies
89 | - Added the configuration option `blocks_preload_pool_size` to control the size of the pool of
90 |   preloaded blocks (100 remains the default)
91 | - Update AWS dependencies to `0.13.0`
92 | 
93 | ### Breaking change
94 | 
95 | - Dropped the previously allowed way to instantiate LakeConfig by manually
96 |   initializing the public fields in favor of
97 |   [the builder pattern](https://docs.rs/near-lake-framework/0.4.1/near_lake_framework/struct.LakeConfigBuilder.html)
98 | 
99 | ## [0.4.1](https://github.com/near/near-lake-framework/compare/v0.4.0...v0.4.1) - 2022-06-14
100 | 
101 | - Bumped the minimum required version of `serde_json` to 1.0.75 to avoid
102 |   confusing errors when the `arbitrary_precision` feature is enabled.
103 | - Extended the list of supported near-primitives versions from 0.12.0
104 |   to >=0.12.0,<0.15.0 to help downstream projects avoid duplicate versions
105 |   of near-primitives and its dependencies.
106 | - Reduced the verbosity level of recoverable errors from `ERROR` to `WARN`
107 | 
108 | ## [0.4.0](https://github.com/near/near-lake-framework/compare/v0.3.0...v0.4.0) - 2022-05-17
109 | 
110 | - Remove calls to `.unwrap()` and `.expect()` within the stream sender that
111 |   could panic. Instead, a `Result` is returned from the sender task.
112 | - Remove calls to `.unwrap()` and `.expect()` within the `s3_fetchers` module
113 | 
114 | ### Breaking change
115 | 
116 | - The `streamer()` function now returns a tuple, with the first element being a
117 |   `JoinHandle<Result<(), Error>>` that you can use to gracefully capture any
118 |   errors that occurred within the sender task.
If you don't care about errors,
119 |   you can easily adapt by changing:
120 | ```rust
121 | let receiver = near_lake_framework::streamer(settings);
122 | ```
123 |   to this instead:
124 | ```rust
125 | let (_, receiver) = near_lake_framework::streamer(settings);
126 | ```
127 | 
128 | ## [0.3.0](https://github.com/near/near-lake-framework/compare/v0.2.0...v0.3.0) - 2022-05-10
129 | 
130 | - Introduce `LakeConfigBuilder` for creating configs
131 | ```rust
132 | let config = LakeConfigBuilder::default()
133 |     .testnet()
134 |     .start_block_height(88220926)
135 |     .build()
136 |     .expect("Failed to build LakeConfig");
137 | ```
138 | - Now you can provide a custom AWS SDK S3 `Config`
139 | ```rust
140 | use aws_sdk_s3::Endpoint;
141 | use http::Uri;
142 | use near_lake_framework::LakeConfigBuilder;
143 | 
144 | let aws_config = aws_config::from_env().load().await;
145 | let s3_conf = aws_sdk_s3::config::Builder::from(&aws_config)
146 |     .endpoint_resolver(
147 |         Endpoint::immutable("http://0.0.0.0:9000".parse::<Uri>().unwrap()),
148 |     )
149 |     .build();
150 | 
151 | let config = LakeConfigBuilder::default()
152 |     .s3_config(s3_conf)
153 |     .s3_bucket_name("near-lake-data-custom")
154 |     .start_block_height(1)
155 |     .build()
156 |     .expect("Failed to build LakeConfig");
157 | ```
158 | 
159 | ### Breaking change
160 | 
161 | `LakeConfig` has a breaking change: we've removed `s3_endpoint` and added `s3_config`. Please consider migrating to `LakeConfigBuilder` instead of directly crafting the `LakeConfig`.
162 | 
163 | [0.3.0]: https://github.com/near/near-lake-framework/releases/tag/v0.3.0
164 | 
165 | ## [0.2.0] - 2022-04-25
166 | 
167 | The first public release. See the [announcement on NEAR Gov Forum](https://gov.near.org/t/announcement-near-lake-framework-brand-new-word-in-indexer-building-approach/17668)
168 | 
169 | > Release Page: <https://github.com/near/near-lake-framework/releases/tag/v0.2.0>
170 | 
171 | [0.2.0]: https://github.com/near/near-lake-framework/releases/tag/v0.2.0
--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
1 | [workspace]
2 | members = [
3 |     "lake-framework",
4 |     "lake-primitives",
5 |     "lake-parent-transaction-cache",
6 |     "lake-context-derive",
7 | ]
8 | 
9 | # cargo-workspaces
10 | [workspace.package]
11 | version = "0.8.0-beta.4"
12 | license = "MIT OR Apache-2.0"
13 | repository = "https://github.com/near/near-lake-framework-rs"
14 | description = "Library to connect to the NEAR Lake S3 and stream the data"
15 | categories = ["asynchronous", "api-bindings", "network-programming"]
16 | keywords = ["near", "near-lake", "near-indexer"]
17 | authors = ["Near Inc"]
18 | rust-version = "1.69.0"
--------------------------------------------------------------------------------
/LICENSE-APACHE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 | 
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 | 
7 | 1. Definitions.
8 | 
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 | 
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 
134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | -------------------------------------------------------------------------------- /LICENSE-MIT: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy 4 | of this software and associated documentation files (the "Software"), to deal 5 | in the Software without restriction, including without limitation the rights 6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | copies of the Software, and to permit persons to whom the Software is 8 | furnished to do so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in 11 | all copies or substantial portions of the Software. 
12 | 
13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
19 | THE SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # near-lake-framework-rs
2 | 
3 | Available in programming languages: **Rust** | [Javascript](https://github.com/near/near-lake-framework-js)
4 | 
5 | NEAR Lake Framework is a small library companion to [NEAR Lake](https://github.com/near/near-lake). It allows you to build
6 | your own indexer that subscribes to the stream of blocks from the NEAR Lake data source and create your own logic to process
7 | the NEAR Protocol data.
8 | 
9 | [![crates.io](https://img.shields.io/crates/v/near-lake-framework?label=latest)](https://crates.io/crates/near-lake-framework)
10 | [![Documentation](https://docs.rs/near-lake-framework/badge.svg)](https://docs.rs/near-lake-framework)
11 | ![MIT or Apache 2.0 licensed](https://img.shields.io/crates/l/near-lake-framework.svg)
12 | 
13 | ---
14 | 
15 | [Official NEAR Lake Framework High-level update announcement](https://near.org/near/widget/PostPage?accountId=khorolets.near&blockHeight=93659695) made on NEAR.org. The post announces the release of the beta version of NEAR Lake Framework 0.8.0 and includes an overview of the new approach and features from the High-level update.
16 | 
17 | ---
18 | 
19 | ## Example
20 | 
21 | ```rust
22 | use futures::StreamExt;
23 | use near_lake_framework::LakeConfigBuilder;
24 | 
25 | #[tokio::main]
26 | async fn main() -> anyhow::Result<()> {
27 |     // create a NEAR Lake Framework config
28 |     let config = LakeConfigBuilder::default()
29 |         .testnet()
30 |         .start_block_height(82422587)
31 |         .build()
32 |         .expect("Failed to build LakeConfig");
33 | 
34 |     // instantiate the NEAR Lake Framework Stream
35 |     let (sender, stream) = near_lake_framework::streamer(config);
36 | 
37 |     // read the stream events and pass them to a handler function with
38 |     // concurrency 1
39 |     let mut handlers = tokio_stream::wrappers::ReceiverStream::new(stream)
40 |         .map(|streamer_message| handle_streamer_message(streamer_message))
41 |         .buffer_unordered(1usize);
42 | 
43 |     while let Some(_handle_message) = handlers.next().await {}
44 |     drop(handlers); // close the channel so the sender will stop
45 | 
46 |     // propagate errors from the sender
47 |     match sender.await {
48 |         Ok(Ok(())) => Ok(()),
49 |         Ok(Err(e)) => Err(e),
50 |         Err(e) => Err(anyhow::Error::from(e)), // JoinError
51 |     }
52 | }
53 | 
54 | // The handler function to take the entire `StreamerMessage`
55 | // and print the block height and number of shards
56 | async fn handle_streamer_message(
57 |     streamer_message: near_lake_framework::near_indexer_primitives::StreamerMessage,
58 | ) {
59 |     eprintln!(
60 |         "{} / shards {}",
61 |         streamer_message.block.header.height,
62 |         streamer_message.shards.len()
63 |     );
64 | }
65 | ```
66 | 
67 | For more information, [refer to the docs](https://docs.rs/near-lake-framework)
68 | 
69 | ### Tutorials
70 | 
71 | - Video tutorial about [`near-examples/near-lake-accounts-watcher`](https://github.com/near-examples/near-lake-accounts-watcher) https://youtu.be/GsF7I93K-EQ
72 | - [Migrating to NEAR Lake Framework](https://near-indexers.io/tutorials/lake/migrating-to-near-lake-framework) from [NEAR Indexer Framework](https://near-indexers.io/docs/projects/near-indexer-framework)
73 | 
74 | ### More examples
75 | 
76 | We're keeping a set of examples in the [examples](./examples/) folder. The examples there are always up-to-date with the latest version of the NEAR Lake Framework.
77 | 
78 | And here are some more examples. Although they are not up-to-date with the latest version of the NEAR Lake Framework, they can still be used as a reference, and we try to keep them updated as well.
79 | 
80 | - [`near-examples/near-lake-raw-printer`](https://github.com/near-examples/near-lake-raw-printer) simple example of a data printer built on top of NEAR Lake Framework
81 | - [`near-examples/near-lake-accounts-watcher`](https://github.com/near-examples/near-lake-accounts-watcher) another simple example of the indexer built on top of NEAR Lake Framework for a tutorial purpose
82 | - [`near-examples/indexer-tx-watcher-example-lake`](https://github.com/near-examples/indexer-tx-watcher-example-lake) an example of the indexer built on top of NEAR Lake Framework that watches for transactions related to specified account(s)
83 | - [`octopus-network/octopus-near-indexer-s3`](https://github.com/octopus-network/octopus-near-indexer-s3) a community-made project that uses NEAR Lake Framework
84 | 
85 | ## How to use
86 | 
87 | ### Dependencies
88 | 
89 | Add the following dependencies to your `Cargo.toml`:
90 | 
91 | ```toml
92 | ...
93 | [dependencies]
94 | futures = "0.3.5"
95 | itertools = "0.10.3"
96 | tokio = { version = "1.1", features = ["sync", "time", "macros", "rt-multi-thread"] }
97 | tokio-stream = { version = "0.1" }
98 | 
99 | # NEAR Lake Framework
100 | near-lake-framework = "0.6.1"
101 | ```
102 | 
103 | ## Cost estimates (Updated Mar 10, 2023 with more precise calculations)
104 | 
105 | **TL;DR** approximately $20 per month (for AWS S3 access, paid directly to AWS) for reading fresh blocks
106 | 
107 | ### Historical indexing
108 | 
109 | | Blocks | GET | LIST | Subtotal GET | Subtotal LIST | Total $ |
110 | |---|---|---|---|---|---|
111 | | 1000 | 5000 | 4 | 0.00215 | 0.0000216 | $0.00 |
112 | | 86,400 | 432000 | 345.6 | 0.18576 | 0.00186624 | $0.19 |
113 | | 2,592,000 | 12960000 | 10368 | 5.5728 | 0.0559872 | $5.63 |
114 | | 77,021,059 | 385105295 | 308084.236 | 165.5952769 | 1.663654874 | $167.26 |
115 | 
116 | **Note:** ~77M blocks was the total number of blocks at the moment of calculation.
117 | 
118 | **86,400 blocks is the approximate number of blocks per day** (1 block per second * 60 seconds * 60 minutes * 24 hours)
119 | 
120 | **2,592,000 blocks is the approximate number of blocks per month** (86,400 blocks per day * 30 days)
121 | 
122 | ### Tip of the network indexing
123 | 
124 | | Blocks | GET | LIST | Subtotal GET | Subtotal LIST | Total $ |
125 | |---|---|---|---|---|---|
126 | | 1000 | 5000 | 1000 | 0.00215 | 0.0054 | $0.01 |
127 | | 86,400 | 432000 | 86,400 | 0.18576 | 0.46656 | $0.65 |
128 | | 2,592,000 | 12960000 | 2,592,000 | 5.5728 | 13.9968 | $19.57 |
129 | | 77,021,059 | 385105295 | 77,021,059 | 165.5952769 | 415.9137186 | $581.51 |
130 | 
131 | Explanation:
132 | 
133 | Assuming NEAR Protocol produces exactly 1 block per second (in reality it does not; the average block production time is ~1.3s), a full day consists of 86,400 seconds, which is the maximum number of blocks that can be produced.
134 | 
135 | According to the [Amazon S3 prices](https://aws.amazon.com/s3/pricing/?nc1=h_ls), `list` requests are charged $0.0054 per 1,000 requests and `get` requests are charged $0.00043 per 1,000 requests.
136 | 
137 | Calculations (assuming we are following the tip of the network all the time):
138 | 
139 | ```
140 | 86,400 blocks per day * 5 requests for each block / 1,000 requests * $0.00043 per 1k GET requests = $0.19 per day * 30 days = $5.57
141 | ```
142 | **Note:** 5 requests for each block means we have 4 shards (1 file for common block data and 4 separate files for each shard)
143 | 
144 | And the number of `list` requests we need to perform over 30 days:
145 | 
146 | ```
147 | 86,400 blocks per day / 1,000 requests * $0.0054 per 1k LIST requests = $0.47 per day * 30 days = $14.00
148 | 
149 | $5.57 + $14.00 = $19.57
150 | ```
151 | 
152 | The price depends on the number of shards.
153 | 
154 | ## Future plans
155 | 
156 | We use Milestones with clearly defined acceptance criteria:
157 | 
158 | * [x] [MVP](https://github.com/near/near-lake-framework/milestone/1)
159 | * [ ] [0.8 High-level update](https://github.com/near/near-lake-framework-rs/milestone/3)
160 | * [ ] [1.0](https://github.com/near/near-lake-framework/milestone/2)
--------------------------------------------------------------------------------
/examples/README.md:
--------------------------------------------------------------------------------
1 | # NEAR Lake Framework Examples
2 | 
3 | This directory contains several example scripts showcasing the usage of the NEAR Lake Framework. Each example demonstrates different aspects and features of the framework.
Below is a brief description of each example:
4 | 
5 | ## simple.rs
6 | 
7 | A simple example of how to use the Lake Framework. This indexer will listen to the NEAR blockchain and print the block height of each block.
8 | 
9 | ```bash
10 | $ cd lake-framework
11 | $ cargo run --example simple
12 | ```
13 | 
14 | ## actions.rs
15 | 
16 | This example shows how to filter actions in a block. It is a more real-life example than the simple one. It follows the NEAR Social contract and prints all function calls made to it.
17 | 
18 | ```bash
19 | $ cd lake-framework
20 | $ cargo run --example actions
21 | ```
22 | 
23 | ## nft_indexer.rs
24 | 
25 | This is a more complex real-life example of how to use the NEAR Lake Framework.
26 | 
27 | It is going to follow the network and watch for the Events according to the [Events Format][1]. It will monitor for nft_mint events from the known marketplaces, such as Mintbase and Paras, and index them, printing the results in the terminal.
28 | 
29 | [1]: https://nomicon.io/Standards/EventsFormat
30 | 
31 | ```bash
32 | $ cd lake-framework
33 | $ cargo run --example nft_indexer
34 | ```
35 | 
36 | ## with_context.rs
37 | 
38 | This example shows how to use a context with the Lake Framework. It follows the NEAR Social contract and records the block height along with the number of calls to the contract.
39 | 
40 | ```bash
41 | $ cd lake-framework
42 | $ cargo run --example with_context
43 | ```
44 | 
45 | ## with_context_parent_tx_cache.rs
46 | 
47 | This example shows how to use the `ParentTransactionCache` context with the Lake Framework. It follows the NEAR Social contract and caches the parent Transaction for the Receipts. Thus we are able to capture the Transaction where the change to the contract state started.
48 | 
49 | ```bash
50 | $ cd lake-parent-transaction-cache
51 | $ cargo run --example with_context_parent_tx_cache
52 | ```
--------------------------------------------------------------------------------
/examples/actions.rs:
--------------------------------------------------------------------------------
1 | //! This example shows how to filter actions in a block.
2 | //! It is a more real-life example than the simple example.
3 | //! It is going to follow the NEAR Social contract and print all function calls to it.
4 | use near_lake_framework::near_lake_primitives;
5 | // We need to import this trait to use the `as_function_call` method.
6 | use near_lake_primitives::actions::ActionMetaDataExt;
7 | 
8 | const CONTRACT_ID: &str = "social.near";
9 | 
10 | fn main() -> anyhow::Result<()> {
11 |     eprintln!("Starting...");
12 |     // Lake Framework start boilerplate
13 |     near_lake_framework::LakeBuilder::default()
14 |         .mainnet()
15 |         .start_block_height(88444526)
16 |         .build()?
17 |         // developer-defined async function that handles each block
18 |         .run(print_function_calls_to_my_account)?;
19 |     Ok(())
20 | }
21 | 
22 | async fn print_function_calls_to_my_account(
23 |     mut block: near_lake_primitives::block::Block,
24 | ) -> anyhow::Result<()> {
25 |     let block_height = block.block_height();
26 |     let actions: Vec<&near_lake_primitives::actions::FunctionCall> = block
27 |         .actions()
28 |         .filter(|action| action.receiver_id().as_str() == CONTRACT_ID)
29 |         .filter_map(|action| action.as_function_call())
30 |         .collect();
31 | 
32 |     if !actions.is_empty() {
33 |         println!("Block #{:?}\n{:#?}", block_height, actions);
34 |     }
35 | 
36 |     Ok(())
37 | }
--------------------------------------------------------------------------------
/examples/nft_indexer.rs:
--------------------------------------------------------------------------------
1 | //! This is a more complex real-life example of how to use the NEAR Lake Framework.
2 | //!
3 | //! It is going to follow the network and watch for the Events according to the
4 | //! [Events Format][1]. It will monitor for nft_mint events from the known
5 | //! marketplaces, such as Mintbase and Paras, and index them to print in the terminal.
6 | //!
7 | //! [1]: https://nomicon.io/Standards/EventsFormat
8 | use near_lake_framework::near_lake_primitives;
9 | use regex::Regex;
10 | 
11 | use once_cell::sync::Lazy;
12 | 
13 | static MINTBASE_STORE_REGEXP: Lazy<Regex> =
14 |     Lazy::new(|| Regex::new(r"\.mintbase\d+\.near$").unwrap());
15 | 
16 | fn main() -> anyhow::Result<()> {
17 |     eprintln!("Starting...");
18 |     // Lake Framework start boilerplate
19 |     near_lake_framework::LakeBuilder::default()
20 |         .testnet()
21 |         .start_block_height(112205773)
22 |         .build()?
23 |         .run(handle_block)?; // developer-defined async function that handles each block
24 |     Ok(())
25 | }
26 | 
27 | async fn handle_block(mut block: near_lake_primitives::block::Block) -> anyhow::Result<()> {
28 |     // Indexing lines START
29 |     let nfts: Vec<NFTReceipt> = block
30 |         .events() // fetching all the events that occurred in the block
31 |         .filter(|event| event.standard() == "nep171")
32 |         .filter(|event| event.event() == "nft_mint") // filter them by "nft_mint" event only
33 |         .filter_map(|event| parse_event(event))
34 |         .collect();
35 |     // Indexing lines END
36 | 
37 |     if !nfts.is_empty() {
38 |         println!("We caught freshly minted NFTs!\n{:#?}", nfts);
39 |     }
40 |     Ok(())
41 | }
42 | 
43 | // ================================================================
44 | // The following lines define structures and methods that support
45 | // the goal of indexing NFT MINT events and printing links to newly
46 | // created NFTs.
47 | // These lines are not related to the NEAR Lake Framework.
48 | // This logic is developer-defined and tailored to their indexing needs.
49 | // ================================================================
50 | 
51 | /// Parses the given event to extract NFT data for known Marketplaces (Mintbase and Paras).
52 | ///
53 | /// The function parses the event data to extract the owner and link to the NFT, then filters out any
54 | /// Marketplaces or contracts that it doesn't know how to parse. The resulting NFT data is returned
55 | /// as an `Option<NFTReceipt>`. Note that the logic used in this function is specific to the needs
56 | /// of this application and does not relate to the Lake Framework.
57 | ///
58 | /// # Arguments
59 | ///
60 | /// * `event` - The event to parse for NFT data.
61 | ///
62 | /// # Returns
63 | ///
64 | /// An `Option<NFTReceipt>` containing the extracted NFT data, or `None` if the event data could not
65 | /// be parsed.
66 | fn parse_event(event: &near_lake_primitives::events::Event) -> Option<NFTReceipt> {
67 |     let marketplace = {
68 |         if MINTBASE_STORE_REGEXP.is_match(event.related_receipt_receiver_id().as_str()) {
69 |             Marketplace::Mintbase
70 |         } else if event.related_receipt_receiver_id().as_str() == "x.paras.near" {
71 |             Marketplace::Paras
72 |         } else {
73 |             Marketplace::Unknown
74 |         }
75 |     };
76 | 
77 |     if let Some(event_data) = event.data() {
78 |         if let Some(nfts) = marketplace
79 |             .convert_event_data_to_nfts(event_data.clone(), event.related_receipt_receiver_id())
80 |         {
81 |             Some(NFTReceipt {
82 |                 receipt_id: event.related_receipt_id().to_string(),
83 |                 marketplace_name: marketplace.name(),
84 |                 nfts,
85 |             })
86 |         } else {
87 |             None
88 |         }
89 |     } else {
90 |         None
91 |     }
92 | }
93 | 
94 | enum Marketplace {
95 |     Mintbase,
96 |     Paras,
97 |     Unknown,
98 | }
99 | 
100 | impl Marketplace {
101 |     fn name(&self) -> String {
102 |         match self {
103 |             Self::Mintbase => "Mintbase".to_string(),
104 |             Self::Paras => "Paras".to_string(),
105 |             Self::Unknown => "Unknown".to_string(),
106 |         }
107 |     }
108 |     fn convert_event_data_to_nfts(
109 |         &self,
110 |         event_data: serde_json::Value,
111 |         receiver_id: &near_lake_primitives::near_primitives::types::AccountId,
112 |     ) -> Option<Vec<NFT>> {
113 |         match self {
114 |             Self::Mintbase => Some(self.mintbase(event_data, receiver_id)),
115 |             Self::Paras => Some(self.paras(event_data, receiver_id)),
116 |             Self::Unknown => None,
117 |         }
118 |     }
119 | 
120 |     fn paras(
121 |         &self,
122 |         event_data: serde_json::Value,
123 |         receiver_id: &near_lake_primitives::near_primitives::types::AccountId,
124 |     ) -> Vec<NFT> {
125 |         let paras_event_data = serde_json::from_value::<Vec<NftMintLog>>(event_data)
126 |             .expect("Failed to parse NftMintLog");
127 | 
128 |         paras_event_data
129 |             .iter()
130 |             .map(|nft_mint_log| NFT {
131 |                 owner: nft_mint_log.owner_id.clone(),
132 |                 links: nft_mint_log
133 |                     .token_ids
134 |                     .iter()
135 |                     .map(|token_id| {
136 |                         format!(
137 |                             "https://paras.id/token/{}::{}/{}",
138 |                             receiver_id.to_string(),
139 |                             token_id.split(":").collect::<Vec<&str>>()[0],
140 |                             token_id,
141 |                         )
142 |                     })
143 |                     .collect(),
144 |             })
145 |             .collect()
146 |     }
147 | 
148 |     fn mintbase(
149 |         &self,
150 |         event_data: serde_json::Value,
151 |         receiver_id: &near_lake_primitives::near_primitives::types::AccountId,
152 |     ) -> Vec<NFT> {
153 |         let mintbase_event_data = serde_json::from_value::<Vec<NftMintLog>>(event_data)
154 |             .expect("Failed to parse NftMintLog");
155 | 
156 |         mintbase_event_data
157 |             .iter()
158 |             .map(|nft_mint_log| NFT {
159 |                 owner: nft_mint_log.owner_id.clone(),
160 |                 links: vec![format!(
161 |                     "https://mintbase.io/contract/{}/token/{}",
162 |                     receiver_id.to_string(),
163 |                     nft_mint_log.token_ids[0]
164 |                 )],
165 |             })
166 |             .collect()
167 |     }
168 | }
169 | 
170 | // We are allowing the dead_code lint because not all fields of the structures are used.
171 | // However, they are printed to the terminal for debugging purposes.
172 | #[allow(dead_code)]
173 | #[derive(Debug)]
174 | struct NFTReceipt {
175 |     receipt_id: String,
176 |     marketplace_name: String,
177 |     nfts: Vec<NFT>,
178 | }
179 | 
180 | // We are allowing the dead_code lint because not all fields of the structures are used.
181 | // However, they are printed to the terminal for debugging purposes.
182 | #[allow(dead_code)]
183 | #[derive(Debug)]
184 | struct NFT {
185 |     owner: String,
186 |     links: Vec<String>,
187 | }
188 | 
189 | #[derive(Debug, serde::Deserialize)]
190 | struct NftMintLog {
191 |     owner_id: String,
192 |     token_ids: Vec<String>,
193 |     // There is also a `memo` field, but it is not used in this example
194 |     // memo: Option<String>,
195 | }
--------------------------------------------------------------------------------
/examples/simple.rs:
--------------------------------------------------------------------------------
1 | //! A simple example of how to use the Lake Framework.
2 | //! This indexer will listen to the NEAR blockchain and print the block height of each block.
3 | 
4 | use near_lake_framework::near_lake_primitives;
5 | 
6 | fn main() -> anyhow::Result<()> {
7 |     eprintln!("Starting...");
8 |     // Lake Framework start boilerplate
9 |     near_lake_framework::LakeBuilder::default()
10 |         .testnet()
11 |         .start_block_height(112205773)
12 |         .build()?
13 |         .run(handle_block)?; // developer-defined async function that handles each block
14 |     Ok(())
15 | }
16 | 
17 | async fn handle_block(block: near_lake_primitives::block::Block) -> anyhow::Result<()> {
18 |     println!("Block {:?}", block.block_height());
19 | 
20 |     Ok(())
21 | }
--------------------------------------------------------------------------------
/examples/with_context.rs:
--------------------------------------------------------------------------------
1 | //! This example shows how to use a context with the Lake Framework.
2 | //! It is going to follow the NEAR Social contract and record the block height along
3 | //! with the number of calls to the contract.
4 | use near_lake_framework::{near_lake_primitives, LakeContext};
5 | use std::io::Write;
6 | // We need to import this trait to use the `as_function_call` method.
7 | use near_lake_primitives::actions::ActionMetaDataExt;
8 | 
9 | const CONTRACT_ID: &str = "social.near";
10 | 
11 | // This is the context we're going to use.
12 | // Lake::run_with_context requires the context to implement the LakeContext trait.
13 | // That trait requires implementing two methods: `execute_before_run` and `execute_after_run`.
14 | // However, we don't actually need them in our case of using the context.
15 | // That's why we're using the derive macro to implement the trait for us.
16 | // The macro will generate the default implementation of the methods. Those methods are empty.
17 | // By doing so, we don't need to implement the trait manually and can use the context as is.
18 | #[derive(Clone, LakeContext)]
19 | struct FileContext {
20 |     path: std::path::PathBuf,
21 | }
22 | 
23 | impl FileContext {
24 |     fn new(path: impl Into<std::path::PathBuf>) -> Self {
25 |         Self { path: path.into() }
26 |     }
27 | 
28 |     // append to the file
29 |     pub fn write(&self, value: &str) -> anyhow::Result<()> {
30 |         let mut file = std::fs::OpenOptions::new()
31 |             .create(true)
32 |             .append(true)
33 |             .open(&self.path)?;
34 |         file.write_all(value.as_bytes())?;
35 |         Ok(())
36 |     }
37 | }
38 | 
39 | fn main() -> anyhow::Result<()> {
40 |     println!("Starting...");
41 |     // Create the context
42 |     let context = FileContext::new("./output.txt");
43 |     // Lake Framework start boilerplate
44 |     near_lake_framework::LakeBuilder::default()
45 |         .mainnet()
46 |         .start_block_height(88444526)
47 |         .build()?
48 |         // developer-defined async function that handles each block
49 |         .run_with_context(print_function_calls_to_my_account, &context)?;
50 |     Ok(())
51 | }
52 | 
53 | async fn print_function_calls_to_my_account(
54 |     mut block: near_lake_primitives::block::Block,
55 |     ctx: &FileContext,
56 | ) -> anyhow::Result<()> {
57 |     let block_height = block.block_height();
58 |     let actions: Vec<&near_lake_primitives::actions::FunctionCall> = block
59 |         .actions()
60 |         .filter(|action| action.receiver_id().as_str() == CONTRACT_ID)
61 |         .filter_map(|action| action.as_function_call())
62 |         .collect();
63 | 
64 |     if !actions.is_empty() {
65 |         // Here's the usage of the context.
66 |         ctx.write(
67 |             format!(
68 |                 "Block #{} - {} calls to {}\n",
69 |                 block_height,
70 |                 actions.len(),
71 |                 CONTRACT_ID
72 |             )
73 |             .as_str(),
74 |         )?;
75 |         println!("Block #{:?}\n{:#?}", block_height, actions);
76 |     }
77 | 
78 |     Ok(())
79 | }
--------------------------------------------------------------------------------
/examples/with_context_parent_tx_cache.rs:
--------------------------------------------------------------------------------
1 | //! This example shows how to use the ParentTransactionCache context with the Lake Framework.
2 | //! It is going to follow the NEAR Social contract and cache the parent Transaction for the Receipts.
3 | //! Thus we are able to capture the Transaction where the change to the contract state started.
4 | //! **WARNING**: ParentTransactionCache captures all the transactions in the block.
5 | //! That's why we filter it down to the single account we're watching here.
6 | use near_lake_framework::near_lake_primitives;
7 | use near_lake_primitives::CryptoHash;
8 | // We need to import this trait to use the `as_function_call` method.
9 | use near_lake_parent_transaction_cache::{ParentTransactionCache, ParentTransactionCacheBuilder};
10 | use near_lake_primitives::actions::ActionMetaDataExt;
11 | 
12 | const CONTRACT_ID: &str = "social.near";
13 | 
14 | fn main() -> anyhow::Result<()> {
15 |     println!("Starting...");
16 |     // Building the ParentTransactionCache context.
17 |     // The way the context is instantiated depends on the implementation developers choose.
18 |     // ParentTransactionCache follows the Builder pattern.
19 |     // This will create the context with the default size of the cache (100_000)
20 |     // and a filter for the account we're watching.
21 |     // It will skip caching any transactions that are not related to the account.
22 |     let parent_transaction_cache_ctx = ParentTransactionCacheBuilder::default()
23 |         .for_account(String::from(CONTRACT_ID).try_into()?)
24 |         .build()?;
25 |     // Lake Framework start boilerplate
26 |     near_lake_framework::LakeBuilder::default()
27 |         .mainnet()
28 |         .start_block_height(88444526)
29 |         .build()?
30 |         // developer-defined async function that handles each block
31 |         .run_with_context(print_function_call_tx_hash, &parent_transaction_cache_ctx)?;
32 |     Ok(())
33 | }
34 | 
35 | async fn print_function_call_tx_hash(
36 |     mut block: near_lake_primitives::block::Block,
37 |     ctx: &ParentTransactionCache,
38 | ) -> anyhow::Result<()> {
39 |     // Cache has been updated before this function is called.
40 |     let block_height = block.block_height();
41 |     let actions: Vec<(
42 |         &near_lake_primitives::actions::FunctionCall,
43 |         Option<CryptoHash>,
44 |     )> = block
45 |         .actions()
46 |         .filter(|action| action.receiver_id().as_str() == CONTRACT_ID)
47 |         .filter_map(|action| action.as_function_call())
48 |         .map(|action| {
49 |             (
50 |                 action,
51 |                 ctx.get_parent_transaction_hash(&action.receipt_id()),
52 |             )
53 |         })
54 |         .collect();
55 | 
56 |     if !actions.is_empty() {
57 |         // Here's the usage of the context.
58 |         println!("Block #{:?}\n{:#?}", block_height, actions);
59 |     }
60 | 
61 |     Ok(())
62 | }
--------------------------------------------------------------------------------
/lake-context-derive/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "near-lake-context-derive"
3 | description = "Derive macro for LakeContext"
4 | edition = "2021"
5 | version.workspace = true
6 | license.workspace = true
7 | repository.workspace = true
8 | 
9 | [lib]
10 | proc-macro = true
11 | 
12 | [dependencies]
13 | syn = "2.0"
14 | quote = "1.0"
--------------------------------------------------------------------------------
/lake-context-derive/README.md:
--------------------------------------------------------------------------------
1 | # NEAR Lake Context Derive
2 | 
3 | Lake Context Derive is a Rust crate that provides a derive macro for easy and convenient implementation of the `near_lake_framework::LakeContextExt` trait. This trait has two functions, `execute_before_run` and `execute_after_run`, that are executed before and after the user-provided indexer function, respectively.
4 | 
5 | ## Usage
6 | 
7 | The Lake Context Derive macro can be utilized by annotating the context struct with `#[derive(LakeContext)]`. This trait implementation will then facilitate the combination of different contexts. For instance, to use a `ParentTransactionCache` with some additional data, one would define a context like:
8 | 
9 | ```ignore
10 | use near_lake_parent_transaction_cache::ParentTransactionCache;
11 | 
12 | #[derive(LakeContext)]
13 | struct MyContext {
14 |     db_connection_string: String,
15 |     parent_tx_cache: ParentTransactionCache,
16 | }
17 | ```
18 | 
19 | ### Instantiation
20 | 
21 | You can create an instance of your context as follows:
22 | 
23 | ```ignore
24 | use near_lake_parent_transaction_cache::{ParentTransactionCacheBuilder};
25 | 
26 | let my_context = MyContext {
27 |     db_connection_string: String::from("postgres://user:pass@host/db"),
28 |     parent_tx_cache: ParentTransactionCacheBuilder::default().build().unwrap(),
29 | };
30 | ```
31 | 
32 | ### User Indexer Function
33 | 
34 | This will simplify your indexer function signature. It now needs only the context as an additional parameter:
35 | 
36 | ```ignore
37 | async fn handle_block(
38 |     mut block: Block,
39 |     ctx: &MyContext,
40 | ) -> anyhow::Result<()> {
41 |     // body
42 | }
43 | ```
44 | 
45 | The Lake Context Derive will look for all fields in the struct that implement `LakeContextExt`, and will append their trait methods to the top-level calls. For `execute_before_run`, this is done in ascending order, and for `execute_after_run` in descending order; the sketch below illustrates the generated shape.
46 | 
47 | ## Purpose
48 | 
49 | The purpose of the Lake Context Derive crate is to alleviate some of the common pain points in context development and usage in Rust. By encapsulating and standardizing the handling of these function calls, we aim to create a more accessible and user-friendly approach to context implementation.
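
As an illustration of the method ordering described above, for the `MyContext` struct from the Usage section the derive emits an impl of roughly this shape (a hand-written sketch of the expansion, not the macro's literal output; see `src/lib.rs` below for the real thing):

```ignore
impl near_lake_framework::LakeContextExt for MyContext {
    fn execute_before_run(&self, block: &mut near_lake_primitives::block::Block) {
        // Qualifying fields are invoked in declaration (ascending) order...
        self.parent_tx_cache.execute_before_run(block);
    }

    fn execute_after_run(&self) {
        // ...and in reverse (descending) order after the indexer function has run.
        self.parent_tx_cache.execute_after_run();
    }
}
```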
50 | 
51 | ## Collaboration
52 | 
53 | We hope that this tool will be useful for the Rust community and look forward to seeing how it can be used in a range of different projects. We encourage community contributions, whether that's through sharing your own unique context implementations or by providing feedback and suggestions for how we can continue to improve the Lake Context Derive.
54 | 
--------------------------------------------------------------------------------
/lake-context-derive/src/lib.rs:
--------------------------------------------------------------------------------
1 | #![doc = include_str!("../README.md")]
2 | use proc_macro::TokenStream;
3 | use quote::quote;
4 | 
5 | #[proc_macro_derive(LakeContext)]
6 | pub fn lake_context_derive(input: TokenStream) -> TokenStream {
7 |     let input = syn::parse_macro_input!(input as syn::DeriveInput);
8 | 
9 |     // Used in the quasi-quotation below as `#name`.
10 |     let name = input.ident;
11 | 
12 |     // Build the trait impl.
13 |     // Iterate over all fields, and for each field generate a call to `execute_before_run`.
14 |     // If the field is an impl of LakeContext, then call `execute_before_run` on the struct.
15 | 
16 |     let fields = if let syn::Data::Struct(syn::DataStruct {
17 |         fields: syn::Fields::Named(syn::FieldsNamed { named, .. }),
18 |         ..
19 |     }) = &input.data
20 |     {
21 |         named
22 |     } else {
23 |         unimplemented!();
24 |     };
25 | 
26 |     let calls_before_run = fields
27 |         .iter()
28 |         .filter(|f| {
29 |             let ty = &f.ty;
30 |             if let syn::Type::Path(syn::TypePath { path, .. }) = ty {
31 |                 if let Some(ident) = path.get_ident() {
32 |                     ident == "LakeContext"
33 |                 } else {
34 |                     false
35 |                 }
36 |             } else {
37 |                 false
38 |             }
39 |         })
40 |         .map(|f| {
41 |             let name = &f.ident;
42 |             quote! { self.#name.execute_before_run(block); }
43 |         });
44 | 
45 |     let calls_after_run = fields
46 |         .iter()
47 |         .rev()
48 |         .filter(|f| {
49 |             let ty = &f.ty;
50 |             if let syn::Type::Path(syn::TypePath { path, .. }) = ty {
51 |                 if let Some(ident) = path.get_ident() {
52 |                     ident == "LakeContext"
53 |                 } else {
54 |                     false
55 |                 }
56 |             } else {
57 |                 false
58 |             }
59 |         })
60 |         .map(|f| {
61 |             let name = &f.ident;
62 |             quote! { self.#name.execute_after_run(); }
63 |         });
64 | 
65 |     let expanded = quote! {
66 |         // The generated impl.
67 |         impl near_lake_framework::LakeContextExt for #name {
68 |             fn execute_before_run(&self, block: &mut near_lake_primitives::block::Block) {
69 |                 #( #calls_before_run )*
70 |             }
71 | 
72 |             fn execute_after_run(&self) {
73 |                 #( #calls_after_run )*
74 |             }
75 |         }
76 |     };
77 | 
78 |     // Hand the output tokens back to the compiler.
79 | proc_macro::TokenStream::from(expanded) 80 | } 81 | -------------------------------------------------------------------------------- /lake-framework/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "near-lake-framework" 3 | description = "Library to connect to the NEAR Lake S3 and stream the data" 4 | edition = "2021" 5 | version.workspace = true 6 | license.workspace = true 7 | repository.workspace = true 8 | 9 | [dependencies] 10 | aws-config = { version = "1.4.0", features = ["behavior-version-latest"] } 11 | aws-types = "1.2.0" 12 | aws-credential-types = "1.2.0" 13 | aws-sdk-s3 = "1.24.0" 14 | async-stream = "0.3.3" 15 | async-trait = "0.1.64" 16 | derive_builder = "0.11.2" 17 | futures = "0.3.23" 18 | serde = { version = "1", features = ["derive"] } 19 | serde_json = "1.0.75" 20 | thiserror = "1.0.38" 21 | tokio = { version = "1.1", features = ["sync", "time", "rt-multi-thread"] } 22 | tokio-stream = { version = "0.1" } 23 | tracing = "0.1.13" 24 | 25 | near-lake-primitives = { path = "../lake-primitives", version = "0.8.0-beta.2" } 26 | near-lake-context-derive = { path = "../lake-context-derive", version = "0.8.0-beta.2" } 27 | 28 | [dev-dependencies] 29 | aws-smithy-http = "0.60.0" 30 | aws-smithy-types = "1.0.0" 31 | # use by examples 32 | anyhow = "1.0.51" 33 | 34 | # used by nft_indexer example 35 | regex = "1.5.4" 36 | once_cell = "1.8.0" 37 | 38 | # used in the doc examples 39 | diesel = { version = "2", features = ["postgres_backend", "postgres"] } 40 | 41 | [[example]] 42 | name = "simple" 43 | path = "../examples/simple.rs" 44 | 45 | [[example]] 46 | name = "actions" 47 | path = "../examples/actions.rs" 48 | 49 | [[example]] 50 | name = "nft_indexer" 51 | path = "../examples/nft_indexer.rs" 52 | 53 | [[example]] 54 | name = "with_context" 55 | path = "../examples/with_context.rs" 56 | -------------------------------------------------------------------------------- /lake-framework/README.md: -------------------------------------------------------------------------------- 1 | # NEAR Lake Framework 2 | 3 | NEAR Lake Framework is a small library companion to [NEAR Lake](https://github.com/near/near-lake). It allows you to build 4 | your own indexer that subscribes to the stream of blocks from the NEAR Lake data source and create your own logic to process 5 | the NEAR Protocol data. 6 | 7 | ## Example 8 | 9 | ```no_run 10 | fn main() -> anyhow::Result<()> { 11 | near_lake_framework::LakeBuilder::default() 12 | .testnet() 13 | .start_block_height(112205773) 14 | .build()? 15 | .run(handle_block)?; 16 | Ok(()) 17 | } 18 | 19 | // The handler function to take the `Block` 20 | // and print the block height 21 | async fn handle_block( 22 | block: near_lake_primitives::block::Block, 23 | ) -> anyhow::Result<()> { 24 | eprintln!( 25 | "Block #{}", 26 | block.block_height(), 27 | ); 28 | # Ok(()) 29 | } 30 | ``` 31 | 32 | ### Pass the context to the function 33 | 34 | ```no_run 35 | #[derive(near_lake_framework::LakeContext)] 36 | struct MyContext { 37 | my_field: String 38 | } 39 | 40 | fn main() -> anyhow::Result<()> { 41 | 42 | let context = MyContext { 43 | my_field: "My value".to_string(), 44 | }; 45 | 46 | near_lake_framework::LakeBuilder::default() 47 | .testnet() 48 | .start_block_height(112205773) 49 | .build()? 
50 |         .run_with_context(handle_block, &context)?;
51 | 
52 |     Ok(())
53 | }
54 | 
55 | // The handler function to take the `Block`
56 | // and print the block height
57 | async fn handle_block(
58 |     block: near_lake_primitives::block::Block,
59 |     context: &MyContext,
60 | ) -> anyhow::Result<()> {
61 |     eprintln!(
62 |         "Block #{} / {}",
63 |         block.block_height(),
64 |         context.my_field,
65 |     );
66 | # Ok(())
67 | }
68 | ```
69 | 
70 | ## Parent Transaction for the Receipt Context
71 | 
72 | It is a long-standing problem that the NEAR Protocol doesn't provide the parent transaction hash in the receipt. This is an issue for any indexer that needs the parent transaction hash to build the transaction tree. We've got you covered with the [`lake-parent-transaction-cache`](../lake-parent-transaction-cache/) crate, which provides a cache of parent transaction hashes.
73 | 
74 | ```ignore
75 | use near_lake_framework::near_lake_primitives;
76 | use near_lake_primitives::CryptoHash;
77 | use near_lake_parent_transaction_cache::{ParentTransactionCache, ParentTransactionCacheBuilder};
78 | use near_lake_primitives::actions::ActionMetaDataExt;
79 | 
80 | fn main() -> anyhow::Result<()> {
81 |     let parent_transaction_cache_ctx = ParentTransactionCacheBuilder::default()
82 |         .build()?;
83 |     // Lake Framework start boilerplate
84 |     near_lake_framework::LakeBuilder::default()
85 |         .mainnet()
86 |         .start_block_height(88444526)
87 |         .build()?
88 |         // developer-defined async function that handles each block
89 |         .run_with_context(print_function_call_tx_hash, &parent_transaction_cache_ctx)?;
90 |     Ok(())
91 | }
92 | 
93 | async fn print_function_call_tx_hash(
94 |     mut block: near_lake_primitives::block::Block,
95 |     ctx: &ParentTransactionCache,
96 | ) -> anyhow::Result<()> {
97 |     // The cache has been updated before this function is called.
98 |     let block_height = block.block_height();
99 |     let actions: Vec<(
100 |         &near_lake_primitives::actions::FunctionCall,
101 |         Option<CryptoHash>,
102 |     )> = block
103 |         .actions()
104 |         .filter_map(|action| action.as_function_call())
105 |         .map(|action| {
106 |             (
107 |                 action,
108 |                 ctx.get_parent_transaction_hash(&action.receipt_id()),
109 |             )
110 |         })
111 |         .collect();
112 | 
113 |     if !actions.is_empty() {
114 |         // Here's the usage of the context.
115 |         println!("Block #{:?}\n{:#?}", block_height, actions);
116 |     }
117 | 
118 |     Ok(())
119 | }
120 | ```
121 | 
122 | ## Tutorials
123 | 
124 | - 
125 | - [Migrating to NEAR Lake Framework](https://near-indexers.io/tutorials/lake/migrating-to-near-lake-framework) from [NEAR Indexer Framework](https://near-indexers.io/docs/projects/near-indexer-framework)
126 | 
127 | ### More examples
128 | 
129 | You might want to have a look at the always up-to-date examples in the [`examples`](https://github.com/near/near-lake-framework-rs/tree/main/lake-framework/examples) folder.
130 | 
131 | Other examples that we try to keep up to date, though we might occasionally fall behind:
132 | 
133 | - simple example of a data printer built on top of NEAR Lake Framework
134 | - another simple example of the indexer built on top of NEAR Lake Framework for a tutorial purpose
135 | 
136 | - an example of the indexer built on top of NEAR Lake Framework that watches for transactions related to specified account(s)
137 | - a community-made project that uses NEAR Lake Framework
138 | 
139 | ## How to use
140 | 
141 | ### AWS S3 Credentials
142 | 
143 | To get objects from the AWS S3 bucket, you need to provide AWS credentials.
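If your environment already has credentials configured through the standard AWS toolchain (environment variables or `~/.aws/credentials`, both described below), the SDK's default provider chain will pick them up. A minimal sketch of that setup (the block height here is arbitrary):

```no_run
use near_lake_framework::LakeBuilder;

# #[tokio::main]
# async fn main() -> anyhow::Result<()> {
// Credentials and region are resolved from the environment/AWS profile.
let aws_config = aws_config::from_env().load().await;
let s3_config = aws_sdk_s3::config::Builder::from(&aws_config).build();

let lake = LakeBuilder::default()
    .mainnet()
    .start_block_height(80504433)
    .s3_config(s3_config)
    .build()?;
# Ok(())
# }
```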
144 | 
145 | #### Passing credentials to the config builder
146 | 
147 | ```rust
148 | use near_lake_framework::LakeBuilder;
149 | 
150 | # fn main() {
151 | let credentials = aws_credential_types::Credentials::new(
152 |     "AKIAIOSFODNN7EXAMPLE",
153 |     "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",
154 |     None,
155 |     None,
156 |     "custom_credentials",
157 | );
158 | let s3_config = aws_sdk_s3::Config::builder()
159 |     .credentials_provider(credentials)
160 |     .build();
161 | 
162 | let lake = LakeBuilder::default()
163 |     .s3_config(s3_config)
164 |     .s3_bucket_name("near-lake-data-custom")
165 |     .s3_region_name("eu-central-1")
166 |     .start_block_height(1)
167 |     .build()
168 |     .expect("Failed to build Lake");
169 | # }
170 | ```
171 | 
172 | **Never hardcode your credentials: it is insecure. Use the method described above to pass credentials that you read from CLI arguments, environment variables, or a secrets store.**
173 | 
174 | #### File-based AWS credentials
175 | The AWS default profile configuration created with `aws configure` looks similar to the following:
176 | 
177 | `~/.aws/credentials`
178 | ```text
179 | [default]
180 | aws_access_key_id=AKIAIOSFODNN7EXAMPLE
181 | aws_secret_access_key=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
182 | ```
183 | 
184 | [AWS docs: Configuration and credential file settings](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html)
185 | 
186 | #### Environment variables
187 | 
188 | Alternatively, you can provide your AWS credentials via the standard environment variables:
189 | 
190 | ```text
191 | $ export AWS_ACCESS_KEY_ID=AKIAIOSFODNN7EXAMPLE
192 | $ export AWS_SECRET_ACCESS_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
193 | $ export AWS_DEFAULT_REGION=eu-central-1
194 | ```
195 | 
196 | ### Dependencies
197 | 
198 | Add the following dependencies to your `Cargo.toml`:
199 | 
200 | ```toml
201 | ...
202 | [dependencies]
203 | futures = "0.3.5"
204 | itertools = "0.10.3"
205 | tokio = { version = "1.1", features = ["sync", "time", "macros", "rt-multi-thread"] }
206 | tokio-stream = { version = "0.1" }
207 | 
208 | # NEAR Lake Framework
209 | near-lake-framework = "0.8.0"
210 | ```
211 | 
212 | ### Custom S3 storage
213 | 
214 | In case you want to run your own [near-lake](https://github.com/near/near-lake) instance and store data in some S3-compatible storage ([Minio](https://min.io/) or [Localstack](https://localstack.cloud/), for example),
215 | you can override the default S3 API endpoint by passing a custom `aws_sdk_s3::config::Config` with the `endpoint_url` set:
216 | 
217 | - Run Minio:
218 | 
219 | ```bash
220 | $ mkdir -p /data/near-lake-custom && minio server /data
221 | ```
222 | 
223 | - Pass a custom `aws_sdk_s3::config::Config` to the [LakeBuilder]:
224 | 
225 | ```
226 | use near_lake_framework::LakeBuilder;
227 | 
228 | # #[tokio::main]
229 | # async fn main() -> anyhow::Result<()> {
230 | let aws_config = aws_config::from_env().load().await;
231 | let s3_config = aws_sdk_s3::config::Builder::from(&aws_config)
232 |     .endpoint_url("http://0.0.0.0:9000")
233 |     .build();
234 | 
235 | LakeBuilder::default()
236 |     .s3_bucket_name("near-lake-custom")
237 |     .s3_region_name("eu-central-1")
238 |     .start_block_height(0)
239 |     .s3_config(s3_config)
240 |     .build()
241 |     .expect("Failed to build Lake");
242 | 
243 | # Ok(())
244 | # }
245 | ```
246 | 
247 | ## Configuration
248 | 
249 | Everything should be configured before the start of your indexer application via the `LakeBuilder` struct.
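For example, a historical indexer that preloads more block heights and handles several blocks in parallel could be configured like this (a sketch; the start height and sizes are arbitrary, and `handle_block` stands in for your own indexing function):

```no_run
fn main() -> anyhow::Result<()> {
    near_lake_framework::LakeBuilder::default()
        .mainnet()
        .start_block_height(65231161)
        // Preload up to 200 block heights to reduce S3 `List` requests (default: 100).
        .blocks_preload_pool_size(200)
        // Process up to 4 blocks concurrently. Only increase this if your handler
        // doesn't rely on previous blocks having been processed (default: 1).
        .concurrency(4)
        .build()?
        .run(handle_block)?;
    Ok(())
}

# async fn handle_block(_block: near_lake_primitives::block::Block) -> anyhow::Result<()> { Ok(()) }
```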
250 | 
251 | Available parameters:
252 | 
253 | * [`start_block_height(value: u64)`](LakeBuilder::start_block_height) - block height to start the stream from
254 | * *optional* [`s3_bucket_name(value: impl Into<String>)`](LakeBuilder::s3_bucket_name) - provide the AWS S3 bucket name (you need to provide it if you use a custom S3-compatible service; otherwise you can use [LakeBuilder::mainnet] and [LakeBuilder::testnet])
255 | * *optional* [`s3_region_name(value: impl Into<String>)`](LakeBuilder::s3_region_name) - provide the AWS S3 region name (if you need to set a custom one)
256 | * *optional* [`s3_config(value: aws_sdk_s3::config::Config)`](LakeBuilder::s3_config) - provide a custom AWS SDK S3 Config
257 | 
258 | ## Cost estimates (Updated Mar 10, 2022 with more precise calculations)
259 | 
260 | **TL;DR** approximately $20 per month (for AWS S3 access, paid directly to AWS) for reading fresh blocks
261 | 
262 | ### Historical indexing
263 | 
264 | | Blocks | GET | LIST | Subtotal GET | Subtotal LIST | Total $ |
265 | |---|---|---|---|---|---|
266 | | 1000 | 5000 | 4 | 0.00215 | 0.0000216 | $0.00 |
267 | | 86,400 | 432000 | 345.6 | 0.18576 | 0.00186624 | $0.19 |
268 | | 2,592,000 | 12960000 | 10368 | 5.5728 | 0.0559872 | $5.63 |
269 | | 77,021,059 | 385105295 | 308084.236 | 165.5952769 | 1.663654874 | $167.26 |
270 | 
271 | **Note:** ~77M blocks was the total number of blocks at the moment of calculation.
272 | 
273 | **86,400 blocks is the approximate number of blocks per day** (1 block per second * 60 seconds * 60 minutes * 24 hours)
274 | 
275 | **2,592,000 blocks is the approximate number of blocks per month** (86,400 blocks per day * 30 days)
276 | 
277 | ### Tip of the network indexing
278 | 
279 | | Blocks | GET | LIST | Subtotal GET | Subtotal LIST | Total $ |
280 | |---|---|---|---|---|---|
281 | | 1000 | 5000 | 1000 | 0.00215 | 0.0054 | $0.01 |
282 | | 86,400 | 432000 | 86,400 | 0.18576 | 0.46656 | $0.65 |
283 | | 2,592,000 | 12960000 | 2,592,000 | 5.5728 | 13.9968 | $19.57 |
284 | | 77,021,059 | 385105295 | 77,021,059 | 165.5952769 | 415.9137186 | $581.51 |
285 | 
286 | Explanation:
287 | 
288 | Assume NEAR Protocol produces exactly 1 block per second (it actually doesn't; the average block production time is about 1.3s). A full day consists of 86,400 seconds, so that is the maximum number of blocks that can be produced per day.
289 | 
290 | According to the [Amazon S3 prices](https://aws.amazon.com/s3/pricing/?nc1=h_ls), `list` requests are charged at $0.0054 per 1,000 requests and `get` requests at $0.00043 per 1,000 requests.
291 | 
292 | Calculations (assuming we are following the tip of the network all the time):
293 | 
294 | ```text
295 | 86400 blocks per day * 5 requests for each block / 1000 requests * $0.00043 per 1k GET requests = $0.19 per day; $0.19 * 30 days = $5.7
296 | ```
297 | **Note:** 5 requests for each block assumes a network of 4 shards (1 file for the common block data plus 1 file per shard)
298 | 
299 | And the number of `list` requests we need to perform over 30 days:
300 | 
301 | ```text
302 | 86400 blocks per day / 1000 requests * $0.0054 per 1k LIST requests = $0.47 per day; $0.47 * 30 days = $14.1
303 | 
304 | $5.7 + $14.1 = $19.8
305 | ```
306 | 
307 | The price depends on the number of shards in the network.
308 | 
309 | ## Future plans
310 | 
311 | We use Milestones with clearly defined acceptance criteria:
312 | 
313 | * [x] [MVP](https://github.com/near/near-lake-framework/milestone/1)
314 | * [ ] [0.8 High-level update](https://github.com/near/near-lake-framework-rs/milestone/3)
315 | * [ ] [1.0](https://github.com/near/near-lake-framework/milestone/2)
316 | 
--------------------------------------------------------------------------------
/lake-framework/blocks/000000879765/block.json:
--------------------------------------------------------------------------------
1 | {
2 |   "author": "test.near",
3 |   "header": {
4 |     "height": 879765,
5 |     "prev_height": 879764,
6 |     "epoch_id": "Hp4sw9ZGSceYadnvh7NpYJVVK7rcdir48jfrsxvwKQu9",
7 |     "next_epoch_id": "4h5mecoLYVFeZxAMAX3Mq3GQfEnuvSAPPo9kEpr4rGUL",
8 |     "hash": "95K8Je1iAVqieVU8ZuGgSdbvYs8T9rL6ER1XnRekMGbj",
9 |     "prev_hash": "9Da84RTsubZPcLxzK1K6JkCnDnMn4DxaSRzJPtnYJXUM",
10 |     "prev_state_root": "6zDM1UGLsZ7HnyUofDrTF73gv5vk2N614ViDkXBkq4ej",
11 |     "chunk_receipts_root": "9ETNjrt6MkwTgSVMMbpukfxRshSD1avBUUa4R4NuqwHv",
12 |     "chunk_headers_root": "4otZ2Zj1wANZweh33kWETr3VbF3HwW9zWET4YRYTo2pL",
13 |     "chunk_tx_root": "9rdfzfYzJMZyaj2yMvjget2ZsPNbZhKqY1qUXc1urDfu",
14 |     "outcome_root": "7tkzFg8RHBmMw1ncRJZCCZAizgq4rwCftTKYLce8RU8t",
15 |     "chunks_included": 1,
16 |     "challenges_root": "11111111111111111111111111111111",
17 |     "timestamp": 1676913656724153000,
18 |     "timestamp_nanosec": "1676913656724153000",
19 |     "random_value": "Au7bq9XzGAhDm2wb4PxbXQnTngzVTcWYa76Govx6n7NK",
20 |     "validator_proposals": [],
21 |     "chunk_mask": [
22 |       true
23 |     ],
24 |     "gas_price": "100000000",
25 |     "block_ordinal": 879714,
26 |     "rent_paid": "0",
27 |     "validator_reward": "0",
28 |     "total_supply": "2085303629225498163419972383984892",
29 |     "challenges_result": [],
30 |     "last_final_block": "BS9QJenf3N9pKy8PZ5xRuowZi9X9T4sSDDu4i3i5UJZe",
31 |     "last_ds_final_block": "9Da84RTsubZPcLxzK1K6JkCnDnMn4DxaSRzJPtnYJXUM",
32 |     "next_bp_hash": "EtsYQonaJ7n5nRt32XJC5dBxxBxh7a9UVApykmmt8fCQ",
33 |     "block_merkle_root": "CqRoDd8BR4su7Z8vSfvg45HrugZnwbMbnXHRTWYQkWfZ",
34 |     "epoch_sync_data_hash": null,
35 |     "approvals": [
36 |       "ed25519:3RBQ4PnfBbnDn8WnCScQJH9asjkicuhZZo36aa6FVa2Lbnj531NLiBkTmj8rhg5vfsarmYLgQmcMcXRuJ4jkzKns"
37 |     ],
38 |     "signature": "ed25519:2dWsY1QadJyNaVkyga5Wcj9DFRizAyFc9STjyN5Mtxc59ZzNYqML6qQTgtLeCYkpCy1h7kG34jcALTpEDQpkBoKQ",
39 |     "latest_protocol_version": 59
40 |   },
41 |   "chunks": [
42 |     {
43 |       "chunk_hash": "7Ewp1AnL6o29UXLW2up9miQBdSaKxCnfRyhMGt9G4epN",
44 |       "prev_block_hash": "9Da84RTsubZPcLxzK1K6JkCnDnMn4DxaSRzJPtnYJXUM",
45 |       "outcome_root": "11111111111111111111111111111111",
46 |       "prev_state_root": "2ViDp7rmam77VmhY5C9KW92a6mgUTCKQ3Scz8tFyH13z",
47 |       "encoded_merkle_root": "44MrDjQzt1jU5PGUYY69THZ4g3SsfQiNiKKorey3GVtq",
48 |       "encoded_length": 364,
49 |       "height_created": 879765,
50 |       "height_included": 879765,
51 |
"shard_id": 0, 52 | "gas_used": 0, 53 | "gas_limit": 1000000000000000, 54 | "rent_paid": "0", 55 | "validator_reward": "0", 56 | "balance_burnt": "0", 57 | "outgoing_receipts_root": "H4Rd6SGeEBTbxkitsCdzfu9xL9HtZ2eHoPCQXUeZ6bW4", 58 | "tx_root": "GKd8Evs3JdahRpS8q14q6RzzkodzFiSQPcH4yJxs4ZjG", 59 | "validator_proposals": [], 60 | "signature": "ed25519:2qev3mWQdYLi9aPwCnFHt22GFxhuGTGfnaz3msGcduUdXeycTQDBkY4EyQzpph4frXCybuYHE6g4GFxD2HVmWbJY" 61 | } 62 | ] 63 | } 64 | -------------------------------------------------------------------------------- /lake-framework/blocks/000000879765/shard_0.json: -------------------------------------------------------------------------------- 1 | { 2 | "shard_id": 0, 3 | "chunk": { 4 | "author": "test.near", 5 | "header": { 6 | "chunk_hash": "7Ewp1AnL6o29UXLW2up9miQBdSaKxCnfRyhMGt9G4epN", 7 | "prev_block_hash": "9Da84RTsubZPcLxzK1K6JkCnDnMn4DxaSRzJPtnYJXUM", 8 | "outcome_root": "11111111111111111111111111111111", 9 | "prev_state_root": "2ViDp7rmam77VmhY5C9KW92a6mgUTCKQ3Scz8tFyH13z", 10 | "encoded_merkle_root": "44MrDjQzt1jU5PGUYY69THZ4g3SsfQiNiKKorey3GVtq", 11 | "encoded_length": 364, 12 | "height_created": 879765, 13 | "height_included": 0, 14 | "shard_id": 0, 15 | "gas_used": 0, 16 | "gas_limit": 1000000000000000, 17 | "rent_paid": "0", 18 | "validator_reward": "0", 19 | "balance_burnt": "0", 20 | "outgoing_receipts_root": "H4Rd6SGeEBTbxkitsCdzfu9xL9HtZ2eHoPCQXUeZ6bW4", 21 | "tx_root": "GKd8Evs3JdahRpS8q14q6RzzkodzFiSQPcH4yJxs4ZjG", 22 | "validator_proposals": [], 23 | "signature": "ed25519:2qev3mWQdYLi9aPwCnFHt22GFxhuGTGfnaz3msGcduUdXeycTQDBkY4EyQzpph4frXCybuYHE6g4GFxD2HVmWbJY" 24 | }, 25 | "transactions": [ 26 | { 27 | "transaction": { 28 | "signer_id": "test.near", 29 | "public_key": "ed25519:8Rn4FJeeRYcrLbcrAQNFVgvbZ2FCEQjgydbXwqBwF1ib", 30 | "nonce": 39, 31 | "receiver_id": "test.near", 32 | "actions": [ 33 | { 34 | "Delegate": { 35 | "delegate_action": { 36 | "sender_id": "test.near", 37 | "receiver_id": "test.near", 38 | "actions": [ 39 | { 40 | "AddKey": { 41 | "public_key": "ed25519:CnQMksXTTtn81WdDujsEMQgKUMkFvDJaAjDeDLTxVrsg", 42 | "access_key": { 43 | "nonce": 0, 44 | "permission": "FullAccess" 45 | } 46 | } 47 | } 48 | ], 49 | "nonce": 879546, 50 | "max_block_height": 100, 51 | "public_key": "ed25519:8Rn4FJeeRYcrLbcrAQNFVgvbZ2FCEQjgydbXwqBwF1ib" 52 | }, 53 | "signature": "ed25519:25uGrsJNU3fVgUpPad3rGJRy2XQum8gJxLRjKFCbd7gymXwUxQ9r3tuyBCD6To7SX5oSJ2ScJZejwqK1ju8WdZfS" 54 | } 55 | } 56 | ], 57 | "signature": "ed25519:3vKF31u2naSjow1uQEfkoWy834fu9xhk66oBfTAYL3XVtJVAf1FREt7owJzwyRrN5F4mtd1rkvv1iTPTL86Szb2j", 58 | "hash": "EZnJpyJDnkwnadB1V8PqjVMx7oe2zLhUMtJ8v6EUh1NQ" 59 | }, 60 | "outcome": { 61 | "execution_outcome": { 62 | "proof": [ 63 | { 64 | "hash": "7kPZTTVYJHvUg4g3S7SFErkKs18Ex1kN4rESnZwtJb2U", 65 | "direction": "Right" 66 | } 67 | ], 68 | "block_hash": "95K8Je1iAVqieVU8ZuGgSdbvYs8T9rL6ER1XnRekMGbj", 69 | "id": "EZnJpyJDnkwnadB1V8PqjVMx7oe2zLhUMtJ8v6EUh1NQ", 70 | "outcome": { 71 | "logs": [], 72 | "receipt_ids": [ 73 | "AQDQ9G4QpK7x2inV3GieVEbqeoCGF9nmvrViQ2UgEXDQ" 74 | ], 75 | "gas_burnt": 409824625000, 76 | "tokens_burnt": "40982462500000000000", 77 | "executor_id": "test.near", 78 | "status": { 79 | "SuccessReceiptId": "AQDQ9G4QpK7x2inV3GieVEbqeoCGF9nmvrViQ2UgEXDQ" 80 | }, 81 | "metadata": { 82 | "version": 1, 83 | "gas_profile": null 84 | } 85 | } 86 | }, 87 | "receipt": null 88 | } 89 | } 90 | ], 91 | "receipts": [ 92 | { 93 | "predecessor_id": "test.near", 94 | "receiver_id": "test.near", 95 | "receipt_id": 
"AQDQ9G4QpK7x2inV3GieVEbqeoCGF9nmvrViQ2UgEXDQ", 96 | "receipt": { 97 | "Action": { 98 | "signer_id": "test.near", 99 | "signer_public_key": "ed25519:8Rn4FJeeRYcrLbcrAQNFVgvbZ2FCEQjgydbXwqBwF1ib", 100 | "gas_price": "100000000", 101 | "output_data_receivers": [], 102 | "input_data_ids": [], 103 | "actions": [ 104 | { 105 | "Delegate": { 106 | "delegate_action": { 107 | "sender_id": "test.near", 108 | "receiver_id": "test.near", 109 | "actions": [ 110 | { 111 | "AddKey": { 112 | "public_key": "ed25519:CnQMksXTTtn81WdDujsEMQgKUMkFvDJaAjDeDLTxVrsg", 113 | "access_key": { 114 | "nonce": 0, 115 | "permission": "FullAccess" 116 | } 117 | } 118 | } 119 | ], 120 | "nonce": 879546, 121 | "max_block_height": 100, 122 | "public_key": "ed25519:8Rn4FJeeRYcrLbcrAQNFVgvbZ2FCEQjgydbXwqBwF1ib" 123 | }, 124 | "signature": "ed25519:25uGrsJNU3fVgUpPad3rGJRy2XQum8gJxLRjKFCbd7gymXwUxQ9r3tuyBCD6To7SX5oSJ2ScJZejwqK1ju8WdZfS" 125 | } 126 | } 127 | ] 128 | } 129 | } 130 | } 131 | ] 132 | }, 133 | "receipt_execution_outcomes": [ 134 | { 135 | "execution_outcome": { 136 | "proof": [ 137 | { 138 | "hash": "6vBgNYcwx6pcESfrw5YRBRamatBH8red3GEt3s3ntefm", 139 | "direction": "Left" 140 | } 141 | ], 142 | "block_hash": "95K8Je1iAVqieVU8ZuGgSdbvYs8T9rL6ER1XnRekMGbj", 143 | "id": "AQDQ9G4QpK7x2inV3GieVEbqeoCGF9nmvrViQ2UgEXDQ", 144 | "outcome": { 145 | "logs": [], 146 | "receipt_ids": [ 147 | "5rc8UEhD4hmNQ3pJJM5Xc3VHeLXpCQqkA3ep8ag4aaDA" 148 | ], 149 | "gas_burnt": 308059500000, 150 | "tokens_burnt": "30805950000000000000", 151 | "executor_id": "test.near", 152 | "status": { 153 | "Failure": { 154 | "ActionError": { 155 | "index": 0, 156 | "kind": "DelegateActionExpired" 157 | } 158 | } 159 | }, 160 | "metadata": { 161 | "version": 3, 162 | "gas_profile": [] 163 | } 164 | } 165 | }, 166 | "receipt": { 167 | "predecessor_id": "test.near", 168 | "receiver_id": "test.near", 169 | "receipt_id": "AQDQ9G4QpK7x2inV3GieVEbqeoCGF9nmvrViQ2UgEXDQ", 170 | "receipt": { 171 | "Action": { 172 | "signer_id": "test.near", 173 | "signer_public_key": "ed25519:8Rn4FJeeRYcrLbcrAQNFVgvbZ2FCEQjgydbXwqBwF1ib", 174 | "gas_price": "100000000", 175 | "output_data_receivers": [], 176 | "input_data_ids": [], 177 | "actions": [ 178 | { 179 | "Delegate": { 180 | "delegate_action": { 181 | "sender_id": "test.near", 182 | "receiver_id": "test.near", 183 | "actions": [ 184 | { 185 | "AddKey": { 186 | "public_key": "ed25519:CnQMksXTTtn81WdDujsEMQgKUMkFvDJaAjDeDLTxVrsg", 187 | "access_key": { 188 | "nonce": 0, 189 | "permission": "FullAccess" 190 | } 191 | } 192 | } 193 | ], 194 | "nonce": 879546, 195 | "max_block_height": 100, 196 | "public_key": "ed25519:8Rn4FJeeRYcrLbcrAQNFVgvbZ2FCEQjgydbXwqBwF1ib" 197 | }, 198 | "signature": "ed25519:25uGrsJNU3fVgUpPad3rGJRy2XQum8gJxLRjKFCbd7gymXwUxQ9r3tuyBCD6To7SX5oSJ2ScJZejwqK1ju8WdZfS" 199 | } 200 | } 201 | ] 202 | } 203 | } 204 | } 205 | } 206 | ], 207 | "state_changes": [ 208 | { 209 | "cause": { 210 | "type": "transaction_processing", 211 | "tx_hash": "EZnJpyJDnkwnadB1V8PqjVMx7oe2zLhUMtJ8v6EUh1NQ" 212 | }, 213 | "type": "account_update", 214 | "change": { 215 | "account_id": "test.near", 216 | "amount": "999999549946933447300000000000000", 217 | "locked": "81773107345435833494396250588347", 218 | "code_hash": "11111111111111111111111111111111", 219 | "storage_usage": 182, 220 | "storage_paid_at": 0 221 | } 222 | }, 223 | { 224 | "cause": { 225 | "type": "transaction_processing", 226 | "tx_hash": "EZnJpyJDnkwnadB1V8PqjVMx7oe2zLhUMtJ8v6EUh1NQ" 227 | }, 228 | "type": "access_key_update", 229 | "change": { 230 | 
"account_id": "test.near", 231 | "public_key": "ed25519:8Rn4FJeeRYcrLbcrAQNFVgvbZ2FCEQjgydbXwqBwF1ib", 232 | "access_key": { 233 | "nonce": 39, 234 | "permission": "FullAccess" 235 | } 236 | } 237 | } 238 | ] 239 | } 240 | -------------------------------------------------------------------------------- /lake-framework/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![doc = include_str!("../README.md")] 2 | #[macro_use] 3 | extern crate derive_builder; 4 | 5 | use futures::{Future, StreamExt}; 6 | 7 | pub use near_lake_context_derive::LakeContext; 8 | pub use near_lake_primitives::{ 9 | self, 10 | near_indexer_primitives::{self, near_primitives}, 11 | }; 12 | 13 | pub use aws_credential_types::Credentials; 14 | pub use types::{Lake, LakeBuilder, LakeContextExt, LakeError}; 15 | 16 | mod s3_fetchers; 17 | mod streamer; 18 | pub(crate) mod types; 19 | 20 | pub(crate) const LAKE_FRAMEWORK: &str = "near_lake_framework"; 21 | 22 | impl types::Lake { 23 | /// Creates `mpsc::channel` and returns the `receiver` to read the stream of `StreamerMessage` 24 | ///```no_run 25 | /// # use near_lake_framework::{LakeContext}; 26 | /// 27 | /// #[derive(LakeContext)] 28 | /// struct MyContext { 29 | /// my_field: String, 30 | /// } 31 | /// 32 | ///# fn main() -> anyhow::Result<()> { 33 | /// 34 | /// let context = MyContext { 35 | /// my_field: "my_value".to_string(), 36 | /// }; 37 | /// 38 | /// near_lake_framework::LakeBuilder::default() 39 | /// .testnet() 40 | /// .start_block_height(112205773) 41 | /// .build()? 42 | /// .run_with_context(handle_block, &context)?; 43 | /// Ok(()) 44 | ///# } 45 | /// 46 | /// # async fn handle_block(_block: near_lake_primitives::block::Block, context: &MyContext) -> anyhow::Result<()> { Ok(()) } 47 | ///``` 48 | pub fn run_with_context<'context, C: LakeContextExt, E, Fut>( 49 | self, 50 | f: impl Fn(near_lake_primitives::block::Block, &'context C) -> Fut, 51 | context: &'context C, 52 | ) -> Result<(), LakeError> 53 | where 54 | Fut: Future>, 55 | E: Into>, 56 | { 57 | let runtime = tokio::runtime::Runtime::new() 58 | .map_err(|err| LakeError::RuntimeStartError { error: err })?; 59 | 60 | runtime.block_on(async move { self.run_with_context_async(f, context).await }) 61 | } 62 | 63 | pub async fn run_with_context_async<'context, C: LakeContextExt, E, Fut>( 64 | self, 65 | f: impl Fn(near_lake_primitives::block::Block, &'context C) -> Fut, 66 | context: &'context C, 67 | ) -> Result<(), LakeError> 68 | where 69 | Fut: Future>, 70 | E: Into>, 71 | { 72 | // capture the concurrency value before it moves into the streamer 73 | let concurrency = self.concurrency; 74 | 75 | // instantiate the NEAR Lake Framework Stream 76 | let (sender, stream) = streamer::streamer(self); 77 | 78 | // read the stream events and pass them to a handler function with 79 | // concurrency 1 80 | let mut handlers = tokio_stream::wrappers::ReceiverStream::new(stream) 81 | .map(|streamer_message| async { 82 | let mut block: near_lake_primitives::block::Block = streamer_message.into(); 83 | 84 | context.execute_before_run(&mut block); 85 | 86 | let user_indexer_function_execution_result = f(block, context).await; 87 | 88 | context.execute_after_run(); 89 | 90 | user_indexer_function_execution_result 91 | }) 92 | .buffer_unordered(concurrency); 93 | 94 | while let Some(_handle_message) = handlers.next().await {} 95 | drop(handlers); // close the channel so the sender will stop 96 | 97 | // propagate errors from the sender 98 | match sender.await { 99 
100 |             Ok(Err(err)) => Err(err),
101 |             Err(err) => Err(err.into()), // JoinError
102 |         }
103 |     }
104 | 
105 |     /// Runs the Lake Framework stream, calling `f` for every block; blocks the current thread until the stream ends
106 |     ///```no_run
107 |     ///# fn main() -> anyhow::Result<()> {
108 |     ///    near_lake_framework::LakeBuilder::default()
109 |     ///        .testnet()
110 |     ///        .start_block_height(112205773)
111 |     ///        .build()?
112 |     ///        .run(handle_block)?;
113 |     ///    Ok(())
114 |     ///# }
115 |     ///
116 |     /// # async fn handle_block(_block: near_lake_primitives::block::Block) -> anyhow::Result<()> { Ok(()) }
117 |     ///```
118 |     pub fn run<Fut, E>(
119 |         self,
120 |         f: impl Fn(near_lake_primitives::block::Block) -> Fut,
121 |     ) -> Result<(), LakeError>
122 |     where
123 |         Fut: Future<Output = Result<(), E>>,
124 |         E: Into<Box<dyn std::error::Error>>,
125 |     {
126 |         struct EmptyContext {}
127 | 
128 |         impl LakeContextExt for EmptyContext {
129 |             fn execute_before_run(&self, _block: &mut near_lake_primitives::block::Block) {}
130 | 
131 |             fn execute_after_run(&self) {}
132 |         }
133 | 
134 |         let context = EmptyContext {};
135 | 
136 |         self.run_with_context(|block, _context| f(block), &context)
137 |     }
138 | 
139 |     /// Async version of [`Lake::run`]; runs the Lake Framework stream inside an existing Tokio runtime
140 |     ///```no_run
141 |     ///#[tokio::main]
142 |     ///# async fn main() -> anyhow::Result<()> {
143 |     ///    near_lake_framework::LakeBuilder::default()
144 |     ///        .testnet()
145 |     ///        .start_block_height(112205773)
146 |     ///        .build()?
147 |     ///        .run_async(handle_block).await?;
148 |     ///    Ok(())
149 |     ///# }
150 |     ///
151 |     /// # async fn handle_block(_block: near_lake_primitives::block::Block) -> anyhow::Result<()> { Ok(()) }
152 |     ///```
153 |     pub async fn run_async<Fut, E>(
154 |         self,
155 |         f: impl Fn(near_lake_primitives::block::Block) -> Fut,
156 |     ) -> Result<(), LakeError>
157 |     where
158 |         Fut: Future<Output = Result<(), E>>,
159 |         E: Into<Box<dyn std::error::Error>>,
160 |     {
161 |         struct EmptyContext {}
162 | 
163 |         impl LakeContextExt for EmptyContext {
164 |             fn execute_before_run(&self, _block: &mut near_lake_primitives::block::Block) {}
165 | 
166 |             fn execute_after_run(&self) {}
167 |         }
168 | 
169 |         let context = EmptyContext {};
170 | 
171 |         self.run_with_context_async(|block, _context| f(block), &context)
172 |             .await
173 |     }
174 | }
--------------------------------------------------------------------------------
/lake-framework/src/s3_fetchers.rs:
--------------------------------------------------------------------------------
1 | use async_trait::async_trait;
2 | use std::str::FromStr;
3 | 
4 | use aws_sdk_s3::operation::get_object::GetObjectOutput;
5 | use aws_sdk_s3::operation::list_objects_v2::ListObjectsV2Output;
6 | 
7 | #[async_trait]
8 | pub trait S3Client {
9 |     async fn get_object(
10 |         &self,
11 |         bucket: &str,
12 |         prefix: &str,
13 |     ) -> Result<
14 |         GetObjectOutput,
15 |         aws_sdk_s3::error::SdkError<aws_sdk_s3::operation::get_object::GetObjectError>,
16 |     >;
17 | 
18 |     async fn list_objects(
19 |         &self,
20 |         bucket: &str,
21 |         start_after: &str,
22 |     ) -> Result<
23 |         ListObjectsV2Output,
24 |         aws_sdk_s3::error::SdkError<aws_sdk_s3::operation::list_objects_v2::ListObjectsV2Error>,
25 |     >;
26 | }
27 | 
28 | #[derive(Clone, Debug)]
29 | pub struct LakeS3Client {
30 |     s3: aws_sdk_s3::Client,
31 | }
32 | 
33 | impl LakeS3Client {
34 |     pub fn new(s3: aws_sdk_s3::Client) -> Self {
35 |         Self { s3 }
36 |     }
37 | }
38 | 
39 | #[async_trait]
40 | impl S3Client for LakeS3Client {
41 |     async fn get_object(
42 |         &self,
43 |         bucket: &str,
44 |         prefix: &str,
45 |     ) -> Result<
46 |         GetObjectOutput,
47 |         aws_sdk_s3::error::SdkError<aws_sdk_s3::operation::get_object::GetObjectError>,
48 |     > {
49 |         Ok(self
50 |             .s3
51 |             .get_object()
52 |             .bucket(bucket)
53 |             .key(prefix)
54 |             .request_payer(aws_sdk_s3::types::RequestPayer::Requester)
55 |             .send()
56 |             .await?)
57 |     }
58 | 
59 |     async fn list_objects(
60 |         &self,
61 |         bucket: &str,
62 |         start_after: &str,
63 |     ) -> Result<
64 |         ListObjectsV2Output,
65 |         aws_sdk_s3::error::SdkError<aws_sdk_s3::operation::list_objects_v2::ListObjectsV2Error>,
66 |     > {
67 |         Ok(self
68 |             .s3
69 |             .list_objects_v2()
70 |             .max_keys(1000) // 1000 is the default and max value for this parameter
71 |             .delimiter("/".to_string())
72 |             .start_after(start_after)
73 |             .request_payer(aws_sdk_s3::types::RequestPayer::Requester)
74 |             .bucket(bucket)
75 |             .send()
76 |             .await?)
77 |     }
78 | }
79 | 
80 | /// Queries the list of the objects in the bucket, grouped by "/" delimiter.
81 | /// Returns the list of block heights that can be fetched
82 | pub(crate) async fn list_block_heights(
83 |     lake_s3_client: &impl S3Client,
84 |     s3_bucket_name: &str,
85 |     start_from_block_height: crate::types::BlockHeight,
86 | ) -> Result<Vec<crate::types::BlockHeight>, crate::types::LakeError> {
87 |     tracing::debug!(
88 |         target: crate::LAKE_FRAMEWORK,
89 |         "Fetching block heights from S3, after #{}...",
90 |         start_from_block_height
91 |     );
92 |     let response = lake_s3_client
93 |         .list_objects(s3_bucket_name, &format!("{:0>12}", start_from_block_height))
94 |         .await?;
95 | 
96 |     Ok(match response.common_prefixes {
97 |         None => vec![],
98 |         Some(common_prefixes) => common_prefixes
99 |             .into_iter()
100 |             .filter_map(|common_prefix| common_prefix.prefix)
101 |             .collect::<Vec<String>>()
102 |             .into_iter()
103 |             .filter_map(|prefix_string| {
104 |                 prefix_string
105 |                     .split('/')
106 |                     .next()
107 |                     .map(u64::from_str)
108 |                     .and_then(|num| num.ok())
109 |             })
110 |             .collect(),
111 |     })
112 | }
113 | 
114 | /// For the given block height gets the objects:
115 | /// - block.json
116 | /// - shard_N.json
117 | /// Reads the content of the objects and parses them as JSON.
118 | /// Returns the result as `near_indexer_primitives::StreamerMessage`
119 | pub(crate) async fn fetch_streamer_message(
120 |     lake_s3_client: &impl S3Client,
121 |     s3_bucket_name: &str,
122 |     block_height: crate::types::BlockHeight,
123 | ) -> Result<near_lake_primitives::StreamerMessage, crate::types::LakeError> {
124 |     let block_view = {
125 |         let body_bytes = loop {
126 |             match lake_s3_client
127 |                 .get_object(s3_bucket_name, &format!("{:0>12}/block.json", block_height))
128 |                 .await
129 |             {
130 |                 Ok(response) => {
131 |                     match response.body.collect().await {
132 |                         Ok(bytes_stream) => break bytes_stream.into_bytes(),
133 |                         Err(err) => {
134 |                             tracing::debug!(
135 |                                 target: crate::LAKE_FRAMEWORK,
136 |                                 "Failed to read bytes from the block #{:0>12} response. Retrying immediately.\n{:#?}",
137 |                                 block_height,
138 |                                 err,
139 |                             );
140 |                         }
141 |                     };
142 |                 }
143 |                 Err(err) => {
144 |                     tracing::debug!(
145 |                         target: crate::LAKE_FRAMEWORK,
146 |                         "Failed to get {:0>12}/block.json. Retrying immediately\n{:#?}",
147 |                         block_height,
148 |                         err
149 |                     );
150 |                 }
151 |             };
152 |         };
153 | 
154 |         serde_json::from_slice::<near_lake_primitives::near_indexer_primitives::views::BlockView>(
155 |             body_bytes.as_ref(),
156 |         )?
157 |     };
158 | 
159 |     let fetch_shards_futures = block_view.chunks.iter().map(|chunk| {
160 |         fetch_shard_or_retry(
161 |             lake_s3_client,
162 |             s3_bucket_name,
163 |             block_height,
164 |             chunk.shard_id.into(),
165 |         )
166 |     });
167 | 
168 |     let shards = futures::future::try_join_all(fetch_shards_futures).await?;
169 | 
170 |     Ok(near_lake_primitives::StreamerMessage {
171 |         block: block_view,
172 |         shards,
173 |     })
174 | }
175 | 
176 | /// Fetches the shard data JSON from AWS S3 and returns the `IndexerShard`
177 | async fn fetch_shard_or_retry(
178 |     lake_s3_client: &impl S3Client,
179 |     s3_bucket_name: &str,
180 |     block_height: crate::types::BlockHeight,
181 |     shard_id: u64,
182 | ) -> Result<near_lake_primitives::near_indexer_primitives::IndexerShard, crate::types::LakeError> {
183 |     let body_bytes = loop {
184 |         match lake_s3_client
185 |             .get_object(
186 |                 s3_bucket_name,
187 |                 &format!("{:0>12}/shard_{}.json", block_height, shard_id),
188 |             )
189 |             .await
190 |         {
191 |             Ok(response) => {
192 |                 let body_bytes = match response.body.collect().await {
193 |                     Ok(body) => body.into_bytes(),
194 |                     Err(err) => {
195 |                         tracing::debug!(
196 |                             target: crate::LAKE_FRAMEWORK,
197 |                             "Failed to read the {:0>12}/shard_{}.json. Retrying in 1s...\n {:#?}",
198 |                             block_height,
199 |                             shard_id,
200 |                             err,
201 |                         );
202 |                         tokio::time::sleep(std::time::Duration::from_secs(1)).await;
203 |                         continue;
204 |                     }
205 |                 };
206 | 
207 |                 break body_bytes;
208 |             }
209 |             Err(err) => {
210 |                 tracing::debug!(
211 |                     target: crate::LAKE_FRAMEWORK,
212 |                     "Failed to fetch shard #{}, retrying immediately\n{:#?}",
213 |                     shard_id,
214 |                     err
215 |                 );
216 |             }
217 |         }
218 |     };
219 | 
220 |     Ok(serde_json::from_slice::<near_lake_primitives::near_indexer_primitives::IndexerShard>(body_bytes.as_ref())?)
221 | }
222 | 
223 | #[cfg(test)]
224 | mod test {
225 |     use super::*;
226 | 
227 |     use async_trait::async_trait;
228 | 
229 |     use aws_sdk_s3::operation::get_object::builders::GetObjectOutputBuilder;
230 |     use aws_sdk_s3::operation::list_objects_v2::builders::ListObjectsV2OutputBuilder;
231 |     use aws_sdk_s3::primitives::ByteStream;
232 | 
233 |     use aws_smithy_types::body::SdkBody;
234 | 
235 |     #[derive(Clone, Debug)]
236 |     pub struct LakeS3Client {}
237 | 
238 |     #[async_trait]
239 |     impl S3Client for LakeS3Client {
240 |         async fn get_object(
241 |             &self,
242 |             _bucket: &str,
243 |             prefix: &str,
244 |         ) -> Result<
245 |             GetObjectOutput,
246 |             aws_sdk_s3::error::SdkError<aws_sdk_s3::operation::get_object::GetObjectError>,
247 |         > {
248 |             let path = format!("{}/blocks/{}", env!("CARGO_MANIFEST_DIR"), prefix);
249 |             let file_bytes = tokio::fs::read(path).await.unwrap();
250 |             let stream = ByteStream::new(SdkBody::from(file_bytes));
251 |             Ok(GetObjectOutputBuilder::default().body(stream).build())
252 |         }
253 | 
254 |         async fn list_objects(
255 |             &self,
256 |             _bucket: &str,
257 |             _start_after: &str,
258 |         ) -> Result<
259 |             ListObjectsV2Output,
260 |             aws_sdk_s3::error::SdkError<aws_sdk_s3::operation::list_objects_v2::ListObjectsV2Error>,
261 |         > {
262 |             Ok(ListObjectsV2OutputBuilder::default().build())
263 |         }
264 |     }
265 | 
266 |     #[tokio::test]
267 |     async fn deserializes_meta_transactions() {
268 |         let lake_client = LakeS3Client {};
269 | 
270 |         let streamer_message =
271 |             fetch_streamer_message(&lake_client, "near-lake-data-mainnet", 879765)
272 |                 .await
273 |                 .unwrap();
274 | 
275 |         let delegate_action = &streamer_message.shards[0]
276 |             .chunk
277 |             .as_ref()
278 |             .unwrap()
279 |             .transactions[0]
280 |             .transaction
281 |             .actions[0];
282 | 
283 |         assert_eq!(
284 |             serde_json::to_value(delegate_action).unwrap(),
285 |             serde_json::json!({
286 |                 "Delegate": {
287 |                     "delegate_action": {
288 |                         "sender_id": "test.near",
289 |                         "receiver_id": "test.near",
290 |                         "actions": [
291 |                             {
292 |                                 "AddKey": {
"public_key": "ed25519:CnQMksXTTtn81WdDujsEMQgKUMkFvDJaAjDeDLTxVrsg", 294 | "access_key": { 295 | "nonce": 0, 296 | "permission": "FullAccess" 297 | } 298 | } 299 | } 300 | ], 301 | "nonce": 879546, 302 | "max_block_height": 100, 303 | "public_key": "ed25519:8Rn4FJeeRYcrLbcrAQNFVgvbZ2FCEQjgydbXwqBwF1ib" 304 | }, 305 | "signature": "ed25519:25uGrsJNU3fVgUpPad3rGJRy2XQum8gJxLRjKFCbd7gymXwUxQ9r3tuyBCD6To7SX5oSJ2ScJZejwqK1ju8WdZfS" 306 | } 307 | }) 308 | ); 309 | } 310 | } 311 | -------------------------------------------------------------------------------- /lake-framework/src/streamer.rs: -------------------------------------------------------------------------------- 1 | use aws_sdk_s3::Client; 2 | 3 | use futures::stream::StreamExt; 4 | use tokio::sync::mpsc; 5 | use tokio::sync::mpsc::error::SendError; 6 | 7 | use near_lake_primitives::near_indexer_primitives; 8 | 9 | use crate::{s3_fetchers, types}; 10 | 11 | /// Creates [mpsc::Receiver] and 12 | /// [mpsc::Sender] spawns the streamer 13 | /// process that writes [near_idnexer_primitives::StreamerMessage] to the given `mpsc::channel` 14 | /// returns both `sender` and `receiver` 15 | pub(crate) fn streamer( 16 | config: crate::Lake, 17 | ) -> ( 18 | tokio::task::JoinHandle>, 19 | mpsc::Receiver, 20 | ) { 21 | let (sender, receiver) = mpsc::channel(config.blocks_preload_pool_size); 22 | (tokio::spawn(start(sender, config)), receiver) 23 | } 24 | 25 | fn stream_block_heights<'a: 'b, 'b>( 26 | lake_s3_client: &'a s3_fetchers::LakeS3Client, 27 | s3_bucket_name: &'a str, 28 | mut start_from_block_height: crate::types::BlockHeight, 29 | ) -> impl futures::Stream + 'b { 30 | async_stream::stream! { 31 | loop { 32 | tracing::debug!(target: crate::LAKE_FRAMEWORK, "Fetching a list of blocks from S3..."); 33 | match s3_fetchers::list_block_heights( 34 | lake_s3_client, 35 | s3_bucket_name, 36 | start_from_block_height, 37 | ) 38 | .await { 39 | Ok(block_heights) => { 40 | if block_heights.is_empty() { 41 | tracing::debug!( 42 | target: crate::LAKE_FRAMEWORK, 43 | "There are no newer block heights than {} in bucket {}. Fetching again in 2s...", 44 | start_from_block_height, 45 | s3_bucket_name, 46 | ); 47 | tokio::time::sleep(std::time::Duration::from_secs(2)).await; 48 | continue; 49 | } 50 | tracing::debug!( 51 | target: crate::LAKE_FRAMEWORK, 52 | "Received {} newer block heights", 53 | block_heights.len() 54 | ); 55 | 56 | start_from_block_height = *block_heights.last().unwrap() + 1; 57 | for block_height in block_heights { 58 | tracing::debug!(target: crate::LAKE_FRAMEWORK, "Yielding {} block height...", block_height); 59 | yield block_height; 60 | } 61 | } 62 | Err(err) => { 63 | tracing::warn!( 64 | target: crate::LAKE_FRAMEWORK, 65 | "Failed to get block heights from bucket {}: {}. Retrying in 1s...", 66 | s3_bucket_name, 67 | err, 68 | ); 69 | tokio::time::sleep(std::time::Duration::from_secs(1)).await; 70 | } 71 | } 72 | } 73 | } 74 | } 75 | 76 | // The only consumer of the BlockHeights Streamer 77 | async fn prefetch_block_heights_into_pool( 78 | pending_block_heights: &mut std::pin::Pin< 79 | &mut impl tokio_stream::Stream, 80 | >, 81 | limit: usize, 82 | await_for_at_least_one: bool, 83 | ) -> Result, crate::types::LakeError> { 84 | let mut block_heights = Vec::with_capacity(limit); 85 | for remaining_limit in (0..limit).rev() { 86 | tracing::debug!(target: crate::LAKE_FRAMEWORK, "Polling for the next block height without awaiting... 
87 |         match futures::poll!(pending_block_heights.next()) {
88 |             std::task::Poll::Ready(Some(block_height)) => {
89 |                 block_heights.push(block_height);
90 |             }
91 |             std::task::Poll::Pending => {
92 |                 if await_for_at_least_one && block_heights.is_empty() {
93 |                     tracing::debug!(target: crate::LAKE_FRAMEWORK, "There were no block heights available immediately, and the prefetching blocks queue is empty, so we need to wait for at least a single block height to be available before proceeding...");
94 |                     match pending_block_heights.next().await {
95 |                         Some(block_height) => {
96 |                             block_heights.push(block_height);
97 |                         }
98 |                         None => {
99 |                             return Err(crate::types::LakeError::InternalError {
100 |                                 error_message: "This state should be unreachable as the block heights stream should be infinite.".to_string()
101 |                             });
102 |                         }
103 |                     }
104 |                     continue;
105 |                 }
106 |                 tracing::debug!(target: crate::LAKE_FRAMEWORK, "There were no block heights available immediately, so we should not block here and keep processing the blocks.");
107 |                 break;
108 |             }
109 |             std::task::Poll::Ready(None) => {
110 |                 return Err(
111 |                     crate::types::LakeError::InternalError {
112 |                         error_message: "This state should be unreachable as the block heights stream should be infinite.".to_string()
113 |                     }
114 |                 );
115 |             }
116 |         }
117 |     }
118 |     Ok(block_heights)
119 | }
120 | 
121 | #[allow(unused_labels)] // we use loop labels for code-readability
122 | pub(crate) async fn start(
123 |     streamer_message_sink: mpsc::Sender<near_indexer_primitives::StreamerMessage>,
124 |     config: crate::Lake,
125 | ) -> Result<(), crate::types::LakeError> {
126 |     let mut start_from_block_height = config.start_block_height;
127 | 
128 |     let s3_client = if let Some(config) = config.s3_config {
129 |         Client::from_conf(config)
130 |     } else {
131 |         let aws_config = aws_config::from_env().load().await;
132 |         let s3_config = aws_sdk_s3::config::Builder::from(&aws_config)
133 |             .region(aws_types::region::Region::new(config.s3_region_name))
134 |             .build();
135 |         Client::from_conf(s3_config)
136 |     };
137 |     let lake_s3_client = s3_fetchers::LakeS3Client::new(s3_client.clone());
138 | 
139 |     let mut last_processed_block_hash: Option<near_indexer_primitives::CryptoHash> = None;
140 | 
141 |     'main: loop {
142 |         // In the beginning of the 'main' loop we create a Block Heights stream
143 |         // and prefetch the initial data in that pool.
144 |         // Later the 'stream' loop might exit to this 'main' one to repeat the procedure.
145 |         // This happens because we assume the Lake Indexer that writes to the S3 bucket might,
146 |         // in some cases, write the N+1 block before it finishes writing the N block.
147 |         // We need to stream blocks consistently, so we have to try to load the block again.
148 | 
149 |         let pending_block_heights = stream_block_heights(
150 |             &lake_s3_client,
151 |             &config.s3_bucket_name,
152 |             start_from_block_height,
153 |         );
154 |         tokio::pin!(pending_block_heights);
155 | 
156 |         let mut streamer_messages_futures = futures::stream::FuturesOrdered::new();
157 |         tracing::debug!(
158 |             target: crate::LAKE_FRAMEWORK,
159 |             "Prefetching up to {} blocks...",
160 |             config.blocks_preload_pool_size
161 |         );
162 | 
163 |         streamer_messages_futures.extend(
164 |             prefetch_block_heights_into_pool(
165 |                 &mut pending_block_heights,
166 |                 config.blocks_preload_pool_size,
167 |                 true,
168 |             )
169 |             .await?
170 |             .into_iter()
171 |             .map(|block_height| {
172 |                 s3_fetchers::fetch_streamer_message(
173 |                     &lake_s3_client,
174 |                     &config.s3_bucket_name,
175 |                     block_height,
176 |                 )
177 |             }),
178 |         );
179 | 
180 |         tracing::debug!(
181 |             target: crate::LAKE_FRAMEWORK,
182 |             "Waiting for the first prefetched block..."
183 |         );
184 |         'stream: while let Some(streamer_message_result) = streamer_messages_futures.next().await {
185 |             let streamer_message = streamer_message_result.map_err(|err| {
186 |                 tracing::error!(
187 |                     target: crate::LAKE_FRAMEWORK,
188 |                     "Failed to fetch StreamerMessage with error: \n{:#?}",
189 |                     err,
190 |                 );
191 |                 err
192 |             })?;
193 | 
194 |             tracing::debug!(
195 |                 target: crate::LAKE_FRAMEWORK,
196 |                 "Received block #{} ({})",
197 |                 streamer_message.block.header.height,
198 |                 streamer_message.block.header.hash
199 |             );
200 |             // check if we have `last_processed_block_hash` (might be None only on start)
201 |             if let Some(prev_block_hash) = last_processed_block_hash {
202 |                 // compare `last_processed_block_hash` with `block.header.prev_hash` of the current
203 |                 // block (ensure we don't miss anything from S3)
204 |                 // retrieve the data from S3 if prev_hashes don't match and repeat the main loop step
205 |                 if prev_block_hash != streamer_message.block.header.prev_hash {
206 |                     tracing::warn!(
207 |                         target: crate::LAKE_FRAMEWORK,
208 |                         "`prev_hash` does not match, refetching the data from S3 in 200ms",
209 |                     );
210 |                     tokio::time::sleep(std::time::Duration::from_millis(200)).await;
211 |                     break 'stream;
212 |                 }
213 |             }
214 | 
215 |             // store current block info as `last_processed_block_*` for next iteration
216 |             last_processed_block_hash = Some(streamer_message.block.header.hash);
217 |             start_from_block_height = streamer_message.block.header.height + 1;
218 | 
219 |             tracing::debug!(
220 |                 target: crate::LAKE_FRAMEWORK,
221 |                 "Prefetching up to {} blocks... (there are {} blocks in the prefetching pool)",
222 |                 config.blocks_preload_pool_size,
223 |                 streamer_messages_futures.len(),
224 |             );
225 |             tracing::debug!(
226 |                 target: crate::LAKE_FRAMEWORK,
227 |                 "Streaming block #{} ({})",
228 |                 streamer_message.block.header.height,
229 |                 streamer_message.block.header.hash
230 |             );
231 |             let blocks_preload_pool_current_len = streamer_messages_futures.len();
232 | 
233 |             let prefetched_block_heights_future = prefetch_block_heights_into_pool(
234 |                 &mut pending_block_heights,
235 |                 config
236 |                     .blocks_preload_pool_size
237 |                     .saturating_sub(blocks_preload_pool_current_len),
238 |                 blocks_preload_pool_current_len == 0,
239 |             );
240 | 
241 |             let streamer_message_sink_send_future = streamer_message_sink.send(streamer_message);
242 | 
243 |             let (prefetch_res, send_res): (
244 |                 Result<Vec<crate::types::BlockHeight>, crate::types::LakeError>,
245 |                 Result<_, SendError<near_indexer_primitives::StreamerMessage>>,
246 |             ) = futures::join!(
247 |                 prefetched_block_heights_future,
248 |                 streamer_message_sink_send_future,
249 |             );
250 | 
251 |             if let Err(SendError(err)) = send_res {
252 |                 tracing::debug!(
253 |                     target: crate::LAKE_FRAMEWORK,
254 |                     "Failed to send StreamerMessage (#{:0>12}) to the channel. Channel is closed, exiting \n{:?}",
255 |                     start_from_block_height - 1,
256 |                     err,
257 |                 );
258 |                 return Ok(());
259 |             }
260 | 
261 |             streamer_messages_futures.extend(
262 |                 prefetch_res
263 |                     .map_err(|err| {
264 |                         tracing::error!(
265 |                             target: crate::LAKE_FRAMEWORK,
266 |                             "Failed to prefetch block heights to the prefetching pool with error: \n{:#?}",
267 |                             err
268 |                         );
269 |                         err
270 |                     })?
271 |                     .into_iter()
272 |                     .map(|block_height| {
273 |                         s3_fetchers::fetch_streamer_message(
274 |                             &lake_s3_client,
275 |                             &config.s3_bucket_name,
276 |                             block_height,
277 |                         )
278 |                     }
279 |             ));
280 |         }
281 | 
282 |         tracing::warn!(
283 |             target: crate::LAKE_FRAMEWORK,
284 |             "Exited from the 'stream' loop. It may happen in two cases:\n
285 |             1. The stream of blocks has ended (this should be impossible and may indicate an error on the Lake buckets),\n
286 |             2. Received a block whose prev_hash doesn't match the previously streamed block.\n
287 |             Will attempt to restart the stream from block #{}",
288 |             start_from_block_height,
289 |         );
290 |     }
291 | }
--------------------------------------------------------------------------------
/lake-framework/src/types.rs:
--------------------------------------------------------------------------------
1 | /// Type alias that represents the block height
2 | pub type BlockHeight = u64;
3 | 
4 | /// Configuration struct for NEAR Lake Framework
5 | /// NB! Consider using [`LakeBuilder`]
6 | /// Building the `Lake` example:
7 | /// ```
8 | /// use near_lake_framework::LakeBuilder;
9 | ///
10 | /// # fn main() {
11 | ///    let lake = LakeBuilder::default()
12 | ///        .testnet()
13 | ///        .start_block_height(82422587)
14 | ///        .build()
15 | ///        .expect("Failed to build Lake");
16 | /// # }
17 | /// ```
18 | #[derive(Default, Builder, Debug)]
19 | #[builder(pattern = "owned")]
20 | pub struct Lake {
21 |     /// AWS S3 Bucket name
22 |     #[builder(setter(into))]
23 |     pub(crate) s3_bucket_name: String,
24 |     /// AWS S3 Region name
25 |     #[builder(setter(into))]
26 |     pub(crate) s3_region_name: String,
27 |     /// Defines the block height to start indexing from
28 |     pub(crate) start_block_height: u64,
29 |     /// Custom aws_sdk_s3::config::Config
30 |     /// ## Use-case: custom endpoint
31 |     /// You might want to stream data from a custom S3-compatible source (e.g. Minio or Localstack). In order to do that you'd need to pass a configured `aws_sdk_s3::config::Config`:
32 |     /// ```
33 |     /// use near_lake_framework::LakeBuilder;
34 |     ///
35 |     /// # #[tokio::main]
36 |     /// # async fn main() {
37 |     /// let aws_config = aws_config::from_env().load().await;
38 |     /// let s3_conf = aws_sdk_s3::config::Builder::from(&aws_config)
39 |     ///     .endpoint_url("http://0.0.0.0:9000")
40 |     ///     .build();
41 |     ///
42 |     /// let lake = LakeBuilder::default()
43 |     ///     .s3_config(s3_conf)
44 |     ///     .s3_bucket_name("near-lake-data-custom")
45 |     ///     .s3_region_name("eu-central-1")
46 |     ///     .start_block_height(1)
47 |     ///     .build()
48 |     ///     .expect("Failed to build Lake");
49 |     /// # }
50 |     /// ```
51 |     #[builder(setter(strip_option), default)]
52 |     pub(crate) s3_config: Option<aws_sdk_s3::config::Config>,
53 |     /// Defines how many *block heights* Lake Framework will try to preload into memory to avoid S3 `List` requests.
54 |     /// Default: 100
55 |     ///
56 |     /// *Note*: This value is not the number of blocks to preload, but the number of block heights.
57 |     /// Also, this value doesn't affect your indexer much if it follows the tip of the network.
58 |     /// This parameter is useful for historical indexing.
59 |     #[builder(default = "100")]
60 |     pub(crate) blocks_preload_pool_size: usize,
61 |     /// Number of concurrent blocks to process. Default: 1
62 |     /// **WARNING**: Increase this value only if your block handling logic doesn't have to rely on previous blocks and can be processed in parallel
63 |     #[builder(default = "1")]
64 |     pub(crate) concurrency: usize,
65 | }
66 | 
67 | impl LakeBuilder {
68 |     /// Shortcut to set up [LakeBuilder::s3_bucket_name] for mainnet
69 |     /// ```
70 |     /// use near_lake_framework::LakeBuilder;
71 |     ///
72 |     /// # fn main() {
73 |     ///    let lake = LakeBuilder::default()
74 |     ///        .mainnet()
75 |     ///        .start_block_height(65231161)
76 |     ///        .build()
77 |     ///        .expect("Failed to build Lake");
78 |     /// # }
79 |     /// ```
80 |     pub fn mainnet(mut self) -> Self {
81 |         self.s3_bucket_name = Some("near-lake-data-mainnet".to_string());
82 |         self.s3_region_name = Some("eu-central-1".to_string());
83 |         self
84 |     }
85 | 
86 |     /// Shortcut to set up [LakeBuilder::s3_bucket_name] for testnet
87 |     /// ```
88 |     /// use near_lake_framework::LakeBuilder;
89 |     ///
90 |     /// # fn main() {
91 |     ///    let lake = LakeBuilder::default()
92 |     ///        .testnet()
93 |     ///        .start_block_height(82422587)
94 |     ///        .build()
95 |     ///        .expect("Failed to build Lake");
96 |     /// # }
97 |     /// ```
98 |     pub fn testnet(mut self) -> Self {
99 |         self.s3_bucket_name = Some("near-lake-data-testnet".to_string());
100 |         self.s3_region_name = Some("eu-central-1".to_string());
101 |         self
102 |     }
103 | 
104 |     /// Shortcut to set up [LakeBuilder::s3_bucket_name] for betanet
105 |     /// ```
106 |     /// use near_lake_framework::LakeBuilder;
107 |     ///
108 |     /// # fn main() {
109 |     ///    let lake = LakeBuilder::default()
110 |     ///        .betanet()
111 |     ///        .start_block_height(82422587)
112 |     ///        .build()
113 |     ///        .expect("Failed to build Lake");
114 |     /// # }
115 |     /// ```
116 |     pub fn betanet(mut self) -> Self {
117 |         self.s3_bucket_name = Some("near-lake-data-betanet".to_string());
118 |         self.s3_region_name = Some("us-east-1".to_string());
119 |         self
120 |     }
121 | }
122 | 
123 | #[allow(clippy::enum_variant_names)]
124 | #[derive(thiserror::Error, Debug)]
125 | pub enum LakeError {
126 |     #[error("Failed to parse structure from JSON: {error_message}")]
127 |     ParseError {
128 |         #[from]
129 |         error_message: serde_json::Error,
130 |     },
131 |     #[error("AWS S3 error: {error}")]
132 |     AwsGetObjectError {
133 |         #[from]
134 |         error: aws_sdk_s3::error::SdkError<aws_sdk_s3::operation::get_object::GetObjectError>,
135 |     },
136 |     #[error("AWS S3 error: {error}")]
137 |     AwsListObjectsV2Error {
138 |         #[from]
139 |         error:
140 |             aws_sdk_s3::error::SdkError<aws_sdk_s3::operation::list_objects_v2::ListObjectsV2Error>,
141 |     },
142 |     #[error("Failed to convert integer: {error}")]
143 |     IntConversionError {
144 |         #[from]
145 |         error: std::num::TryFromIntError,
146 |     },
147 |     #[error("Join error: {error}")]
148 |     JoinError {
149 |         #[from]
150 |         error: tokio::task::JoinError,
151 |     },
152 |     #[error("Failed to start runtime: {error}")]
153 |     RuntimeStartError {
154 |         #[from]
155 |         error: std::io::Error,
156 |     },
157 |     #[error("Internal error: {error_message}")]
158 |     InternalError { error_message: String },
159 | }
160 | 
161 | /// ### The concept of Context for the Lake Framework
162 | /// The main idea of the Lake Framework is to provide a simple way to index data from the NEAR blockchain.
163 | /// The framework is designed to be as flexible as possible, so it doesn't provide any specific indexing logic.
164 | /// Instead, it provides a way to implement your own logic. One of the main concepts of the framework is the Context.
165 | /// The Context is a struct that implements the [LakeContextExt] trait. It is used to pass data between the framework and your logic.
166 | /// The Context is created once and then passed to the framework. The framework will call the [LakeContextExt::execute_before_run]
167 | /// method before each block is passed to your indexing function and [LakeContextExt::execute_after_run] after each block has been handled.
168 | /// The Context is useful for passing data between blocks. For example, you can use it to store the last block timestamp and use it in the next block.
169 | ///
170 | /// Also the Context is necessary to pass the "global" data to the indexing process. For example, you can use it to pass the database connection pool.
171 | ///
172 | /// ### Examples
173 | ///
174 | /// #### Simple Context examples (explicit)
175 | /// **WARNING**: This example demonstrates how the Context works explicitly. In a real-world application you would write less boilerplate. See the further examples.
176 | /// In this example we will create a simple Context that prints the block height before processing the block.
177 | /// ```no_run
178 | /// use near_lake_framework::LakeContextExt; // note: Lake Framework exports this trait with the suffix Ext in the name
179 | /// struct PrinterContext;
180 | ///
181 | /// impl LakeContextExt for PrinterContext {
182 | ///     fn execute_before_run(&self, block: &mut near_lake_primitives::block::Block) {
183 | ///         println!("Processing block {}", block.header().height());
184 | ///     }
185 | ///     fn execute_after_run(&self) {}
186 | /// }
187 | /// ```
188 | /// As you can see, we will be printing `Processing block {block_height}` before each block is processed, and we will do nothing after
189 | /// each block has been handled.
190 | ///
191 | /// The next example shows how to provide a value to the indexing process.
192 | /// ```no_run
193 | /// use near_lake_framework::LakeContextExt; // note: Lake Framework exports this trait with the suffix Ext in the name
194 | /// use near_lake_framework::LakeBuilder;
195 | /// # use diesel::Connection;
196 | ///
197 | /// struct ApplicationDataContext {
198 | ///     pub db_pool: diesel::pg::PgConnection,
199 | /// }
200 | ///
201 | /// // We need our context to do nothing before and after each block is processed.
202 | /// // The only purpose is to provide the database connection pool to the indexing process.
203 | /// impl LakeContextExt for ApplicationDataContext {
204 | ///     fn execute_before_run(&self, _block: &mut near_lake_primitives::block::Block) {}
205 | ///     fn execute_after_run(&self) {}
206 | /// }
207 | ///
208 | /// fn main() {
209 | ///     let db_pool = diesel::PgConnection::establish("postgres://localhost:5432")
210 | ///         .expect("Failed to connect to database");
211 | ///     let context = ApplicationDataContext { db_pool };
212 | ///
213 | ///     let result = LakeBuilder::default()
214 | ///         .testnet()
215 | ///         .start_block_height(82422587)
216 | ///         .build()
217 | ///         .unwrap()
218 | ///         .run_with_context(indexing_function, &context);
219 | /// }
220 | ///
221 | /// async fn indexing_function(
222 | ///     block: near_lake_primitives::block::Block,
223 | ///     context: &ApplicationDataContext,
224 | /// ) -> Result<(), near_lake_framework::LakeError> {
225 | ///     // Now we can use the database connection pool
226 | ///     let db_pool = &context.db_pool;
227 | ///     // ...
228 | ///     Ok(())
229 | /// }
230 | /// ```
231 | ///
232 | /// #### Simple Context example (real-world)
233 | /// The last example from the previous section is a bit verbose. In a real-world application you would write less boilerplate.
234 | /// The main purpose of that example was to show you what's happening under the hood.
However, for your convenience, the Lake Framework
235 | /// provides a derive macro [LakeContext] that implements the [LakeContextExt] trait for you. So you can use it to create a simple Context.
236 | ///
237 | /// ```ignore
238 | /// use near_lake_framework::LakeContext; // This is a derive macro
239 | /// use near_lake_framework::LakeBuilder;
240 | ///
241 | /// #[derive(LakeContext)]
242 | /// struct ApplicationDataContext {
243 | ///     pub db_pool: diesel::pg::PgConnection,
244 | /// }
245 | ///
246 | /// // Here we got rid of the boilerplate code from the previous example that implemented the LakeContextExt trait.
247 | ///
248 | /// fn main() {
249 | ///     let db_pool = diesel::pg::PgConnection::establish("postgres://postgres:password@localhost:5432/database")
250 | ///         .unwrap_or_else(|_| panic!("Error connecting to database"));
251 | ///
252 | ///     let context = ApplicationDataContext { db_pool };
253 | ///
254 | ///     let result = LakeBuilder::default()
255 | ///         .testnet()
256 | ///         .start_block_height(82422587)
257 | ///         .build()
258 | ///         .unwrap()
259 | ///         .run_with_context(indexing_function, &context);
260 | /// }
261 | ///
262 | /// async fn indexing_function(
263 | ///     block: near_lake_primitives::block::Block,
264 | ///     context: &ApplicationDataContext,
265 | /// ) -> Result<(), near_lake_framework::LakeError> {
266 | ///     // Now we can use the database connection pool
267 | ///     let db_pool = &context.db_pool;
268 | ///     // ...
269 | ///     Ok(())
270 | /// }
271 | /// ```
272 | ///
273 | /// Getting rid of the boilerplate code might not look like a big deal. However, it is very useful when you have a lot of Contexts or when you
274 | /// use a ready-made Context from the community.
275 | ///
276 | /// #### Advanced Context example
277 | /// In this example we will extend the previous one with the `ParentTransactionCache` context that the Lake Framework team has created and shared with everybody.
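/// Note on composition (an observation grounded in the derive implementation shown earlier in this repository): when a struct derives [LakeContext] with several context fields, the generated implementation runs the matching fields' `execute_before_run` hooks in field declaration order and their `execute_after_run` hooks in the reverse order, so composed contexts wrap each other like nested scopes.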
278 | /// 279 | /// ```ignore 280 | /// use near_lake_framework::LakeContext; // This is a derive macro 281 | /// use near_lake_parent_transaction_cache::{ParentTransactionCache, ParentTransactionCacheBuilder}; // This is a ready-to-use Context from the community that impls LakeContext trait 282 | /// use near_lake_framework::LakeBuilder; 283 | /// # use diesel::Connection; 284 | /// 285 | /// #[derive(LakeContext)] 286 | /// struct ApplicationDataContext { 287 | /// pub db_pool: diesel::pg::PgConnection, 288 | /// pub parent_transaction_cache: ParentTransactionCache, 289 | /// } 290 | /// 291 | /// fn main() { 292 | /// let db_pool = diesel::PgConnection::establish("postgres://postgres:password@localhost:5432/database") 293 | /// .unwrap_or_else(|_| panic!("Error connecting to database")); 294 | /// let parent_transaction_cache = ParentTransactionCacheBuilder::default().build().unwrap(); 295 | /// 296 | /// let context = ApplicationDataContext { db_pool, parent_transaction_cache }; 297 | /// 298 | /// let result = LakeBuilder::default() 299 | /// .testnet() 300 | /// .start_block_height(82422587) 301 | /// .build() 302 | /// .unwrap() 303 | /// .run_with_context(indexing_function, &context); 304 | /// } 305 | /// 306 | /// async fn indexing_function( 307 | /// block: near_lake_primitives::block::Block, 308 | /// context: &ApplicationDataContext, 309 | /// ) -> Result<(), near_lake_framework::LakeError> { 310 | /// // Now we can use the database connection pool 311 | /// let db_pool = &context.db_pool; 312 | /// dbg!(&context.parent_transaction_cache); 313 | /// Ok(()) 314 | /// } 315 | /// ``` 316 | /// As you can see we have extended our context with the `ParentTransactionCache` context. And we can use it in our indexing function. 317 | /// The `ParentTransactionCache` defines the `execute_before_run` and `execute_after_run` methods. So when we call `run_with_context` method 318 | /// the Lake Framework will call `execute_before_run` and `execute_after_run` methods for us. 319 | /// And we didn't need to implement them in our `ApplicationDataContext` struct because `LakeContext` derive macro did it for us automatically. 320 | pub trait LakeContextExt { 321 | /// This method will be called before the indexing process is started. 322 | fn execute_before_run(&self, block: &mut near_lake_primitives::block::Block); 323 | /// This method will be called after the indexing process is finished. 324 | fn execute_after_run(&self); 325 | } 326 | -------------------------------------------------------------------------------- /lake-parent-transaction-cache/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "near-lake-parent-transaction-cache" 3 | description = "Ready-to-use context for the Lake Framework in Rust. It provides a cache for keeping the relation between transactions and receipts in cache." 
4 | edition = "2021"
5 | version.workspace = true
6 | license.workspace = true
7 | repository.workspace = true
8 |
9 | [dependencies]
10 | cached = "0.43.0"
11 | derive_builder = "0.12.0"
12 |
13 | near-lake-framework = { path = "../lake-framework", version = "0.8.0-beta.2" }
14 |
15 | [dev-dependencies]
16 | anyhow = "1.0.44"
17 |
18 | [[example]]
19 | name = "with_context_parent_tx_cache"
20 | path = "../examples/with_context_parent_tx_cache.rs"
21 | 
--------------------------------------------------------------------------------
/lake-parent-transaction-cache/README.md:
--------------------------------------------------------------------------------
1 | # NEAR Lake Parent Transaction Cache (Context)
2 |
3 | Lake Parent Transaction Cache is a ready-to-use context for the Lake Framework in Rust. It provides a cache that keeps the relation between transactions and receipts.
4 |
5 | ## Example Usage
6 |
7 | ```no_run
8 | use near_lake_parent_transaction_cache::{ParentTransactionCache, ParentTransactionCacheBuilder};
9 | # use near_lake_framework::LakeBuilder;
10 | # use near_lake_framework::near_lake_primitives::{block::Block, actions::ActionMetaDataExt};
11 |
12 | # fn main() {
13 | let parent_transaction_cache_ctx = ParentTransactionCacheBuilder::default()
14 |     .build()
15 |     .expect("Failed to build the ParentTransactionCache context");
16 |
17 | LakeBuilder::default()
18 |     .mainnet()
19 |     .start_block_height(80504433)
20 |     .build()
21 |     .expect("Failed to build the Lake Framework")
22 |     .run_with_context(handle_block, &parent_transaction_cache_ctx)
23 |     .expect("Failed to run the Lake Framework");
24 | # }
25 |
26 | async fn handle_block(
27 |     mut block: Block,
28 |     ctx: &ParentTransactionCache,
29 | ) -> anyhow::Result<()> {
30 |     for action in block.actions() {
31 |         println!(
32 |             "Action receipt ID: {:?} | Parent TX hash: {:?}",
33 |             action.receipt_id(),
34 |             ctx.get_parent_transaction_hash(&action.receipt_id())
35 |         );
36 |     }
37 |     Ok(())
38 | }
39 | ```
40 |
41 | ## Getting Started
42 |
43 | To use the Lake Parent Transaction Cache context in your Rust project, follow these steps:
44 |
45 | 1. Add the following dependencies to your `Cargo.toml` file:
46 |
47 | ```toml
48 | [dependencies]
49 | near-lake-parent-transaction-cache = ""
50 | ```
51 |
52 | 2. Import the necessary modules in your code:
53 |
54 | ```ignore
55 | use near_lake_parent_transaction_cache::ParentTransactionCache;
56 | use near_lake_primitives::actions::ActionMetaDataExt;
57 | ```
58 |
59 | 3. Create an instance of the `ParentTransactionCache` context:
60 |
61 | ```no_run
62 | # use near_lake_parent_transaction_cache::ParentTransactionCacheBuilder;
63 | let parent_transaction_cache_ctx = ParentTransactionCacheBuilder::default();
64 | ```
65 |
66 | 4. Configure the Lake Framework and run it with the created context:
67 |
68 | ```ignore
69 | near_lake_framework::LakeBuilder::default()
70 |     .mainnet()
71 |     .start_block_height(<block_height>)
72 |     .build()?
73 |     .run_with_context(<indexing_function>, &parent_transaction_cache_ctx)?;
74 | ```
75 |
76 | Replace `<block_height>` with the starting block height you want to use. Replace `<indexing_function>` with the function you want to use to index the blocks.
77 |
78 | ## Advanced Usage
79 |
80 | ### Cache size
81 |
82 | We use [SizedCache](https://docs.rs/cached/0.43.0/cached/stores/struct.SizedCache.html) under the hood, so we can configure the cache size by using the `cache_size` method:
83 |
84 | ```no_run
85 | # use near_lake_parent_transaction_cache::ParentTransactionCacheBuilder;
86 | let parent_transaction_cache_ctx = ParentTransactionCacheBuilder::default()
87 |     .cache_size(100_000);
88 | ```
89 |
90 | By default the cache size is 100,000.
91 |
92 | ### Watch for specific accounts
93 |
94 | By default the `ParentTransactionCache` context will cache the relation between Transaction and Receipt for every Transaction in the block. But you can configure it to watch for specific accounts only:
95 |
96 | #### You can pass a Vec of AccountId
97 |
98 | ```no_run
99 | # use near_lake_parent_transaction_cache::ParentTransactionCacheBuilder;
100 | use near_lake_framework::near_primitives::types::AccountId;
101 |
102 | let accounts_to_watch: Vec<AccountId> = vec![
103 |     String::from("alice.near").try_into().unwrap(),
104 |     String::from("bob.near").try_into().unwrap(),
105 | ];
106 | let parent_transaction_cache_ctx = ParentTransactionCacheBuilder::default()
107 |     .for_accounts(accounts_to_watch);
108 | ```
109 |
110 | #### You can pass accounts to watch one by one using the `for_account` method
111 |
112 | ```no_run
113 | # use near_lake_parent_transaction_cache::ParentTransactionCacheBuilder;
114 | use near_lake_framework::near_primitives::types::AccountId;
115 |
116 | let parent_transaction_cache_ctx = ParentTransactionCacheBuilder::default()
117 |     .for_account(String::from("alice.near").try_into().unwrap())
118 |     .for_account(String::from("bob.near").try_into().unwrap());
119 | ```
120 |
121 | 
--------------------------------------------------------------------------------
/lake-parent-transaction-cache/src/lib.rs:
--------------------------------------------------------------------------------
1 | #![doc = include_str!("../README.md")]
2 | #[macro_use]
3 | extern crate derive_builder;
4 |
5 | use cached::{Cached, SizedCache};
6 | use near_lake_framework::{
7 |     near_indexer_primitives::{near_primitives::types::AccountId, CryptoHash},
8 |     near_lake_primitives::{actions::ActionMetaDataExt, block::Block},
9 |     LakeContextExt,
10 | };
11 |
12 | pub type ReceiptId = CryptoHash;
13 | pub type TransactionHash = CryptoHash;
14 | type Cache = SizedCache<ReceiptId, TransactionHash>;
15 |
16 | #[derive(Debug, Builder)]
17 | #[builder(pattern = "owned")]
18 | pub struct ParentTransactionCache {
19 |     #[builder(
20 |         setter(custom = true, name = "cache_size"),
21 |         default = "std::sync::RwLock::new(Cache::with_size(100_000))"
22 |     )]
23 |     cache: std::sync::RwLock<Cache>,
24 |     #[builder(setter(custom = true, name = "for_accounts"))]
25 |     account_ids: Vec<AccountId>,
26 | }
27 |
28 | impl ParentTransactionCacheBuilder {
29 |     /// Sets the size of the cache. Default is 100_000.
30 |     pub fn cache_size(mut self, value: usize) -> Self {
31 |         self.cache = Some(std::sync::RwLock::new(Cache::with_size(value)));
32 |         self
33 |     }
34 |
35 |     /// Stores the Vec of [AccountId](near_lake_framework::near_indexer_primitives::near_primitives::types::AccountId) to cache transactions for.
36 |     /// If not set, the cache will be created for all the Transactions in the block.
37 |     /// If set, the cache will be created only for the transactions that have the
38 |     /// sender or receiver in the list of accounts.
39 |     /// **Warning**: This method overrides the previous value.
40 |     pub fn for_accounts(mut self, accounts_id: Vec<AccountId>) -> Self {
41 |         self.account_ids = Some(accounts_id);
42 |         self
43 |     }
44 |
45 |     /// Adds an account to the watching list for the parent transaction cache.
46 |     /// Similarly to the method [for_accounts](#method.for_accounts), this method will
47 |     /// create the cache only for the transactions that have the sender or receiver
48 |     /// in the list of accounts.
49 |     /// **Warning**: This method appends to the previous value.
50 |     pub fn for_account(mut self, account_id: AccountId) -> Self {
51 |         if let Some(mut accounts_id) = self.account_ids.take() {
52 |             accounts_id.push(account_id);
53 |             self.account_ids = Some(accounts_id);
54 |         } else {
55 |             self.account_ids = Some(vec![account_id]);
56 |         }
57 |         self
58 |     }
59 | }
60 |
61 | impl LakeContextExt for ParentTransactionCache {
62 |     /// The process to scan the [near_lake_primitives::Block](near_lake_framework::near_lake_primitives::block::Block) and update the cache
63 |     /// with the new transactions and first expected receipts.
64 |     /// The cache is used to find the parent transaction hash for a given receipt id.
65 |     fn execute_before_run(&self, block: &mut Block) {
66 |         // Fill up the cache with new transactions and first expected receipts.
67 |         // We try to skip the transactions related to the accounts we're not watching for,
68 |         // based on `account_ids`.
69 |         for tx in block.transactions().filter(move |tx| {
70 |             self.account_ids.is_empty()
71 |                 || self.account_ids.contains(tx.signer_id())
72 |                 || self.account_ids.contains(tx.receiver_id())
73 |         }) {
74 |             let tx_hash = tx.transaction_hash();
75 |             tx.actions_included()
76 |                 .map(|action| action.metadata().receipt_id())
77 |                 .for_each(|receipt_id| {
78 |                     let mut cache = self.cache.write().unwrap();
79 |                     cache.cache_set(receipt_id, tx_hash);
80 |                 });
81 |         }
82 |         for receipt in block.receipts() {
83 |             let receipt_id = receipt.receipt_id();
84 |             let mut cache = self.cache.write().unwrap();
85 |             let parent_tx_hash = cache.cache_remove(&receipt_id);
86 |
87 |             if let Some(parent_tx_hash) = parent_tx_hash {
88 |                 cache.cache_set(receipt_id, parent_tx_hash);
89 |             }
90 |         }
91 |     }
92 |
93 |     /// We don't need to do anything after the run.
94 |     fn execute_after_run(&self) {}
95 | }
96 |
97 | impl ParentTransactionCache {
98 |     /// Returns the parent transaction hash for a given receipt id.
99 |     /// If the receipt id is not found in the cache, it returns `None`.
100 |     /// If the receipt id is found in the cache, it returns the parent transaction hash.
101 |     pub fn get_parent_transaction_hash(&self, receipt_id: &ReceiptId) -> Option<TransactionHash> {
102 |         // **Note**: [cached::SizedCache] updates metadata on every cache access. That's why
103 |         // we need to use a write lock here.
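        // (A read lock would not be enough: the `Cached` trait's `cache_get` takes
        // `&mut self` so it can update the LRU bookkeeping on every hit.)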
104 |         let mut cache = self.cache.write().unwrap();
105 |         cache.cache_get(receipt_id).cloned()
106 |     }
107 | }
108 | 
--------------------------------------------------------------------------------
/lake-primitives/Cargo.toml:
--------------------------------------------------------------------------------
1 | [package]
2 | name = "near-lake-primitives"
3 | description = "Primitives for NEAR Lake"
4 | edition = "2021"
5 | version.workspace = true
6 | license.workspace = true
7 | repository.workspace = true
8 |
9 |
10 | [dependencies]
11 | anyhow = "1.0.51"
12 | near-crypto = "0.30.0-rc.1"
13 | near-primitives-core = "0.30.0-rc.1"
14 | near-primitives = "0.30.0-rc.1"
15 | near-indexer-primitives = "0.30.0-rc.1"
16 | paste = "1.0.12"
17 | serde = { version = "1", features = ["derive"] }
18 | serde_json = "1.0.75"
19 | serde_with = "3.12.0"
20 | thiserror = "1.0.38"
21 | 
--------------------------------------------------------------------------------
/lake-primitives/README.md:
--------------------------------------------------------------------------------
1 |
2 | # NEAR Lake Primitives
3 |
4 | NEAR Lake Primitives is a Rust crate that provides a set of high-level primitives specifically designed for the NEAR Lake Framework. It is part of the effort to streamline and facilitate the development of blockchain applications within the NEAR ecosystem.
5 |
6 | ## Features
7 |
8 | The crate offers a range of fundamental primitives, or basic components, that developers can use to build more complex structures and processes within the NEAR Lake Framework. These primitives are optimized for efficient computation and robust interoperability.
9 |
10 | ## Usage
11 |
12 | To use the NEAR Lake Primitives in your Rust project, add the following line to your `Cargo.toml` file:
13 |
14 | ```toml
15 | [dependencies]
16 | near-lake-primitives = "0.8.0"
17 | ```
18 |
19 | You can then import the crate in your code as follows:
20 |
21 | ```rust
22 | use near_lake_primitives;
23 | ```
24 |
25 | ## Examples
26 |
27 | TBD - Please provide examples here.
--------------------------------------------------------------------------------
/lake-primitives/src/lib.rs:
--------------------------------------------------------------------------------
1 | pub use near_indexer_primitives::{
2 |     self, near_primitives, types::AccountId, CryptoHash, IndexerShard, StreamerMessage,
3 | };
4 |
5 | pub use types::{
6 |     actions::{self, Action},
7 |     block::{self, Block, BlockHeader},
8 |     delegate_actions::{self, DelegateAction},
9 |     events::{self, Event, EventsTrait, RawEvent},
10 |     receipts::{self, Receipt, ReceiptKind},
11 |     state_changes::{self, StateChange, StateChangeCause, StateChangeValue},
12 |     transactions::{self, Transaction},
13 |     ReceiptId,
14 | };
15 |
16 | mod types;
17 | 
--------------------------------------------------------------------------------
/lake-primitives/src/types/actions.rs:
--------------------------------------------------------------------------------
1 | use near_crypto::{PublicKey, Signature};
2 | use near_indexer_primitives::{
3 |     types::{AccountId, Balance, Gas},
4 |     views, CryptoHash,
5 | };
6 |
7 | use crate::types::delegate_actions;
8 | pub use delegate_actions::{
9 |     DelegateAction, DelegateAddKey, DelegateCreateAccount, DelegateDeleteAccount,
10 |     DelegateDeleteKey, DelegateDeployContract, DelegateFunctionCall, DelegateStake,
11 |     DelegateTransfer,
12 | };
13 |
14 | /// Represents the metadata of the action.
15 | /// This is the information that is common to all actions.
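///
/// A hedged doc-sketch of reading these fields through the [ActionMetaDataExt] getters
/// (the `action` value is hypothetical; any variant works, since every action carries metadata):
/// ```ignore
/// use near_lake_primitives::actions::ActionMetaDataExt;
///
/// fn log_action_origin(action: &near_lake_primitives::Action) {
///     // Every getter clones out of the metadata, so there are no lifetimes to juggle.
///     println!(
///         "receipt {} signed by {} for {}",
///         action.receipt_id(),
///         action.signer_id(),
///         action.receiver_id(),
///     );
/// }
/// ```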
16 | #[derive(Debug, Clone)]
17 | pub struct ActionMetadata {
18 |     pub(crate) receipt_id: CryptoHash,
19 |     pub(crate) predecessor_id: AccountId,
20 |     pub(crate) receiver_id: AccountId,
21 |     pub(crate) signer_id: AccountId,
22 |     pub(crate) signer_public_key: PublicKey,
23 | }
24 |
25 | impl ActionMetadata {
26 |     /// Returns the [CryptoHash] id of the corresponding Receipt.
27 |     pub fn receipt_id(&self) -> CryptoHash {
28 |         self.receipt_id
29 |     }
30 |
31 |     /// Returns the [AccountId] of the predecessor of the action.
32 |     pub fn predecessor_id(&self) -> AccountId {
33 |         self.predecessor_id.clone()
34 |     }
35 |
36 |     /// Returns the [AccountId] of the receiver of the action.
37 |     pub fn receiver_id(&self) -> AccountId {
38 |         self.receiver_id.clone()
39 |     }
40 |
41 |     /// Returns the [AccountId] of the signer of the action.
42 |     pub fn signer_id(&self) -> AccountId {
43 |         self.signer_id.clone()
44 |     }
45 |
46 |     /// Returns the [PublicKey] of the signer of the action.
47 |     pub fn signer_public_key(&self) -> PublicKey {
48 |         self.signer_public_key.clone()
49 |     }
50 | }
51 |
52 | pub trait ActionMetaDataExt {
53 |     fn metadata(&self) -> &ActionMetadata;
54 |
55 |     fn receipt_id(&self) -> CryptoHash {
56 |         self.metadata().receipt_id()
57 |     }
58 |     fn predecessor_id(&self) -> AccountId {
59 |         self.metadata().predecessor_id()
60 |     }
61 |     fn receiver_id(&self) -> AccountId {
62 |         self.metadata().receiver_id()
63 |     }
64 |     fn signer_id(&self) -> AccountId {
65 |         self.metadata().signer_id()
66 |     }
67 |     fn signer_public_key(&self) -> PublicKey {
68 |         self.metadata().signer_public_key()
69 |     }
70 | }
71 |
72 | /// High-level representation of the `Action`.
73 | ///
74 | /// Action is "registered" in the [Transaction](super::transactions::Transaction) to be performed on the blockchain.
75 | /// There is a predefined set of actions that can be performed on the blockchain.
76 | ///
77 | /// #### Important notes on Action enum
78 | ///
79 | /// Please, note that each enum variant is a wrapper around the corresponding action struct. Also, we have special methods
80 | /// for each action type that attempt to convert the action to the corresponding struct. For example, if you have an action
81 | /// of type `Action::Transfer`, you can call `action.as_transfer()` to get the `Transfer` struct. If the action is not of
82 | /// the corresponding type, the method will return `None`. This is done to simplify the usage of the `Action` enum.
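///
/// A short, hedged sketch of that convention (assumes an `action: &Action` is already in scope):
/// ```ignore
/// if let Some(function_call) = action.as_function_call() {
///     // `as_function_call` returned `Some`, so this is an `Action::FunctionCall`.
///     println!("method called: {}", function_call.method_name());
/// }
/// ```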
83 | #[derive(Debug, Clone)] 84 | pub enum Action { 85 | CreateAccount(CreateAccount), 86 | DeployContract(DeployContract), 87 | FunctionCall(FunctionCall), 88 | Transfer(Transfer), 89 | Stake(Stake), 90 | AddKey(AddKey), 91 | DeleteKey(DeleteKey), 92 | DeleteAccount(DeleteAccount), 93 | Delegate(Delegate), 94 | DeployGlobalContract(DeployGlobalContract), 95 | DeployGlobalContractByAccountId(DeployGlobalContractByAccountId), 96 | UseGlobalContract(UseGlobalContract), 97 | UseGlobalContractByAccountId(UseGlobalContractByAccountId), 98 | } 99 | 100 | impl ActionMetaDataExt for Action { 101 | fn metadata(&self) -> &ActionMetadata { 102 | match self { 103 | Self::CreateAccount(action) => action.metadata(), 104 | Self::DeployContract(action) => action.metadata(), 105 | Self::FunctionCall(action) => action.metadata(), 106 | Self::Transfer(action) => action.metadata(), 107 | Self::Stake(action) => action.metadata(), 108 | Self::AddKey(action) => action.metadata(), 109 | Self::DeleteKey(action) => action.metadata(), 110 | Self::DeleteAccount(action) => action.metadata(), 111 | Self::Delegate(action) => action.metadata(), 112 | Self::DeployGlobalContract(action) => action.metadata(), 113 | Self::DeployGlobalContractByAccountId(action) => action.metadata(), 114 | Self::UseGlobalContract(action) => action.metadata(), 115 | Self::UseGlobalContractByAccountId(action) => action.metadata(), 116 | } 117 | } 118 | } 119 | 120 | macro_rules! impl_as_action_for { 121 | ($action_type:ident) => { 122 | paste::paste! { 123 | pub fn [< as_ $action_type:snake:lower >](&self) -> Option<&$action_type> { 124 | match self { 125 | Self::$action_type(action) => Some(action), 126 | _ => None, 127 | } 128 | } 129 | } 130 | }; 131 | } 132 | 133 | impl Action { 134 | impl_as_action_for!(CreateAccount); 135 | impl_as_action_for!(DeployContract); 136 | impl_as_action_for!(FunctionCall); 137 | impl_as_action_for!(Transfer); 138 | impl_as_action_for!(Stake); 139 | impl_as_action_for!(AddKey); 140 | impl_as_action_for!(DeleteKey); 141 | impl_as_action_for!(DeleteAccount); 142 | impl_as_action_for!(Delegate); 143 | } 144 | 145 | // Macro to implement ActionMetaDataExt trait for each Action variant. 146 | macro_rules! impl_action_metadata_ext { 147 | ($action:ident) => { 148 | impl ActionMetaDataExt for $action { 149 | fn metadata(&self) -> &ActionMetadata { 150 | &self.metadata 151 | } 152 | } 153 | }; 154 | } 155 | 156 | impl_action_metadata_ext!(CreateAccount); 157 | impl_action_metadata_ext!(DeployContract); 158 | impl_action_metadata_ext!(FunctionCall); 159 | impl_action_metadata_ext!(Transfer); 160 | impl_action_metadata_ext!(Stake); 161 | impl_action_metadata_ext!(AddKey); 162 | impl_action_metadata_ext!(DeleteKey); 163 | impl_action_metadata_ext!(DeleteAccount); 164 | impl_action_metadata_ext!(Delegate); 165 | impl_action_metadata_ext!(DeployGlobalContract); 166 | impl_action_metadata_ext!(DeployGlobalContractByAccountId); 167 | impl_action_metadata_ext!(UseGlobalContract); 168 | impl_action_metadata_ext!(UseGlobalContractByAccountId); 169 | 170 | /// Structure representing the `CreateAccount` action. 171 | /// This is a special action that is used to create a new account on the blockchain. It doesn't contain any 172 | /// additional data. The `receiver_id` from the metadata is the name of the account that is created by this action. 173 | #[derive(Debug, Clone)] 174 | pub struct CreateAccount { 175 | pub(crate) metadata: ActionMetadata, 176 | } 177 | 178 | /// Structure representing the `DeployContract` action. 
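///
/// A hedged one-line sketch (hypothetical `action` value): the deployed Wasm is exposed as raw bytes.
/// ```ignore
/// if let Some(deploy) = action.as_deploy_contract() {
///     println!("wasm blob: {} bytes", deploy.code().len());
/// }
/// ```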
179 | #[derive(Debug, Clone)]
180 | pub struct DeployContract {
181 |     pub(crate) metadata: ActionMetadata,
182 |     pub(crate) code: Vec<u8>,
183 | }
184 |
185 | impl DeployContract {
186 |     /// Returns the contract code bytes.
187 |     pub fn code(&self) -> &[u8] {
188 |         &self.code
189 |     }
190 | }
191 |
192 | /// Structure representing the `FunctionCall` action.
193 | #[derive(Debug, Clone)]
194 | pub struct FunctionCall {
195 |     pub(crate) metadata: ActionMetadata,
196 |     pub(crate) method_name: String,
197 |     pub(crate) args: Vec<u8>,
198 |     pub(crate) gas: Gas,
199 |     pub(crate) deposit: Balance,
200 | }
201 |
202 | impl FunctionCall {
203 |     /// Returns the method name this FunctionCall calls.
204 |     pub fn method_name(&self) -> &str {
205 |         &self.method_name
206 |     }
207 |
208 |     /// Returns the arguments bytes.
209 |     pub fn args(&self) -> &[u8] {
210 |         &self.args
211 |     }
212 |
213 |     /// Returns the gas attached to this FunctionCall.
214 |     pub fn gas(&self) -> Gas {
215 |         self.gas
216 |     }
217 |
218 |     /// Returns the deposit attached to this FunctionCall.
219 |     pub fn deposit(&self) -> Balance {
220 |         self.deposit
221 |     }
222 | }
223 |
224 | /// Structure representing the `Transfer` action.
225 | #[derive(Debug, Clone)]
226 | pub struct Transfer {
227 |     pub(crate) metadata: ActionMetadata,
228 |     pub(crate) deposit: Balance,
229 | }
230 |
231 | impl Transfer {
232 |     /// Returns the deposit attached to this Transfer.
233 |     pub fn deposit(&self) -> Balance {
234 |         self.deposit
235 |     }
236 | }
237 |
238 | /// Structure representing the `Stake` action.
239 | #[derive(Debug, Clone)]
240 | pub struct Stake {
241 |     pub(crate) metadata: ActionMetadata,
242 |     pub(crate) stake: Balance,
243 |     pub(crate) public_key: PublicKey,
244 | }
245 |
246 | impl Stake {
247 |     /// Returns the stake attached to this Stake.
248 |     pub fn stake(&self) -> Balance {
249 |         self.stake
250 |     }
251 |
252 |     /// Returns the public key attached to this Stake.
253 |     pub fn public_key(&self) -> &PublicKey {
254 |         &self.public_key
255 |     }
256 | }
257 |
258 | /// Structure representing the `AddKey` action.
259 | #[derive(Debug, Clone)]
260 | pub struct AddKey {
261 |     pub(crate) metadata: ActionMetadata,
262 |     pub(crate) public_key: PublicKey,
263 |     pub(crate) access_key: views::AccessKeyView,
264 | }
265 |
266 | impl AddKey {
267 |     /// Returns the [PublicKey] added with this AddKey.
268 |     pub fn public_key(&self) -> &PublicKey {
269 |         &self.public_key
270 |     }
271 |
272 |     /// Returns the [AccessKey](views::AccessKeyView) to the PublicKey being added with this AddKey.
273 |     pub fn access_key(&self) -> &views::AccessKeyView {
274 |         &self.access_key
275 |     }
276 | }
277 |
278 | /// Structure representing the `DeleteKey` action.
279 | #[derive(Debug, Clone)]
280 | pub struct DeleteKey {
281 |     pub(crate) metadata: ActionMetadata,
282 |     pub(crate) public_key: PublicKey,
283 | }
284 |
285 | impl DeleteKey {
286 |     /// Returns the [PublicKey] deleted with this DeleteKey.
287 |     pub fn public_key(&self) -> &PublicKey {
288 |         &self.public_key
289 |     }
290 | }
291 |
292 | /// Structure representing the `DeleteAccount` action.
293 | #[derive(Debug, Clone)]
294 | pub struct DeleteAccount {
295 |     pub(crate) metadata: ActionMetadata,
296 |     pub(crate) beneficiary_id: AccountId,
297 | }
298 |
299 | impl DeleteAccount {
300 |     /// Returns the beneficiary account ID of this DeleteAccount.
301 |     pub fn beneficiary_id(&self) -> &AccountId {
302 |         &self.beneficiary_id
303 |     }
304 | }
305 |
306 | /// Structure representing the `Delegate` action.
307 | /// This is related to the Meta-Transactions [NEP-366](https://github.com/near/NEPs/blob/master/neps/nep-0366.md).
308 | ///
309 | /// This action is used to delegate the right to sign transactions on behalf of the signer to another account.
310 | /// The signer is the account that is signing the transaction that contains this action.
311 | /// The receiver is the account that will be able to sign transactions on behalf of the signer.
312 | /// The `delegate_action` is the list of actions that the receiver will be able to sign on behalf of the signer.
313 | /// The `signature` is the signature of the signer on the hash of the `delegate_action`.
314 | ///
315 | /// The `delegate_action` can be any action except another `Delegate` action, which rules out nesting of `Delegate` actions.
316 | #[derive(Debug, Clone)]
317 | pub struct Delegate {
318 |     pub(crate) metadata: ActionMetadata,
319 |     pub(crate) delegate_action: Vec<DelegateAction>,
320 |     pub(crate) signature: Signature,
321 | }
322 |
323 | impl Delegate {
324 |     /// Returns the delegate actions that the receiver will be able to sign on behalf of the signer.
325 |     pub fn delegate_action(&self) -> &[delegate_actions::DelegateAction] {
326 |         &self.delegate_action
327 |     }
328 |
329 |     /// Returns the signature of the signer on the hash of the `delegate_action`.
330 |     pub fn signature(&self) -> &Signature {
331 |         &self.signature
332 |     }
333 | }
334 |
335 | #[derive(Debug, Clone)]
336 | pub struct DeployGlobalContract {
337 |     pub(crate) metadata: ActionMetadata,
338 |     pub(crate) code: Vec<u8>,
339 | }
340 |
341 | impl DeployGlobalContract {
342 |     pub fn code(&self) -> &[u8] {
343 |         &self.code
344 |     }
345 | }
346 |
347 | #[derive(Debug, Clone)]
348 | pub struct DeployGlobalContractByAccountId {
349 |     pub(crate) metadata: ActionMetadata,
350 |     pub(crate) code: Vec<u8>,
351 | }
352 |
353 | impl DeployGlobalContractByAccountId {
354 |     pub fn code(&self) -> &[u8] {
355 |         &self.code
356 |     }
357 | }
358 |
359 | #[derive(Debug, Clone)]
360 | pub struct UseGlobalContract {
361 |     pub(crate) metadata: ActionMetadata,
362 |     pub(crate) code_hash: CryptoHash,
363 | }
364 |
365 | impl UseGlobalContract {
366 |     pub fn code_hash(&self) -> &CryptoHash {
367 |         &self.code_hash
368 |     }
369 | }
370 |
371 | #[derive(Debug, Clone)]
372 | pub struct UseGlobalContractByAccountId {
373 |     pub(crate) metadata: ActionMetadata,
374 |     pub(crate) account_id: AccountId,
375 | }
376 |
377 | impl UseGlobalContractByAccountId {
378 |     pub fn account_id(&self) -> &AccountId {
379 |         &self.account_id
380 |     }
381 | }
382 | 
--------------------------------------------------------------------------------
/lake-primitives/src/types/block.rs:
--------------------------------------------------------------------------------
1 | use std::collections::HashMap;
2 |
3 | use super::actions::{self, ActionMetaDataExt};
4 | use super::events::{self, EventsTrait};
5 | use super::receipts::{self};
6 | use super::state_changes;
7 | use super::transactions;
8 | use crate::near_indexer_primitives::{types::AccountId, views, CryptoHash, StreamerMessage};
9 |
10 | /// A structure that represents an entire block in the NEAR blockchain.
11 | /// It is a high-level structure that is built on top of the low-level [StreamerMessage] structure.
12 | ///
13 | /// Access to all the data is provided through getters; thus we can refactor the structure while keeping the API stable and backward compatible.
14 | ///
15 | /// With a high-level update we are trying to replace the usage of the low-level [StreamerMessage] with this one.
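///
/// A hedged sketch of typical usage inside an indexing function (names are illustrative;
/// `block` is the value the Lake Framework hands to your handler):
/// ```ignore
/// async fn handle_block(mut block: near_lake_primitives::block::Block) -> anyhow::Result<()> {
///     println!("height: {}", block.block_height());
///     for action in block.actions() {
///         // every action of the block, across all shards, in one flat iterator
///     }
///     Ok(())
/// }
/// ```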
16 | ///
17 | /// #### Important notes on the Block
18 | /// - All the entities located on different shards were merged into one single list without differentiation.
19 | /// The statement from **NEAR** is that **sharding is going to be dynamic and seamless for the users**; that’s why we’ve decided indexer
20 | /// developers shouldn’t have to care about shards either.
21 | /// - The original [near_indexer_primitives::StreamerMessage] represents the blockchain data in **the most faithful manner**. However, it used to be
22 | /// a pain in the neck for indexer developers, so we’ve decided to act as a painkiller here.
23 | /// - [Block] is not the fairest name for this structure either. **NEAR Protocol** is a sharded blockchain, so its block is actually an
24 | /// ephemeral structure that represents a collection of *real blocks* called Chunks in **NEAR Protocol**. We’ve been simplifying things here though,
25 | /// so this is the result of that simplification.
26 | #[derive(Debug)]
27 | pub struct Block {
28 |     streamer_message: StreamerMessage,
29 |     executed_receipts: Vec<receipts::Receipt>,
30 |     postponed_receipts: Vec<receipts::Receipt>,
31 |     transactions: Vec<transactions::Transaction>,
32 |     actions: Vec<actions::Action>,
33 |     events: HashMap<super::ReceiptId, Vec<events::Event>>,
34 |     state_changes: Vec<state_changes::StateChange>,
35 | }
36 |
37 | impl Block {
38 |     /// Returns a reference to the original StreamerMessage of the block. This is the low-level structure.
39 |     ///
40 |     /// While introducing the high-level structures, methods, and helpers, we do want to keep the low-level “door” open
41 |     /// for advanced developers or edge cases which we have accidentally missed, or just don’t have the capacity to cover.
42 |     ///
43 |     /// That’s why every instance of the Block will hold the original StreamerMessage for developers.
44 |     /// Think of it as backward compatibility if you prefer.
45 |     pub fn streamer_message(&self) -> &StreamerMessage {
46 |         &self.streamer_message
47 |     }
48 |
49 |     /// Returns the block hash. It is a shortcut to get the data from the block header.
50 |     pub fn block_hash(&self) -> CryptoHash {
51 |         self.header().hash()
52 |     }
53 |
54 |     /// Returns the previous block hash. It is a shortcut to get the data from the block header.
55 |     pub fn prev_block_hash(&self) -> CryptoHash {
56 |         self.header().prev_hash()
57 |     }
58 |
59 |     /// Returns the block height. It is a shortcut to get the data from the block header.
60 |     pub fn block_height(&self) -> u64 {
61 |         self.header().height()
62 |     }
63 |
64 |     /// Returns a [BlockHeader] structure of the block
65 |     ///
66 |     /// See the [BlockHeader] structure section for details.
67 |     pub fn header(&self) -> BlockHeader {
68 |         (&self.streamer_message).into()
69 |     }
70 |
71 |     /// Returns an iterator over the [Receipt](crate::receipts::Receipt)s executed in this [Block].
72 |     ///
73 |     /// This field is a representation of `StreamerMessage.shard[N].receipt_execution_outcomes`
74 |     ///
75 |     /// A reminder that `receipt_execution_outcomes` has the type [near_indexer_primitives::IndexerExecutionOutcomeWithReceipt], which is an
76 |     /// ephemeral structure from `near-indexer-primitives` that holds a [near_primitives::views::ExecutionOutcomeView]
77 |     /// along with the corresponding [near_primitives::views::ReceiptView].
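    ///
    /// A minimal, hedged usage sketch:
    /// ```ignore
    /// for receipt in block.receipts() {
    ///     println!("{} -> {}", receipt.predecessor_id(), receipt.receiver_id());
    /// }
    /// ```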
78 |     pub fn receipts(&mut self) -> impl Iterator<Item = &receipts::Receipt> {
79 |         if self.executed_receipts.is_empty() {
80 |             self.executed_receipts = self
81 |                 .streamer_message
82 |                 .shards
83 |                 .iter()
84 |                 .flat_map(|shard| shard.receipt_execution_outcomes.iter())
85 |                 .map(Into::into)
86 |                 .collect();
87 |         }
88 |         self.executed_receipts.iter()
89 |     }
90 |
91 |     /// Returns an iterator of [Receipt](crate::receipts::Receipt)s included but not yet executed in the [Block].
92 |     ///
93 |     /// [Receipts](crate::receipts::Receipt) included on the chain but not executed yet are called "postponed";
94 |     /// they are represented by the same structure [Receipt](crate::receipts::Receipt).
95 |     pub fn postponed_receipts(&mut self) -> impl Iterator<Item = &receipts::Receipt> {
96 |         if self.postponed_receipts.is_empty() {
97 |             let executed_receipts_ids: Vec<_> = self
98 |                 .receipts()
99 |                 .map(|receipt| receipt.receipt_id())
100 |                 .collect();
101 |             self.postponed_receipts = self
102 |                 .streamer_message
103 |                 .shards
104 |                 .iter()
105 |                 .filter_map(|shard| shard.chunk.as_ref().map(|chunk| chunk.receipts.iter()))
106 |                 .flatten()
107 |                 // exclude receipts that are already executed
108 |                 .filter(|receipt| !executed_receipts_ids.contains(&receipt.receipt_id))
109 |                 .map(Into::into)
110 |                 .collect();
111 |         }
112 |         self.postponed_receipts.iter()
113 |     }
114 |
115 |     /// Returns an iterator of the [Transactions](crate::transactions::Transaction) included in the [Block].
116 |     ///
117 |     /// **Heads up!** Some indexer developers care about [Transaction](crate::transactions::Transaction)s to know where
118 |     /// the action chain began. Other indexer developers care about them out of habits
119 |     /// from other blockchains like Ethereum where a transaction is the main asset. In the case of NEAR,
120 |     /// [Receipts](crate::receipts::Receipt) are more important.
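    ///
    /// A hedged usage sketch:
    /// ```ignore
    /// for tx in block.transactions() {
    ///     println!("tx {} signed by {}", tx.transaction_hash(), tx.signer_id());
    /// }
    /// ```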
121 |     pub fn transactions(&mut self) -> impl Iterator<Item = &transactions::Transaction> {
122 |         if self.transactions.is_empty() {
123 |             self.transactions = self
124 |                 .streamer_message
125 |                 .shards
126 |                 .iter()
127 |                 .filter_map(|shard| shard.chunk.as_ref().map(|chunk| chunk.transactions.iter()))
128 |                 .flatten()
129 |                 .map(TryInto::try_into)
130 |                 .filter_map(|transactions| transactions.ok())
131 |                 .collect();
132 |         }
133 |         self.transactions.iter()
134 |     }
135 |
136 |     /// Internal method to build the cache of actions on demand
137 |     fn actions_from_streamer_message(&self) -> Vec<actions::Action> {
138 |         self.streamer_message()
139 |             .shards
140 |             .iter()
141 |             .flat_map(|shard| shard.receipt_execution_outcomes.iter())
142 |             .filter_map(|receipt_execution_outcome| {
143 |                 actions::Action::try_vec_from_receipt_view(&receipt_execution_outcome.receipt).ok()
144 |             })
145 |             .flatten()
146 |             .collect()
147 |     }
148 |
149 |     /// Returns an iterator of the [Actions](crate::actions::Action) executed in the [Block]
150 |     pub fn actions(&mut self) -> impl Iterator<Item = &actions::Action> {
151 |         if self.actions.is_empty() {
152 |             self.build_actions_cache();
153 |         }
154 |         self.actions.iter()
155 |     }
156 |
157 |     /// Returns an iterator of the [Events](crate::events::Event) emitted in the [Block]
158 |     pub fn events(&mut self) -> impl Iterator<Item = &events::Event> {
159 |         if self.events.is_empty() {
160 |             self.build_events_hashmap();
161 |         }
162 |         self.events.values().flatten()
163 |     }
164 |
165 |     /// Returns an iterator of the [StateChanges](crate::state_changes::StateChange) that happened in the [Block]
166 |     pub fn state_changes(&mut self) -> impl Iterator<Item = &state_changes::StateChange> {
167 |         if self.state_changes.is_empty() {
168 |             self.state_changes = self
169 |                 .streamer_message
170 |                 .shards
171 |                 .iter()
172 |                 .flat_map(|shard| shard.state_changes.iter())
173 |                 .map(Into::into)
174 |                 .collect();
175 |         }
176 |         self.state_changes.iter()
177 |     }
178 |
179 |     /// Helper to get all the [Actions](crate::actions::Action) produced by a single [Receipt](crate::receipts::Receipt)
180 |     ///
181 |     /// **Heads up!** This method searches for the actions in the current [Block] only.
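    ///
    /// A short, hedged sketch (assuming `receipt_id` was captured earlier):
    /// ```ignore
    /// let actions: Vec<_> = block.actions_by_receipt_id(&receipt_id).cloned().collect();
    /// ```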
182 |     pub fn actions_by_receipt_id<'a>(
183 |         &'a mut self,
184 |         receipt_id: &'a super::ReceiptId,
185 |     ) -> impl Iterator<Item = &'a actions::Action> + 'a {
186 |         self.actions()
187 |             .filter(move |action| &action.receipt_id() == receipt_id)
188 |     }
189 |
190 |     /// Helper to get all the [Events](crate::events::Event) emitted by the specific [Receipt](crate::receipts::Receipt)
191 |     pub fn events_by_receipt_id(&mut self, receipt_id: &super::ReceiptId) -> Vec<events::Event> {
192 |         if self.events.is_empty() {
193 |             self.build_events_hashmap();
194 |         }
195 |         if let Some(events) = self.events.get(receipt_id) {
196 |             events.to_vec()
197 |         } else {
198 |             vec![]
199 |         }
200 |     }
201 |
202 |     /// Helper to get all the [Events](crate::events::Event) emitted by the specific contract ([AccountId](crate::near_indexer_primitives::types::AccountId))
203 |     pub fn events_by_contract_id<'a>(
204 |         &'a mut self,
205 |         account_id: &'a crate::near_indexer_primitives::types::AccountId,
206 |     ) -> impl Iterator<Item = &'a events::Event> + 'a {
207 |         self.events()
208 |             .filter(move |event| event.is_emitted_by_contract(account_id))
209 |     }
210 |
211 |     /// Helper to get a specific [Receipt](crate::receipts::Receipt) by the [ReceiptId](crate::types::ReceiptId)
212 |     pub fn receipt_by_id(&mut self, receipt_id: &super::ReceiptId) -> Option<&receipts::Receipt> {
213 |         self.receipts()
214 |             .find(|receipt| &receipt.receipt_id() == receipt_id)
215 |     }
216 | }
217 |
218 | impl Block {
219 |     // Internal method to build the cache of actions on demand
220 |     fn build_actions_cache(&mut self) {
221 |         self.actions = self.actions_from_streamer_message();
222 |     }
223 |
224 |     // Internal method to build the cache of events on demand
225 |     fn build_events_hashmap(&mut self) {
226 |         self.events = self
227 |             .receipts()
228 |             .map(|receipt| (receipt.receipt_id(), receipt.events()))
229 |             .collect();
230 |     }
231 | }
232 |
233 | impl From<StreamerMessage> for Block {
234 |     fn from(streamer_message: StreamerMessage) -> Self {
235 |         Self {
236 |             streamer_message,
237 |             executed_receipts: vec![],
238 |             postponed_receipts: vec![],
239 |             transactions: vec![],
240 |             actions: vec![],
241 |             events: HashMap::new(),
242 |             state_changes: vec![],
243 |         }
244 |     }
245 | }
246 |
247 | /// Replacement for [`BlockHeaderView`](near_primitives::views::BlockHeaderView) from `near-primitives`. Shrunk and simplified.
248 | /// We were trying to leave only the fields indexer developers might be interested in.
249 | ///
250 | /// Friendly reminder, the original [`BlockHeaderView`](near_primitives::views::BlockHeaderView) is still accessible
251 | /// via the [`.streamer_message()`](Block::streamer_message()) method.
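///
/// A hedged sketch of reading a few header fields:
/// ```ignore
/// let header = block.header();
/// println!("{} @ {} by {}", header.hash(), header.height(), header.author());
/// ```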
252 | #[derive(Debug, Clone)]
253 | pub struct BlockHeader {
254 |     height: u64,
255 |     hash: CryptoHash,
256 |     prev_hash: CryptoHash,
257 |     author: AccountId,
258 |     timestamp_nanosec: u64,
259 |     epoch_id: CryptoHash,
260 |     next_epoch_id: CryptoHash,
261 |     gas_price: u128,
262 |     total_supply: u128,
263 |     latest_protocol_version: u32,
264 |     random_value: CryptoHash,
265 |     chunks_included: u64,
266 |     validator_proposals: Vec<views::validator_stake_view::ValidatorStakeView>,
267 | }
268 |
269 | impl BlockHeader {
270 |     /// The height of the [Block]
271 |     pub fn height(&self) -> u64 {
272 |         self.height
273 |     }
274 |
275 |     /// The hash of the [Block]
276 |     pub fn hash(&self) -> CryptoHash {
277 |         self.hash
278 |     }
279 |
280 |     /// The hash of the previous [Block]
281 |     pub fn prev_hash(&self) -> CryptoHash {
282 |         self.prev_hash
283 |     }
284 |
285 |     /// The [AccountId](crate::near_indexer_primitives::types::AccountId) of the author of the [Block]
286 |     pub fn author(&self) -> AccountId {
287 |         self.author.clone()
288 |     }
289 |
290 |     /// The timestamp of the [Block] in nanoseconds
291 |     pub fn timestamp_nanosec(&self) -> u64 {
292 |         self.timestamp_nanosec
293 |     }
294 |
295 |     /// The [CryptoHash] of the epoch the [Block] belongs to
296 |     pub fn epoch_id(&self) -> CryptoHash {
297 |         self.epoch_id
298 |     }
299 |
300 |     /// The [CryptoHash] of the next epoch
301 |     pub fn next_epoch_id(&self) -> CryptoHash {
302 |         self.next_epoch_id
303 |     }
304 |
305 |     /// The gas price of the [Block]
306 |     pub fn gas_price(&self) -> u128 {
307 |         self.gas_price
308 |     }
309 |
310 |     /// The total supply of the [Block]
311 |     pub fn total_supply(&self) -> u128 {
312 |         self.total_supply
313 |     }
314 |
315 |     /// The latest protocol version of the [Block]
316 |     pub fn latest_protocol_version(&self) -> u32 {
317 |         self.latest_protocol_version
318 |     }
319 |
320 |     /// The random value of the [Block]
321 |     pub fn random_value(&self) -> CryptoHash {
322 |         self.random_value
323 |     }
324 |
325 |     /// The number of chunks included in the [Block]
326 |     pub fn chunks_included(&self) -> u64 {
327 |         self.chunks_included
328 |     }
329 |
330 |     /// The validator proposals of the [Block]
331 |     ///
332 |     /// **Heads up!** This method returns types defined in the `near-primitives` crate as is.
333 |     /// It is subject to change in the future (once we define the corresponding Lake Primitives types).
334 |     pub fn validator_proposals(&self) -> Vec<views::validator_stake_view::ValidatorStakeView> {
335 |         self.validator_proposals.clone()
336 |     }
337 | }
338 |
339 | impl From<&StreamerMessage> for BlockHeader {
340 |     fn from(streamer_message: &StreamerMessage) -> Self {
341 |         Self {
342 |             height: streamer_message.block.header.height,
343 |             hash: streamer_message.block.header.hash,
344 |             prev_hash: streamer_message.block.header.prev_hash,
345 |             author: streamer_message.block.author.clone(),
346 |             timestamp_nanosec: streamer_message.block.header.timestamp_nanosec,
347 |             epoch_id: streamer_message.block.header.epoch_id,
348 |             next_epoch_id: streamer_message.block.header.next_epoch_id,
349 |             gas_price: streamer_message.block.header.gas_price,
350 |             total_supply: streamer_message.block.header.total_supply,
351 |             latest_protocol_version: streamer_message.block.header.latest_protocol_version,
352 |             random_value: streamer_message.block.header.random_value,
353 |             chunks_included: streamer_message.block.header.chunks_included,
354 |             validator_proposals: streamer_message.block.header.validator_proposals.clone(),
355 |         }
356 |     }
357 | }
358 | 
--------------------------------------------------------------------------------
/lake-primitives/src/types/delegate_actions.rs:
--------------------------------------------------------------------------------
1 | use near_crypto::PublicKey;
2 | use near_indexer_primitives::{
3 |     types::{AccountId, Balance, Gas},
4 |     views::{self, AccessKeyView},
5 | };
6 |
7 | /// Similarly to the [Action](super::actions::Action) enum, this enum represents the different types of actions that can be
8 | /// delegated.
9 | ///
10 | /// The `DelegateAction` enum has a corresponding variant for every possible `Action` except the `Delegate` action itself,
11 | /// thus forbidding the nesting of delegate actions while keeping the mapping to `Action` exhaustive.
12 | /// Another difference is that `DelegateAction` itself and its variants do not hold metadata and don't implement `ActionMetaDataExt`.
13 | #[derive(Debug, Clone)]
14 | pub enum DelegateAction {
15 |     DelegateCreateAccount(DelegateCreateAccount),
16 |     DelegateDeployContract(DelegateDeployContract),
17 |     DelegateFunctionCall(DelegateFunctionCall),
18 |     DelegateTransfer(DelegateTransfer),
19 |     DelegateStake(DelegateStake),
20 |     DelegateAddKey(DelegateAddKey),
21 |     DelegateDeleteKey(DelegateDeleteKey),
22 |     DelegateDeleteAccount(DelegateDeleteAccount),
23 | }
24 |
25 | impl DelegateAction {
26 |     /// Attempts to return the [DelegateFunctionCall](struct@DelegateFunctionCall) struct if the variant is [DelegateAction::DelegateFunctionCall]. Otherwise returns `None`.
27 |     pub fn as_delegate_function_call(&self) -> Option<&DelegateFunctionCall> {
28 |         match self {
29 |             DelegateAction::DelegateFunctionCall(action) => Some(action),
30 |             _ => None,
31 |         }
32 |     }
33 |
34 |     /// Attempts to return the [DelegateCreateAccount] struct if the variant is [DelegateAction::DelegateCreateAccount]. Otherwise returns `None`.
35 |     pub fn as_delegate_create_account(&self) -> Option<&DelegateCreateAccount> {
36 |         match self {
37 |             DelegateAction::DelegateCreateAccount(action) => Some(action),
38 |             _ => None,
39 |         }
40 |     }
41 |
42 |     /// Attempts to return the [DelegateDeployContract] struct if the variant is [DelegateAction::DelegateDeployContract]. Otherwise returns `None`.
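    ///
    /// A hedged one-liner sketch (hypothetical `delegate_action` value):
    /// ```ignore
    /// if let Some(deploy) = delegate_action.as_delegate_deploy_contract() {
    ///     println!("delegated code size: {} bytes", deploy.code().len());
    /// }
    /// ```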
43 |     pub fn as_delegate_deploy_contract(&self) -> Option<&DelegateDeployContract> {
44 |         match self {
45 |             DelegateAction::DelegateDeployContract(action) => Some(action),
46 |             _ => None,
47 |         }
48 |     }
49 |
50 |     /// Attempts to return the [DelegateTransfer] struct if the variant is [DelegateAction::DelegateTransfer]. Otherwise returns `None`.
51 |     pub fn as_delegate_transfer(&self) -> Option<&DelegateTransfer> {
52 |         match self {
53 |             DelegateAction::DelegateTransfer(action) => Some(action),
54 |             _ => None,
55 |         }
56 |     }
57 |
58 |     /// Attempts to return the [DelegateStake] struct if the variant is [DelegateAction::DelegateStake]. Otherwise returns `None`.
59 |     pub fn as_delegate_stake(&self) -> Option<&DelegateStake> {
60 |         match self {
61 |             DelegateAction::DelegateStake(action) => Some(action),
62 |             _ => None,
63 |         }
64 |     }
65 |
66 |     /// Attempts to return the [DelegateAddKey] struct if the variant is [DelegateAction::DelegateAddKey]. Otherwise returns `None`.
67 |     pub fn as_delegate_add_key(&self) -> Option<&DelegateAddKey> {
68 |         match self {
69 |             DelegateAction::DelegateAddKey(action) => Some(action),
70 |             _ => None,
71 |         }
72 |     }
73 |
74 |     /// Attempts to return the [DelegateDeleteKey] struct if the variant is [DelegateAction::DelegateDeleteKey]. Otherwise returns `None`.
75 |     pub fn as_delegate_delete_key(&self) -> Option<&DelegateDeleteKey> {
76 |         match self {
77 |             DelegateAction::DelegateDeleteKey(action) => Some(action),
78 |             _ => None,
79 |         }
80 |     }
81 |
82 |     /// Attempts to return the [DelegateDeleteAccount] struct if the variant is [DelegateAction::DelegateDeleteAccount]. Otherwise returns `None`.
83 |     pub fn as_delegate_delete_account(&self) -> Option<&DelegateDeleteAccount> {
84 |         match self {
85 |             DelegateAction::DelegateDeleteAccount(action) => Some(action),
86 |             _ => None,
87 |         }
88 |     }
89 | }
90 |
91 | /// Similarly to [CreateAccount](super::actions::CreateAccount), this struct represents the `CreateAccount` action that is delegated.
92 | #[derive(Debug, Clone)]
93 | pub struct DelegateCreateAccount;
94 |
95 | /// Similarly to [DeployContract](super::actions::DeployContract), this struct represents the `DeployContract` action that is delegated.
96 | #[derive(Debug, Clone)]
97 | pub struct DelegateDeployContract {
98 |     pub(crate) code: Vec<u8>,
99 | }
100 |
101 | impl DelegateDeployContract {
102 |     /// Returns the bytes of the contract code that is being deployed.
103 |     pub fn code(&self) -> &[u8] {
104 |         &self.code
105 |     }
106 | }
107 |
108 | /// Similarly to [FunctionCall](super::actions::FunctionCall), this struct represents the `FunctionCall` action that is delegated.
109 | #[derive(Debug, Clone)]
110 | pub struct DelegateFunctionCall {
111 |     pub(crate) method_name: String,
112 |     pub(crate) args: Vec<u8>,
113 |     pub(crate) gas: Gas,
114 |     pub(crate) deposit: Balance,
115 | }
116 |
117 | impl DelegateFunctionCall {
118 |     /// Returns the name of the method that is being called.
119 |     pub fn method_name(&self) -> &str {
120 |         &self.method_name
121 |     }
122 |
123 |     /// Returns the bytes of the arguments that are being passed to the method.
124 |     pub fn args(&self) -> &[u8] {
125 |         &self.args
126 |     }
127 |
128 |     /// Returns the amount of gas that is being used for the method call.
129 |     pub fn gas(&self) -> Gas {
130 |         self.gas
131 |     }
132 |
133 |     /// Returns the amount of tokens that are being deposited to the contract.
134 |     pub fn deposit(&self) -> Balance {
135 |         self.deposit
136 |     }
137 | }
138 |
139 | /// Similarly to [Transfer](super::actions::Transfer), this struct represents the `Transfer` action that is delegated.
140 | #[derive(Debug, Clone)]
141 | pub struct DelegateTransfer {
142 |     pub(crate) deposit: Balance,
143 | }
144 |
145 | impl DelegateTransfer {
146 |     /// Returns the amount of tokens that are being transferred.
147 |     pub fn deposit(&self) -> Balance {
148 |         self.deposit
149 |     }
150 | }
151 |
152 | /// Similarly to [Stake](super::actions::Stake), this struct represents the `Stake` action that is delegated.
153 | #[derive(Debug, Clone)]
154 | pub struct DelegateStake {
155 |     pub(crate) stake: Balance,
156 |     pub(crate) public_key: PublicKey,
157 | }
158 |
159 | impl DelegateStake {
160 |     /// Returns the amount of tokens that are being staked.
161 |     pub fn stake(&self) -> Balance {
162 |         self.stake
163 |     }
164 |
165 |     /// Returns the public key of the staking pool.
166 |     pub fn public_key(&self) -> &PublicKey {
167 |         &self.public_key
168 |     }
169 | }
170 |
171 | /// Similarly to [AddKey](super::actions::AddKey), this struct represents the `AddKey` action that is delegated.
172 | #[derive(Debug, Clone)]
173 | pub struct DelegateAddKey {
174 |     pub(crate) public_key: PublicKey,
175 |     pub(crate) access_key: AccessKeyView,
176 | }
177 |
178 | impl DelegateAddKey {
179 |     /// Returns the public key that is being added.
180 |     pub fn public_key(&self) -> &PublicKey {
181 |         &self.public_key
182 |     }
183 |
184 |     /// Returns the access key that is being added.
185 |     pub fn access_key(&self) -> &AccessKeyView {
186 |         &self.access_key
187 |     }
188 | }
189 |
190 | /// Similarly to [DeleteKey](super::actions::DeleteKey), this struct represents the `DeleteKey` action that is delegated.
191 | #[derive(Debug, Clone)]
192 | pub struct DelegateDeleteKey {
193 |     pub(crate) public_key: PublicKey,
194 | }
195 |
196 | impl DelegateDeleteKey {
197 |     /// Returns the public key that is being deleted.
198 |     pub fn public_key(&self) -> &PublicKey {
199 |         &self.public_key
200 |     }
201 | }
202 |
203 | /// Similarly to [DeleteAccount](super::actions::DeleteAccount), this struct represents the `DeleteAccount` action that is delegated.
204 | #[derive(Debug, Clone)]
205 | pub struct DelegateDeleteAccount {
206 |     pub(crate) beneficiary_id: AccountId,
207 | }
208 |
209 | impl DelegateDeleteAccount {
210 |     /// Returns the account ID of the beneficiary.
211 |     pub fn beneficiary_id(&self) -> &AccountId {
212 |         &self.beneficiary_id
213 |     }
214 | }
215 |
216 | impl DelegateAction {
217 |     // Tries to convert a `near_primitives::action::delegate::DelegateAction` into a [Vec<DelegateAction>].
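    // (Conversion fails with an error string if a nested `Delegate` action is
    // encountered, mirroring the protocol-level ban on nesting from NEP-366.)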
218 |     pub fn try_from_delegate_action(
219 |         delegate_action: &near_primitives::action::delegate::DelegateAction,
220 |     ) -> Result<Vec<Self>, &'static str> {
221 |         let mut actions = Vec::with_capacity(delegate_action.actions.len());
222 |
223 |         for nearcore_action in delegate_action.clone().actions {
224 |             let action = match views::ActionView::from(
225 |                 <near_primitives::action::delegate::NonDelegateAction as Into<
226 |                     near_primitives::transaction::Action,
227 |                 >>::into(nearcore_action),
228 |             ) {
229 |                 views::ActionView::CreateAccount => {
230 |                     Self::DelegateCreateAccount(DelegateCreateAccount)
231 |                 }
232 |                 views::ActionView::DeployContract { code } => {
233 |                     Self::DelegateDeployContract(DelegateDeployContract { code })
234 |                 }
235 |                 views::ActionView::FunctionCall {
236 |                     method_name,
237 |                     args,
238 |                     gas,
239 |                     deposit,
240 |                 } => Self::DelegateFunctionCall(DelegateFunctionCall {
241 |                     method_name,
242 |                     args: args.into(),
243 |                     gas,
244 |                     deposit,
245 |                 }),
246 |                 views::ActionView::Transfer { deposit } => {
247 |                     Self::DelegateTransfer(DelegateTransfer { deposit })
248 |                 }
249 |                 views::ActionView::Stake { stake, public_key } => {
250 |                     Self::DelegateStake(DelegateStake { stake, public_key })
251 |                 }
252 |                 views::ActionView::AddKey {
253 |                     public_key,
254 |                     access_key,
255 |                 } => Self::DelegateAddKey(DelegateAddKey {
256 |                     public_key,
257 |                     access_key,
258 |                 }),
259 |                 views::ActionView::DeleteKey { public_key } => {
260 |                     Self::DelegateDeleteKey(DelegateDeleteKey { public_key })
261 |                 }
262 |                 views::ActionView::DeleteAccount { beneficiary_id } => {
263 |                     Self::DelegateDeleteAccount(DelegateDeleteAccount { beneficiary_id })
264 |                 }
265 |                 _ => return Err("Cannot delegate DelegateAction"),
266 |             };
267 |             actions.push(action);
268 |         }
269 |         Ok(actions)
270 |     }
271 | }
272 | 
--------------------------------------------------------------------------------
/lake-primitives/src/types/events.rs:
--------------------------------------------------------------------------------
1 | use crate::AccountId;
2 |
3 | use super::receipts::Receipt;
4 |
5 | /// High-level representation of the Event according to the [Events Format](https://nomicon.io/Standards/EventsFormat.html).
6 | /// In addition to the event this structure holds the data about the related [Receipt]: `receipt_id`, `receiver_id` and `predecessor_id`. All these fields are accessible via the corresponding getters.
7 | #[derive(Clone, Debug)]
8 | pub struct Event {
9 |     pub(crate) related_receipt_id: crate::CryptoHash,
10 |     pub(crate) receiver_id: AccountId,
11 |     pub(crate) predecessor_id: AccountId,
12 |     pub(crate) raw_event: RawEvent,
13 | }
14 |
15 | impl Event {
16 |     /// Returns the `event` value from the [RawEvent].
17 |     pub fn event(&self) -> &str {
18 |         &self.raw_event.event
19 |     }
20 |
21 |     /// Returns the `standard` value from the [RawEvent].
22 |     pub fn standard(&self) -> &str {
23 |         &self.raw_event.standard
24 |     }
25 |
26 |     /// Returns the `version` value from the [RawEvent].
27 |     pub fn version(&self) -> &str {
28 |         &self.raw_event.version
29 |     }
30 |
31 |     /// Returns the `data` value from the [RawEvent] if present, otherwise returns `None`.
32 |     pub fn data(&self) -> Option<&serde_json::Value> {
33 |         self.raw_event.data.as_ref()
34 |     }
35 |
36 |     /// Returns the [CryptoHash](crate::CryptoHash) id of the related [Receipt].
37 |     ///
38 |     /// **Please note** that events are emitted through the `ExecutionOutcome` logs. In turn, the `ExecutionOutcome`
39 |     /// is a result of the execution of the [Receipt].
40 |     pub fn related_receipt_id(&self) -> crate::CryptoHash {
41 |         self.related_receipt_id
42 |     }
43 |
44 |     /// Returns the [AccountId] of the receiver of the related [Receipt].
45 |     pub fn related_receipt_receiver_id(&self) -> &AccountId {
46 |         &self.receiver_id
47 |     }
48 |
49 |     /// Returns the [AccountId] of the predecessor of the related [Receipt].
50 |     pub fn related_receipt_predecessor_id(&self) -> &AccountId {
51 |         &self.predecessor_id
52 |     }
53 |
54 |     /// Returns true if the event is produced by the given contract id.
55 |     pub fn is_emitted_by_contract(&self, contract_account_id: &AccountId) -> bool {
56 |         &self.receiver_id == contract_account_id
57 |     }
58 | }
59 |
60 | /// This structure is an honest representation of the Events Format standard described here
61 | /// <https://nomicon.io/Standards/EventsFormat.html>
62 | #[derive(Clone, Debug, serde::Deserialize)]
63 | pub struct RawEvent {
64 |     pub event: String,
65 |     pub standard: String,
66 |     pub version: String,
67 |     pub data: Option<serde_json::Value>,
68 | }
69 |
70 | impl RawEvent {
71 |     /// Parses the log message (originating from `ExecutionOutcome` logs, but not limited to them) and returns the RawEvent.
72 |     pub fn from_log(log: &str) -> anyhow::Result<Self> {
73 |         let prefix = "EVENT_JSON:";
74 |         if !log.starts_with(prefix) {
75 |             anyhow::bail!("log message doesn't start from required prefix");
76 |         }
77 |
78 |         Ok(serde_json::from_str::<'_, Self>(
79 |             log[prefix.len()..].trim(),
80 |         )?)
81 |     }
82 | }
83 |
84 | pub trait EventsTrait {
85 |     fn events(&self) -> Vec<Event>;
86 | }
87 |
88 | impl EventsTrait for Receipt {
89 |     /// Reads the logs from the [Receipt] and extracts all the [Events](Event) from it into a Vec.
90 |     fn events(&self) -> Vec<Event> {
91 |         self.logs()
92 |             .iter()
93 |             .filter_map(|log| RawEvent::from_log(log).ok())
94 |             .map(|raw_event| Event {
95 |                 related_receipt_id: self.receipt_id(),
96 |                 receiver_id: self.receiver_id(),
97 |                 predecessor_id: self.predecessor_id(),
98 |                 raw_event,
99 |             })
100 |             .collect()
101 |     }
102 | }
103 | 
--------------------------------------------------------------------------------
/lake-primitives/src/types/impl_actions.rs:
--------------------------------------------------------------------------------
1 | use near_indexer_primitives::{views, IndexerTransactionWithOutcome};
2 |
3 | use crate::actions::{Action, ActionMetadata, DelegateAction};
4 |
5 | impl Action {
6 |     // Tries to convert a [&ReceiptView](views::ReceiptView) into a vector of [Action].
7 |     pub fn try_vec_from_receipt_view(
8 |         receipt_view: &views::ReceiptView,
9 |     ) -> Result<Vec<Self>, &'static str> {
10 |         if let views::ReceiptEnumView::Action {
11 |             actions,
12 |             signer_id,
13 |             signer_public_key,
14 |             ..
15 | } = &receipt_view.receipt 16 | { 17 | let metadata = ActionMetadata { 18 | receipt_id: receipt_view.receipt_id, 19 | predecessor_id: receipt_view.predecessor_id.clone(), 20 | receiver_id: receipt_view.receiver_id.clone(), 21 | signer_id: signer_id.clone(), 22 | signer_public_key: signer_public_key.clone(), 23 | }; 24 | 25 | let mut result = Vec::with_capacity(actions.len()); 26 | 27 | for action in actions { 28 | let action_kind = match action { 29 | views::ActionView::CreateAccount => { 30 | Self::CreateAccount(crate::actions::CreateAccount { 31 | metadata: metadata.clone(), 32 | }) 33 | } 34 | views::ActionView::DeployContract { code } => { 35 | Self::DeployContract(crate::actions::DeployContract { 36 | metadata: metadata.clone(), 37 | code: code.clone(), 38 | }) 39 | } 40 | views::ActionView::FunctionCall { 41 | method_name, 42 | args, 43 | gas, 44 | deposit, 45 | } => Self::FunctionCall(crate::actions::FunctionCall { 46 | metadata: metadata.clone(), 47 | method_name: method_name.clone(), 48 | args: args.clone().into(), 49 | gas: *gas, 50 | deposit: *deposit, 51 | }), 52 | views::ActionView::Transfer { deposit } => { 53 | Self::Transfer(crate::actions::Transfer { 54 | metadata: metadata.clone(), 55 | deposit: *deposit, 56 | }) 57 | } 58 | views::ActionView::Stake { stake, public_key } => { 59 | Self::Stake(crate::actions::Stake { 60 | metadata: metadata.clone(), 61 | stake: *stake, 62 | public_key: public_key.clone(), 63 | }) 64 | } 65 | views::ActionView::AddKey { 66 | public_key, 67 | access_key, 68 | } => Self::AddKey(crate::actions::AddKey { 69 | metadata: metadata.clone(), 70 | public_key: public_key.clone(), 71 | access_key: access_key.clone(), 72 | }), 73 | views::ActionView::DeleteKey { public_key } => { 74 | Self::DeleteKey(crate::actions::DeleteKey { 75 | metadata: metadata.clone(), 76 | public_key: public_key.clone(), 77 | }) 78 | } 79 | views::ActionView::DeleteAccount { beneficiary_id } => { 80 | Self::DeleteAccount(crate::actions::DeleteAccount { 81 | metadata: metadata.clone(), 82 | beneficiary_id: beneficiary_id.clone(), 83 | }) 84 | } 85 | views::ActionView::Delegate { 86 | delegate_action, 87 | signature, 88 | } => { 89 | let delegate_actions = 90 | DelegateAction::try_from_delegate_action(delegate_action)?; 91 | 92 | Self::Delegate(crate::actions::Delegate { 93 | metadata: metadata.clone(), 94 | delegate_action: delegate_actions, 95 | signature: signature.clone(), 96 | }) 97 | } 98 | views::ActionView::DeployGlobalContract { code } => { 99 | Self::DeployGlobalContract(crate::actions::DeployGlobalContract { 100 | metadata: metadata.clone(), 101 | code: code.to_vec(), 102 | }) 103 | } 104 | views::ActionView::DeployGlobalContractByAccountId { code } => { 105 | Self::DeployGlobalContractByAccountId( 106 | crate::actions::DeployGlobalContractByAccountId { 107 | metadata: metadata.clone(), 108 | code: code.to_vec(), 109 | }, 110 | ) 111 | } 112 | views::ActionView::UseGlobalContract { code_hash } => { 113 | Self::UseGlobalContract(crate::actions::UseGlobalContract { 114 | metadata: metadata.clone(), 115 | code_hash: *code_hash, 116 | }) 117 | } 118 | views::ActionView::UseGlobalContractByAccountId { account_id } => { 119 | Self::UseGlobalContractByAccountId( 120 | crate::actions::UseGlobalContractByAccountId { 121 | metadata: metadata.clone(), 122 | account_id: account_id.clone(), 123 | }, 124 | ) 125 | } 126 | }; 127 | result.push(action_kind); 128 | } 129 | Ok(result) 130 | } else { 131 | Err("Only `ReceiptEnumView::Action` can be converted into Vec") 132 | } 133 | 
134 | 
135 |     // Tries to convert a [IndexerTransactionWithOutcome] to a Vec<Action>
136 |     pub fn try_vec_from_transaction_outcome(
137 |         transaction_with_outcome: &IndexerTransactionWithOutcome,
138 |     ) -> Result<Vec<Self>, &'static str> {
139 |         let metadata = ActionMetadata {
140 |             receipt_id: *transaction_with_outcome
141 |                 .outcome
142 |                 .execution_outcome
143 |                 .outcome
144 |                 .receipt_ids
145 |                 .first()
146 |                 .ok_or("Transaction conversion ReceiptId is missing")?,
147 |             predecessor_id: transaction_with_outcome.transaction.signer_id.clone(),
148 |             receiver_id: transaction_with_outcome.transaction.receiver_id.clone(),
149 |             signer_id: transaction_with_outcome.transaction.signer_id.clone(),
150 |             signer_public_key: transaction_with_outcome.transaction.public_key.clone(),
151 |         };
152 | 
153 |         let mut actions: Vec<Self> = vec![];
154 | 
155 |         for nearcore_action in &transaction_with_outcome.transaction.actions {
156 |             let action = match nearcore_action {
157 |                 views::ActionView::CreateAccount => {
158 |                     Self::CreateAccount(crate::actions::CreateAccount {
159 |                         metadata: metadata.clone(),
160 |                     })
161 |                 }
162 |                 views::ActionView::DeployContract { code } => {
163 |                     Self::DeployContract(crate::actions::DeployContract {
164 |                         metadata: metadata.clone(),
165 |                         code: code.to_vec(),
166 |                     })
167 |                 }
168 |                 views::ActionView::FunctionCall {
169 |                     method_name,
170 |                     args,
171 |                     gas,
172 |                     deposit,
173 |                 } => Self::FunctionCall(crate::actions::FunctionCall {
174 |                     metadata: metadata.clone(),
175 |                     method_name: method_name.to_string(),
176 |                     args: args.to_vec(),
177 |                     gas: *gas,
178 |                     deposit: *deposit,
179 |                 }),
180 |                 views::ActionView::Transfer { deposit } => {
181 |                     Self::Transfer(crate::actions::Transfer {
182 |                         metadata: metadata.clone(),
183 |                         deposit: *deposit,
184 |                     })
185 |                 }
186 |                 views::ActionView::Stake { stake, public_key } => {
187 |                     Self::Stake(crate::actions::Stake {
188 |                         metadata: metadata.clone(),
189 |                         stake: *stake,
190 |                         public_key: public_key.clone(),
191 |                     })
192 |                 }
193 |                 views::ActionView::AddKey {
194 |                     public_key,
195 |                     access_key,
196 |                 } => Self::AddKey(crate::actions::AddKey {
197 |                     metadata: metadata.clone(),
198 |                     public_key: public_key.clone(),
199 |                     access_key: access_key.clone(),
200 |                 }),
201 |                 views::ActionView::DeleteKey { public_key } => {
202 |                     Self::DeleteKey(crate::actions::DeleteKey {
203 |                         metadata: metadata.clone(),
204 |                         public_key: public_key.clone(),
205 |                     })
206 |                 }
207 |                 views::ActionView::DeleteAccount { beneficiary_id } => {
208 |                     Self::DeleteAccount(crate::actions::DeleteAccount {
209 |                         metadata: metadata.clone(),
210 |                         beneficiary_id: beneficiary_id.clone(),
211 |                     })
212 |                 }
213 |                 views::ActionView::Delegate {
214 |                     delegate_action,
215 |                     signature,
216 |                 } => Self::Delegate(crate::actions::Delegate {
217 |                     metadata: metadata.clone(),
218 |                     delegate_action: DelegateAction::try_from_delegate_action(delegate_action)?,
219 |                     signature: signature.clone(),
220 |                 }),
221 |                 views::ActionView::DeployGlobalContract { code } => {
222 |                     Self::DeployGlobalContract(crate::actions::DeployGlobalContract {
223 |                         metadata: metadata.clone(),
224 |                         code: code.to_vec(),
225 |                     })
226 |                 }
227 |                 views::ActionView::DeployGlobalContractByAccountId { code } => {
228 |                     Self::DeployGlobalContractByAccountId(
229 |                         crate::actions::DeployGlobalContractByAccountId {
230 |                             metadata: metadata.clone(),
231 |                             code: code.to_vec(),
232 |                         },
233 |                     )
234 |                 }
235 |                 views::ActionView::UseGlobalContract { code_hash } => {
236 |                     Self::UseGlobalContract(crate::actions::UseGlobalContract {
237 |                         metadata: metadata.clone(),
238 |                         code_hash: *code_hash,
239 |                     })
240 |                 }
241 |                 views::ActionView::UseGlobalContractByAccountId { account_id } => {
242 |                     Self::UseGlobalContractByAccountId(
243 |                         crate::actions::UseGlobalContractByAccountId {
244 |                             metadata: metadata.clone(),
245 |                             account_id: account_id.clone(),
246 |                         },
247 |                     )
248 |                 }
249 |             };
250 | 
251 |             actions.push(action);
252 |         }
253 | 
254 |         Ok(actions)
255 |     }
256 | }
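// Editor's note (not part of the original source): a hedged usage sketch for
// the constructor above. `tx_with_outcome` is a hypothetical binding for an
// `IndexerTransactionWithOutcome` taken from a streamed block; the `?`
// surfaces the `&'static str` error on conversion failure.
//
//     let actions = Action::try_vec_from_transaction_outcome(&tx_with_outcome)?;
//     // One high-level `Action` per low-level `ActionView`, each carrying
//     // the shared `ActionMetadata`:
//     assert_eq!(actions.len(), tx_with_outcome.transaction.actions.len());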
257 | 
--------------------------------------------------------------------------------
/lake-primitives/src/types/mod.rs:
--------------------------------------------------------------------------------
1 | pub mod actions;
2 | pub mod block;
3 | pub mod delegate_actions;
4 | pub mod events;
5 | mod impl_actions;
6 | pub mod receipts;
7 | pub mod state_changes;
8 | pub mod transactions;
9 | 
10 | /// Since both [transactions::Transaction] hash and [receipts::Receipt] id are of the [crate::CryptoHash] type,
11 | /// we use this type alias to make the code more readable.
12 | pub type ReceiptId = near_indexer_primitives::CryptoHash;
13 | 
--------------------------------------------------------------------------------
/lake-primitives/src/types/receipts.rs:
--------------------------------------------------------------------------------
1 | use crate::near_indexer_primitives::{
2 |     types::AccountId, views, CryptoHash, IndexerExecutionOutcomeWithReceipt,
3 | };
4 | 
5 | /// Simplified representation of the `Receipt`.
6 | ///
7 | /// This is a simplification from the [near_primitives::views::ReceiptView] and [near_primitives::views::ReceiptEnumView] into a more flat structure.
8 | /// The [ReceiptKind] is used to distinguish between the different types of receipts: Action and Data.
9 | ///
10 | /// #### Important notes on the Receipt
11 | ///
12 | /// The original low-level Receipt is represented by an enum that differentiates between the Action and Data receipts. In turn, this enum is the
13 | /// `receipt` field of the parent `ReceiptView` struct.
14 | /// The parent structure has a set of fields that are common to both Action and Data receipts.
15 | /// During the simplification we have put the common fields into the [Receipt] struct itself and extracted the `actions` from the Action Receipt into a separate struct.
16 | /// Since the high-level NEAR Lake Framework update, we encourage developers to build more actions-and-events oriented indexers instead.
17 | #[derive(Debug, Clone)]
18 | pub struct Receipt {
19 |     receipt_kind: ReceiptKind,
20 |     receipt_id: CryptoHash,
21 |     receiver_id: AccountId,
22 |     predecessor_id: AccountId,
23 |     status: ExecutionStatus,
24 |     execution_outcome_id: Option<CryptoHash>,
25 |     logs: Vec<String>,
26 | }
27 | 
28 | impl Receipt {
29 |     /// Returns the [ReceiptKind](ReceiptKind) of the receipt.
30 |     ///
31 |     /// This is a simplification from the [near_primitives::views::ReceiptEnumView::Action] into a flatter structure
32 |     /// that carries only the kind of the receipt.
33 |     pub fn receipt_kind(&self) -> ReceiptKind {
34 |         self.receipt_kind.clone()
35 |     }
36 | 
37 |     /// Returns the [CryptoHash] id of the receipt.
38 |     pub fn receipt_id(&self) -> CryptoHash {
39 |         self.receipt_id
40 |     }
41 | 
42 |     /// Returns the [AccountId] of the receiver of the receipt.
43 |     pub fn receiver_id(&self) -> AccountId {
44 |         self.receiver_id.clone()
45 |     }
46 | 
47 |     /// Returns the [AccountId] of the predecessor of the receipt.
48 |     pub fn predecessor_id(&self) -> AccountId {
49 |         self.predecessor_id.clone()
50 |     }
51 | 
52 |     /// Returns the [ExecutionStatus] of the corresponding ExecutionOutcome.
53 |     ///
54 |     /// Note that the status will be `Postponed` for the receipts that are included in the block but not executed yet.
55 |     pub fn status(&self) -> ExecutionStatus {
56 |         self.status.clone()
57 |     }
58 | 
59 |     /// Returns the [CryptoHash] id of the corresponding ExecutionOutcome if it exists.
60 |     ///
61 |     /// Note that this is an optional field because the ExecutionOutcome might not be available
62 |     /// if the [Receipt] is "postponed" (included in the block but not executed yet).
63 |     pub fn execution_outcome_id(&self) -> Option<CryptoHash> {
64 |         self.execution_outcome_id
65 |     }
66 | 
67 |     /// Returns the logs of the corresponding ExecutionOutcome.
68 |     /// Might be an empty Vec if the ExecutionOutcome is not available.
69 |     pub fn logs(&self) -> Vec<String> {
70 |         self.logs.clone()
71 |     }
72 | }
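// Editor's addition (not part of the original source): a minimal sketch of how
// the flat accessors are meant to be used instead of matching on the raw
// `ReceiptView`/`ReceiptEnumView` layers. `summarize_receipt` is a
// hypothetical helper, not part of the public API.
#[allow(dead_code)]
fn summarize_receipt(receipt: &Receipt) -> String {
    format!(
        "{:?} receipt {} ({} -> {})",
        receipt.receipt_kind(),
        receipt.receipt_id(),
        receipt.predecessor_id(),
        receipt.receiver_id(),
    )
}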
73 | 
74 | impl From<&IndexerExecutionOutcomeWithReceipt> for Receipt {
75 |     fn from(outcome_with_receipt: &IndexerExecutionOutcomeWithReceipt) -> Self {
76 |         Self {
77 |             receipt_kind: (&outcome_with_receipt.receipt.receipt).into(),
78 |             receipt_id: outcome_with_receipt.receipt.receipt_id,
79 |             receiver_id: outcome_with_receipt.receipt.receiver_id.clone(),
80 |             predecessor_id: outcome_with_receipt.receipt.predecessor_id.clone(),
81 |             execution_outcome_id: Some(outcome_with_receipt.execution_outcome.id),
82 |             logs: outcome_with_receipt
83 |                 .execution_outcome
84 |                 .outcome
85 |                 .logs
86 |                 .iter()
87 |                 .map(Clone::clone)
88 |                 .collect(),
89 |             status: (&outcome_with_receipt.execution_outcome.outcome.status).into(),
90 |         }
91 |     }
92 | }
93 | 
94 | impl From<&views::ReceiptView> for Receipt {
95 |     fn from(receipt: &views::ReceiptView) -> Self {
96 |         Self {
97 |             receipt_kind: (&receipt.receipt).into(),
98 |             receipt_id: receipt.receipt_id,
99 |             receiver_id: receipt.receiver_id.clone(),
100 |             predecessor_id: receipt.predecessor_id.clone(),
101 |             status: ExecutionStatus::Postponed,
102 |             execution_outcome_id: None,
103 |             logs: vec![],
104 |         }
105 |     }
106 | }
107 | 
108 | /// Represents the Receipt kind: Action, Data, or GlobalContractDistribution.
109 | #[derive(Debug, Clone)]
110 | pub enum ReceiptKind {
111 |     /// For the Action Receipt
112 |     Action,
113 |     /// For the Data Receipt
114 |     Data,
115 |     /// For the Global Contract Distribution Receipt
116 |     GlobalContractDistribution,
117 | }
118 | 
119 | impl From<&views::ReceiptEnumView> for ReceiptKind {
120 |     fn from(receipt_enum: &views::ReceiptEnumView) -> Self {
121 |         match receipt_enum {
122 |             views::ReceiptEnumView::Action { .. } => Self::Action,
123 |             views::ReceiptEnumView::Data { .. } => Self::Data,
124 |             views::ReceiptEnumView::GlobalContractDistribution { .. } => {
125 |                 Self::GlobalContractDistribution
126 |             }
127 |         }
128 |     }
129 | }
130 | 
131 | /// Representation of the execution status for the [Receipt].
132 | #[derive(Debug, Clone)]
133 | pub enum ExecutionStatus {
134 |     /// Execution succeeded with a value; the value is represented by a [`Vec<u8>`] and can literally be anything.
135 |     SuccessValue(Vec<u8>),
136 |     /// Execution succeeded and the result of the execution is a new [Receipt] with the id represented by [CryptoHash]
137 |     SuccessReceiptId(CryptoHash),
138 |     // TODO: handle the Failure and all the nested errors it has
139 |     /// Execution failed with an error represented by a [String]
140 |     /// **WARNING!** This should be our own representation of the `TxExecutionError` from `near-primitives` instead of the [String].
141 |     /// It requires some additional work on our version of the error type; meanwhile we've left the [String] here. **This is subject to change
142 |     /// in upcoming updates**.
143 |     Failure(String),
144 |     /// Execution hasn't started yet; it is postponed (delayed) and will be executed later.
145 |     /// A Receipt with this status is considered postponed too (included, yet not executed).
146 |     Postponed,
147 | }
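// Editor's addition (not part of the original source): a hedged sketch of a
// typical check over this enum; `is_successful` is a hypothetical helper that
// treats both success variants as success. Note that `Failure` currently only
// carries the stringified error, per the TODO above.
#[allow(dead_code)]
fn is_successful(status: &ExecutionStatus) -> bool {
    matches!(
        status,
        ExecutionStatus::SuccessValue(_) | ExecutionStatus::SuccessReceiptId(_)
    )
}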
148 | 
149 | impl From<&views::ExecutionStatusView> for ExecutionStatus {
150 |     fn from(execution_status_view: &views::ExecutionStatusView) -> Self {
151 |         match execution_status_view {
152 |             views::ExecutionStatusView::Unknown => Self::Postponed,
153 |             views::ExecutionStatusView::SuccessValue(value) => Self::SuccessValue(value.clone()),
154 |             views::ExecutionStatusView::SuccessReceiptId(receipt_id) => {
155 |                 Self::SuccessReceiptId(*receipt_id)
156 |             }
157 |             views::ExecutionStatusView::Failure(tx_execution_error) => {
158 |                 // TODO: handle the Failure and all the nested errors it has instead of stringifying
159 |                 Self::Failure(tx_execution_error.to_string())
160 |             }
161 |         }
162 |     }
163 | }
164 | 
--------------------------------------------------------------------------------
/lake-primitives/src/types/state_changes.rs:
--------------------------------------------------------------------------------
1 | use near_crypto::PublicKey;
2 | 
3 | use crate::near_indexer_primitives::{
4 |     types::AccountId,
5 |     views::{
6 |         AccessKeyView, AccountView, StateChangeCauseView, StateChangeValueView,
7 |         StateChangeWithCauseView,
8 |     },
9 |     CryptoHash,
10 | };
11 | 
12 | /// Represents the changes to the state of the account.
13 | #[derive(Debug, Clone)]
14 | pub struct StateChange {
15 |     affected_account_id: AccountId,
16 |     cause: StateChangeCause,
17 |     value: StateChangeValue,
18 | }
19 | 
20 | impl StateChange {
21 |     /// Returns the [AccountId] of the account that was affected by the state change.
22 |     pub fn affected_account_id(&self) -> AccountId {
23 |         self.affected_account_id.clone()
24 |     }
25 | 
26 |     /// Returns the [StateChangeCause] of the state change.
27 |     pub fn cause(&self) -> StateChangeCause {
28 |         self.cause.clone()
29 |     }
30 | 
31 |     /// Returns the [StateChangeValue] of the state change.
32 |     pub fn value(&self) -> StateChangeValue {
33 |         self.value.clone()
34 |     }
35 | }
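// Editor's addition (not part of the original source): a minimal sketch;
// `changes_for_account` is a hypothetical helper showing how an indexer might
// filter the flat state changes by a tracked account.
#[allow(dead_code)]
fn changes_for_account(changes: &[StateChange], account_id: &AccountId) -> Vec<StateChange> {
    changes
        .iter()
        .filter(|change| change.affected_account_id() == *account_id)
        .cloned()
        .collect()
}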
36 | 
37 | impl From<&StateChangeWithCauseView> for StateChange {
38 |     fn from(state_change_with_cause_view: &StateChangeWithCauseView) -> Self {
39 |         let cause: StateChangeCause = (&state_change_with_cause_view.cause).into();
40 |         let value: StateChangeValue = (&state_change_with_cause_view.value).into();
41 |         Self {
42 |             affected_account_id: value.affected_account_id(),
43 |             cause,
44 |             value,
45 |         }
46 |     }
47 | }
48 | 
49 | #[derive(Debug, Clone)]
50 | pub enum StateChangeCause {
51 |     NotWritableToDisk,
52 |     InitialState,
53 |     TransactionProcessing { tx_hash: CryptoHash },
54 |     ActionReceiptProcessingStarted { receipt_hash: CryptoHash },
55 |     ActionReceiptGasReward { receipt_hash: CryptoHash },
56 |     ReceiptProcessing { receipt_hash: CryptoHash },
57 |     PostponedReceipt { receipt_hash: CryptoHash },
58 |     UpdatedDelayedReceipts,
59 |     ValidatorAccountsUpdate,
60 |     Migration,
61 |     ReshardingV2,
62 |     BandwidthSchedulerStateUpdate,
63 | }
64 | 
65 | impl From<&StateChangeCauseView> for StateChangeCause {
66 |     fn from(state_change_cause: &StateChangeCauseView) -> Self {
67 |         match state_change_cause {
68 |             StateChangeCauseView::NotWritableToDisk => Self::NotWritableToDisk,
69 |             StateChangeCauseView::InitialState => Self::InitialState,
70 |             StateChangeCauseView::TransactionProcessing { tx_hash } => {
71 |                 Self::TransactionProcessing { tx_hash: *tx_hash }
72 |             }
73 |             StateChangeCauseView::ActionReceiptProcessingStarted { receipt_hash } => {
74 |                 Self::ActionReceiptProcessingStarted {
75 |                     receipt_hash: *receipt_hash,
76 |                 }
77 |             }
78 |             StateChangeCauseView::ActionReceiptGasReward { receipt_hash } => {
79 |                 Self::ActionReceiptGasReward {
80 |                     receipt_hash: *receipt_hash,
81 |                 }
82 |             }
83 |             StateChangeCauseView::ReceiptProcessing { receipt_hash } => Self::ReceiptProcessing {
84 |                 receipt_hash: *receipt_hash,
85 |             },
86 |             StateChangeCauseView::PostponedReceipt { receipt_hash } => Self::PostponedReceipt {
87 |                 receipt_hash: *receipt_hash,
88 |             },
89 |             StateChangeCauseView::UpdatedDelayedReceipts => Self::UpdatedDelayedReceipts,
90 |             StateChangeCauseView::ValidatorAccountsUpdate => Self::ValidatorAccountsUpdate,
91 |             StateChangeCauseView::Migration => Self::Migration,
92 |             StateChangeCauseView::ReshardingV2 => Self::ReshardingV2,
93 |             StateChangeCauseView::BandwidthSchedulerStateUpdate => {
94 |                 Self::BandwidthSchedulerStateUpdate
95 |             }
96 |         }
97 |     }
98 | }
99 | 
100 | #[derive(Debug, Clone)]
101 | pub enum StateChangeValue {
102 |     AccountUpdate {
103 |         account_id: AccountId,
104 |         account: AccountView,
105 |     },
106 |     AccountDeletion {
107 |         account_id: AccountId,
108 |     },
109 |     AccessKeyUpdate {
110 |         account_id: AccountId,
111 |         public_key: PublicKey,
112 |         access_key: AccessKeyView,
113 |     },
114 |     AccessKeyDeletion {
115 |         account_id: AccountId,
116 |         public_key: PublicKey,
117 |     },
118 |     DataUpdate {
119 |         account_id: AccountId,
120 |         key: Vec<u8>,
121 |         value: Vec<u8>,
122 |     },
123 |     DataDeletion {
124 |         account_id: AccountId,
125 |         key: Vec<u8>,
126 |     },
127 |     ContractCodeUpdate {
128 |         account_id: AccountId,
129 |         code: Vec<u8>,
130 |     },
131 |     ContractCodeDeletion {
132 |         account_id: AccountId,
133 |     },
134 | }
135 | 
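// Editor's addition (not part of the original source): a hedged sketch;
// `deployed_code_sizes` is a hypothetical helper showing how to pull a single
// variant of interest out of the flat value enum.
#[allow(dead_code)]
fn deployed_code_sizes(values: &[StateChangeValue]) -> Vec<(AccountId, usize)> {
    values
        .iter()
        .filter_map(|value| match value {
            StateChangeValue::ContractCodeUpdate { account_id, code } => {
                Some((account_id.clone(), code.len()))
            }
            _ => None,
        })
        .collect()
}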
136 | impl StateChangeValue {
137 |     pub fn affected_account_id(&self) -> AccountId {
138 |         match self {
139 |             Self::AccountUpdate { account_id, .. } => account_id.clone(),
140 |             Self::AccountDeletion { account_id } => account_id.clone(),
141 |             Self::AccessKeyUpdate { account_id, .. } => account_id.clone(),
142 |             Self::AccessKeyDeletion { account_id, .. } => account_id.clone(),
143 |             Self::DataUpdate { account_id, .. } => account_id.clone(),
144 |             Self::DataDeletion { account_id, .. } => account_id.clone(),
145 |             Self::ContractCodeUpdate { account_id, .. } => account_id.clone(),
146 |             Self::ContractCodeDeletion { account_id } => account_id.clone(),
147 |         }
148 |     }
149 | }
150 | 
151 | impl From<&StateChangeValueView> for StateChangeValue {
152 |     fn from(state_change_value: &StateChangeValueView) -> Self {
153 |         match state_change_value {
154 |             StateChangeValueView::AccountUpdate {
155 |                 account_id,
156 |                 account,
157 |             } => Self::AccountUpdate {
158 |                 account_id: account_id.clone(),
159 |                 account: account.clone(),
160 |             },
161 |             StateChangeValueView::AccountDeletion { account_id } => Self::AccountDeletion {
162 |                 account_id: account_id.clone(),
163 |             },
164 |             StateChangeValueView::AccessKeyUpdate {
165 |                 account_id,
166 |                 public_key,
167 |                 access_key,
168 |             } => Self::AccessKeyUpdate {
169 |                 account_id: account_id.clone(),
170 |                 public_key: public_key.clone(),
171 |                 access_key: access_key.clone(),
172 |             },
173 |             StateChangeValueView::AccessKeyDeletion {
174 |                 account_id,
175 |                 public_key,
176 |             } => Self::AccessKeyDeletion {
177 |                 account_id: account_id.clone(),
178 |                 public_key: public_key.clone(),
179 |             },
180 |             StateChangeValueView::DataUpdate {
181 |                 account_id,
182 |                 key,
183 |                 value,
184 |             } => {
185 |                 let key: &[u8] = key.as_ref();
186 |                 let value: &[u8] = value.as_ref();
187 |                 Self::DataUpdate {
188 |                     account_id: account_id.clone(),
189 |                     key: key.to_vec(),
190 |                     value: value.to_vec(),
191 |                 }
192 |             }
193 |             StateChangeValueView::DataDeletion { account_id, key } => {
194 |                 let key: &[u8] = key.as_ref();
195 |                 Self::DataDeletion {
196 |                     account_id: account_id.clone(),
197 |                     key: key.to_vec(),
198 |                 }
199 |             }
200 |             StateChangeValueView::ContractCodeUpdate { account_id, code } => {
201 |                 Self::ContractCodeUpdate {
202 |                     account_id: account_id.clone(),
203 |                     code: code.clone(),
204 |                 }
205 |             }
206 |             StateChangeValueView::ContractCodeDeletion { account_id } => {
207 |                 Self::ContractCodeDeletion {
208 |                     account_id: account_id.clone(),
209 |                 }
210 |             }
211 |         }
212 |     }
213 | }
214 | 
--------------------------------------------------------------------------------
/lake-primitives/src/types/transactions.rs:
--------------------------------------------------------------------------------
1 | use near_crypto::{PublicKey, Signature};
2 | 
3 | use super::receipts::ExecutionStatus;
4 | use crate::near_indexer_primitives::{types::AccountId, CryptoHash, IndexerTransactionWithOutcome};
5 | 
6 | /// High-level representation of the `Transaction`.
7 | ///
8 | /// The structure basically combines the `Transaction` itself and the corresponding `ExecutionOutcome`.
9 | /// **Reminder**: the result of the transaction execution is always a [Receipt](super::receipts::Receipt)
10 | /// that looks pretty much like the `Transaction` itself.
11 | ///
12 | /// #### Important notes on the Transaction
13 | ///
14 | /// Transaction's `actions` are represented by the [Action](super::actions::Action) enum. Actions are
15 | /// included for informational purposes, to help developers know what exactly should happen after the
16 | /// `Transaction` is executed.
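///
/// #### Example (editor's sketch)
///
/// A hedged illustration added by the editor, not part of the original docs
/// (`tx` stands for any [Transaction] obtained from a streamed block):
///
/// ```ignore
/// fn signer_summary(tx: &Transaction) -> String {
///     format!("{} signed {} action(s)", tx.signer_id(), tx.actions_included().count())
/// }
/// ```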
17 | #[derive(Debug, Clone)]
18 | pub struct Transaction {
19 |     transaction_hash: CryptoHash,
20 |     signer_id: AccountId,
21 |     signer_public_key: PublicKey,
22 |     signature: Signature,
23 |     receiver_id: AccountId,
24 |     status: ExecutionStatus,
25 |     execution_outcome_id: CryptoHash,
26 |     actions: Vec<super::actions::Action>,
27 | }
28 | 
29 | impl Transaction {
30 |     /// Returns the [CryptoHash] hash of the transaction.
31 |     pub fn transaction_hash(&self) -> CryptoHash {
32 |         self.transaction_hash
33 |     }
34 | 
35 |     /// Returns the [AccountId] of the signer of the transaction.
36 |     pub fn signer_id(&self) -> &AccountId {
37 |         &self.signer_id
38 |     }
39 | 
40 |     /// Returns the [PublicKey] of the signer of the transaction.
41 |     pub fn signer_public_key(&self) -> &PublicKey {
42 |         &self.signer_public_key
43 |     }
44 | 
45 |     /// Returns the [Signature] of the transaction.
46 |     pub fn signature(&self) -> &Signature {
47 |         &self.signature
48 |     }
49 | 
50 |     /// Returns the [AccountId] of the receiver of the transaction.
51 |     pub fn receiver_id(&self) -> &AccountId {
52 |         &self.receiver_id
53 |     }
54 | 
55 |     /// Returns the [ExecutionStatus] of the corresponding ExecutionOutcome.
56 |     pub fn status(&self) -> &ExecutionStatus {
57 |         &self.status
58 |     }
59 | 
60 |     /// Returns the [CryptoHash] id of the corresponding ExecutionOutcome.
61 |     pub fn execution_outcome_id(&self) -> CryptoHash {
62 |         self.execution_outcome_id
63 |     }
64 | 
65 |     /// Returns an iterator over the [Action](super::actions::Action)s included in the transaction.
66 |     pub fn actions_included(&self) -> impl Iterator<Item = &super::actions::Action> {
67 |         self.actions.iter()
68 |     }
69 | }
70 | 
71 | impl TryFrom<&IndexerTransactionWithOutcome> for Transaction {
72 |     type Error = &'static str;
73 | 
74 |     fn try_from(tx_with_outcome: &IndexerTransactionWithOutcome) -> Result<Self, Self::Error> {
75 |         Ok(Self {
76 |             transaction_hash: tx_with_outcome.transaction.hash,
77 |             signer_id: tx_with_outcome.transaction.signer_id.clone(),
78 |             signer_public_key: tx_with_outcome.transaction.public_key.clone(),
79 |             signature: tx_with_outcome.transaction.signature.clone(),
80 |             receiver_id: tx_with_outcome.transaction.receiver_id.clone(),
81 |             execution_outcome_id: tx_with_outcome.outcome.execution_outcome.id,
82 |             status: (&tx_with_outcome.outcome.execution_outcome.outcome.status).into(),
83 |             actions: super::actions::Action::try_vec_from_transaction_outcome(tx_with_outcome)?,
84 |         })
85 |     }
86 | }
87 | 
--------------------------------------------------------------------------------
/release-plz.toml:
--------------------------------------------------------------------------------
1 | [workspace]
2 | # Use `near-lake-framework-rs` crate CHANGELOG as top-level one
3 | changelog_update = false
4 | 
5 | [[package]]
6 | name = "lake-framework"
7 | changelog_update = true
8 | changelog_path = "./CHANGELOG.md"
9 | 
--------------------------------------------------------------------------------
/rust-toolchain:
--------------------------------------------------------------------------------
1 | 1.85.0
2 | 
--------------------------------------------------------------------------------