├── SECURITY.md ├── .gitignore ├── macros ├── Cargo.toml ├── NOTICE ├── src │ ├── lib.rs │ ├── index_list.rs │ └── newtype.rs └── README.md ├── NOTICE ├── MIGRATING.md ├── src ├── namespace.rs ├── indexes │ ├── mod.rs │ ├── unique.rs │ ├── multi.rs │ └── prefix.rs ├── endian.rs ├── helpers.rs ├── lib.rs ├── path.rs ├── int_key.rs ├── iter_helpers.rs ├── bound.rs ├── item.rs ├── de.rs ├── snapshot │ ├── item.rs │ └── mod.rs ├── keys.rs └── prefix.rs ├── Cargo.toml ├── tests ├── index_list.rs └── newtype.rs ├── .github └── workflows │ └── checks.yml ├── benches └── main.rs ├── Taskfile.yml └── LICENSE /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security Policy 2 | 3 | This repository is part of the **CosmWasm** stack. 4 | Please see the [Advisories] for its security policy. 5 | 6 | [Advisories]: https://github.com/CosmWasm/advisories/blob/main/SECURITY.md 7 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # macOS 2 | .DS_Store 3 | 4 | # Text file backups 5 | **/*.rs.bk 6 | 7 | # Build results 8 | target/ 9 | 10 | # IDEs 11 | .vscode/ 12 | .idea/ 13 | *.iml 14 | 15 | # Auto-gen 16 | .cargo-ok 17 | 18 | # Build artifacts 19 | *.wasm 20 | hash.txt 21 | contracts.txt 22 | artifacts/ 23 | 24 | # code coverage 25 | tarpaulin-report.* 26 | 27 | packages/*/schema 28 | contracts/*/schema 29 | -------------------------------------------------------------------------------- /macros/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "cw-storage-macro" 3 | version = "3.0.1" 4 | authors = ["yoisha <48324733+y-pakorn@users.noreply.github.com>"] 5 | edition = "2018" 6 | description = "Macro helpers for storage-plus" 7 | license = "Apache-2.0" 8 | repository = "https://github.com/CosmWasm/cw-storage-plus" 9 | homepage = 
"https://cosmwasm.com" 10 | documentation = "https://docs.cosmwasm.com" 11 | 12 | [lib] 13 | proc-macro = true 14 | 15 | [dependencies] 16 | proc-macro2 = "1.0.86" 17 | quote = "1.0.37" 18 | syn = { version = "2", features = ["full"] } 19 | -------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2021-2025 Confio GmbH 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | -------------------------------------------------------------------------------- /macros/NOTICE: -------------------------------------------------------------------------------- 1 | CW-Storage-Macro: Macro helpers for storage-plus 2 | Copyright (C) 2022 Confio GmbH 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
15 | -------------------------------------------------------------------------------- /macros/src/lib.rs: -------------------------------------------------------------------------------- 1 | /*! 2 | Procedural macros helper for interacting with cw-storage-plus and cosmwasm-storage. 3 | 4 | For more information on this package, please check out the 5 | [README](https://github.com/CosmWasm/cw-storage-plus/blob/main/macros/README.md). 6 | */ 7 | 8 | mod index_list; 9 | mod newtype; 10 | 11 | use proc_macro::TokenStream; 12 | 13 | // Re-export the procedural macro functions 14 | 15 | #[proc_macro_attribute] 16 | pub fn index_list(attr: TokenStream, item: TokenStream) -> TokenStream { 17 | index_list::index_list(attr, item) 18 | } 19 | 20 | #[proc_macro_derive(NewTypeKey)] 21 | pub fn cw_storage_newtype_key_derive(input: TokenStream) -> TokenStream { 22 | newtype::cw_storage_newtype_key_derive(input) 23 | } 24 | -------------------------------------------------------------------------------- /macros/README.md: -------------------------------------------------------------------------------- 1 | # CW-Storage-Plus: Macro helpers for storage-plus 2 | 3 | Procedural macros helper for interacting with cw-storage-plus and cosmwasm-storage. 4 | 5 | ## Current features 6 | 7 | Auto generate an `IndexList` impl for your indexes struct. 8 | 9 | ```rust 10 | #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] 11 | struct TestStruct { 12 | id: u64, 13 | id2: u32, 14 | addr: Addr, 15 | } 16 | 17 | #[index_list(TestStruct)] // <- Add this line right here. 18 | struct TestIndexes<'a> { 19 | id: MultiIndex<'a, u32, TestStruct, u64>, 20 | addr: UniqueIndex<'a, Addr, TestStruct, String>, 21 | } 22 | ``` 23 | 24 | Auto generate the required impls to use a newtype as a key 25 | 26 | ```rust 27 | #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] 28 | #[derive(NewTypeKey)] // <- Add this line right here. 
29 | struct TestKey(u64); 30 | 31 | // You can now use `TestKey` as a key in `Map` 32 | ``` -------------------------------------------------------------------------------- /MIGRATING.md: -------------------------------------------------------------------------------- 1 | # Migrating 2 | 3 | This guide lists API changes between `cw-storage-plus` major releases. 4 | 5 | ## v2.0.x -> v3.0.0 6 | 7 | No breaking changes in API. Upgrade to `cosmwasm-std` v3.0.0. 8 | 9 | ## v1.0.x -> v2.0.0 10 | 11 | ### Breaking Issues / PRs 12 | 13 | - The `UniqueIndex` `PK` trait parameter is now mandatory [\#37](https://github.com/CosmWasm/cw-storage-plus/issues/37). 14 | 15 | The migration is straightforward: just add the `PK` type parameter to your `UniqueIndex` implementation. If you don't 16 | plan to deserialize it, you can use `()` as your `UniqueIndex` `PK` type, which was the default before. 17 | 18 | - The `KeyDeserialize` trait now includes a `KEY_ELEMS` const [\#34](https://github.com/CosmWasm/cw-storage-plus/pull/34), 19 | that needs to be defined when implementing this trait. This const defines the number of elements in the key, and its 20 | value would typically be `1`. 21 | 22 | This only affect users that implement `KeyDeserialize` for their own types. If you only use the provided types, you 23 | don't need to worry about this. 
24 | -------------------------------------------------------------------------------- /macros/src/index_list.rs: -------------------------------------------------------------------------------- 1 | use proc_macro::TokenStream; 2 | use syn::{ 3 | Ident, 4 | __private::{quote::quote, Span}, 5 | parse_macro_input, ItemStruct, 6 | }; 7 | 8 | pub fn index_list(attr: TokenStream, item: TokenStream) -> TokenStream { 9 | let input = parse_macro_input!(item as ItemStruct); 10 | 11 | let ty = Ident::new(&attr.to_string(), Span::call_site()); 12 | let struct_ty = input.ident.clone(); 13 | 14 | let names = input 15 | .fields 16 | .clone() 17 | .into_iter() 18 | .map(|e| { 19 | let name = e.ident.unwrap(); 20 | quote! { &self.#name } 21 | }) 22 | .collect::>(); 23 | 24 | let expanded = quote! { 25 | #input 26 | 27 | impl cw_storage_plus::IndexList<#ty> for #struct_ty<'_> { 28 | fn get_indexes(&'_ self) -> Box> + '_> { 29 | let v: Vec<&dyn cw_storage_plus::Index<#ty>> = vec![#(#names),*]; 30 | Box::new(v.into_iter()) 31 | } 32 | } 33 | }; 34 | 35 | TokenStream::from(expanded) 36 | } 37 | -------------------------------------------------------------------------------- /src/namespace.rs: -------------------------------------------------------------------------------- 1 | use std::borrow::Cow; 2 | 3 | /// The namespace of a storage container. Meant to be constructed from "stringy" types. 4 | /// 5 | /// This type is generally not meant to be constructed directly. It's exported for 6 | /// documentation purposes. Most of the time, you should just pass a [`String`] or 7 | /// `&'static str` to an [`Item`](crate::Item)/collection constructor. 
#[derive(Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct Namespace(Cow<'static, [u8]>);

impl Namespace {
    /// Const constructor so a namespace can be built in `const` context,
    /// e.g. `const ITEM: Item<u64> = Item::new("item");`.
    pub const fn from_static_str(s: &'static str) -> Namespace {
        Namespace(Cow::Borrowed(s.as_bytes()))
    }

    /// The raw bytes of the namespace.
    pub fn as_slice(&self) -> &[u8] {
        self.0.as_ref()
    }
}

// A `&'static str` is borrowed without allocating.
impl From<&'static str> for Namespace {
    fn from(s: &'static str) -> Self {
        Namespace(Cow::Borrowed(s.as_bytes()))
    }
}

// An owned `String` is consumed; its buffer is reused as the namespace bytes.
impl From<String> for Namespace {
    fn from(s: String) -> Self {
        Namespace(Cow::Owned(s.into_bytes()))
    }
}

// Escape hatch for callers that already have raw bytes.
impl From<Cow<'static, [u8]>> for Namespace {
    fn from(s: Cow<'static, [u8]>) -> Self {
        Namespace(s)
    }
}
(index_string(data1), index_string(data2)) 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = [".", "macros"] 3 | 4 | resolver = "2" 5 | 6 | [package] 7 | name = "cw-storage-plus" 8 | version = "3.0.1" 9 | authors = ["Ethan Frey "] 10 | edition = "2021" 11 | description = "Enhanced storage engines" 12 | license = "Apache-2.0" 13 | repository = "https://github.com/CosmWasm/cw-storage-plus" 14 | homepage = "https://cosmwasm.com" 15 | 16 | [package.metadata.docs.rs] 17 | all-features = true # include macro feature when building docs 18 | 19 | [features] 20 | default = ["iterator"] 21 | iterator = ["cosmwasm-std/iterator"] 22 | macro = ["dep:cw-storage-macro"] 23 | 24 | [lib] 25 | # See https://bheisler.github.io/criterion.rs/book/faq.html#cargo-bench-gives-unrecognized-option-errors-for-valid-command-line-options 26 | bench = false 27 | 28 | [dependencies] 29 | cosmwasm-std = { version = "3.0.1", default-features = false, features = ["std"] } 30 | schemars = "0.8.22" 31 | serde = { version = "1.0", default-features = false, features = ["derive"] } 32 | cw-storage-macro = { path = "macros", version = "3.0.1", optional = true } 33 | 34 | [dev-dependencies] 35 | # See https://bheisler.github.io/criterion.rs/book/user_guide/command_line_options.html for using baselines 36 | criterion = { version = "0.7.0", features = ["html_reports"] } 37 | rand = { version = "0.9.1", default-features = false } 38 | rand_xoshiro = { version = "0.7.0", default-features = false } 39 | derive_more = { version = "2.0.1", features = ["full"] } 40 | 41 | [[bench]] 42 | name = "main" 43 | harness = false 44 | -------------------------------------------------------------------------------- /src/endian.rs: -------------------------------------------------------------------------------- 1 | //! 
//! This code is inspired by (and partially borrowed from)
//! <https://crates.io/crates/endiannezz>
//! but there was a lot in that crate I did not want, the name did not inspire
//! confidence, and I wanted a different return value, so I just took the code
//! to modify slightly.

use std::mem;

/// Fixed-width integer types that can encode themselves to / decode
/// themselves from big- and little-endian byte arrays.
pub trait Endian: Sized + Copy {
    /// The byte-array type holding the encoded value, e.g. `[u8; 4]` for `u32`.
    type Buf: AsRef<[u8]> + AsMut<[u8]> + Into<Vec<u8>> + Default;

    fn to_le_bytes(self) -> Self::Buf;
    fn to_be_bytes(self) -> Self::Buf;

    fn from_le_bytes(bytes: Self::Buf) -> Self;
    fn from_be_bytes(bytes: Self::Buf) -> Self;
}

// Generates trait methods that forward to the inherent methods of `$ty`.
macro_rules! delegate {
    ($ty:ty, [$($method:ident),* $(,)?], ($param:ident : $param_ty:ty) -> $ret:ty) => {
        delegate!(@inner $ty, [$($method),*], $param, $param_ty, $ret);
    };
    (@inner $ty:ty, [$($method:ident),*], $param:ident, $param_ty:ty, $ret:ty) => {
        $(
            #[inline]
            // `self: Self` falls out of the generic `$param` matcher; allow
            // the pedantic lint here, scoped to the generated fns, instead of
            // with a file-level inner attribute.
            #[allow(clippy::needless_arbitrary_self_type)]
            fn $method ($param: $param_ty) -> $ret { <$ty>::$method($param) }
        )*
    };
}

macro_rules! impl_primitives {
    ($($ty:ty),* $(,)?) => {
        $(
            impl Endian for $ty {
                type Buf = [u8; mem::size_of::<$ty>()];

                delegate!($ty, [
                    to_le_bytes,
                    to_be_bytes,
                ], (self: Self) -> Self::Buf);

                delegate!($ty, [
                    from_le_bytes,
                    from_be_bytes,
                ], (bytes: Self::Buf) -> Self);
            }
        )*
    };
}

#[rustfmt::skip]
impl_primitives![
    i8, i16, i32, i64, i128,
    u8, u16, u32, u64, u128,
];
"t_2"), 52 | addr: UniqueIndex::new(|t| t.addr.clone(), "t_addr"), 53 | }, 54 | ); 55 | 56 | idm.save( 57 | &mut storage, 58 | 0, 59 | &TestStruct { 60 | id: 0, 61 | id2: 100, 62 | addr: Addr::unchecked("1"), 63 | }, 64 | ) 65 | .unwrap(); 66 | 67 | assert_eq!( 68 | idm.load(&storage, 0).unwrap(), 69 | TestStruct { 70 | id: 0, 71 | id2: 100, 72 | addr: Addr::unchecked("1"), 73 | } 74 | ); 75 | } 76 | } 77 | -------------------------------------------------------------------------------- /src/helpers.rs: -------------------------------------------------------------------------------- 1 | //! This module is an implementation of a namespacing scheme described 2 | //! in 3 | //! 4 | //! Everything in this file is only responsible for building such keys 5 | //! and is in no way specific to any kind of storage. 6 | 7 | use std::any::type_name; 8 | 9 | use cosmwasm_std::{ 10 | to_json_vec, Addr, Binary, ContractResult, CustomQuery, QuerierWrapper, QueryRequest, StdError, 11 | StdResult, SystemResult, WasmQuery, 12 | }; 13 | 14 | /// Use this in Map/SnapshotMap/etc when you want to provide a QueryRaw helper. 15 | /// This is similar to `querier.query(WasmQuery::Raw {})`, except it does NOT parse the 16 | /// result, but return a possibly empty Binary to be handled by the calling code. 17 | /// That is essential to handle b"" as None. 
18 | pub(crate) fn query_raw( 19 | querier: &QuerierWrapper, 20 | contract_addr: Addr, 21 | key: Binary, 22 | ) -> StdResult { 23 | let request: QueryRequest = WasmQuery::Raw { 24 | contract_addr: contract_addr.into(), 25 | key, 26 | } 27 | .into(); 28 | 29 | let raw = to_json_vec(&request).map_err(|serialize_err| { 30 | StdError::msg(format!("Serializing QueryRequest: {serialize_err}")) 31 | })?; 32 | match querier.raw_query(&raw) { 33 | SystemResult::Err(system_err) => { 34 | Err(StdError::msg(format!("Querier system error: {system_err}"))) 35 | } 36 | SystemResult::Ok(ContractResult::Err(contract_err)) => Err(StdError::msg(format!( 37 | "Querier contract error: {contract_err}" 38 | ))), 39 | SystemResult::Ok(ContractResult::Ok(value)) => Ok(value), 40 | } 41 | } 42 | 43 | /// Returns a debug identifier to explain what was not found 44 | pub(crate) fn not_found_object_info(key: &[u8]) -> String { 45 | let type_name = type_name::(); 46 | format!("type: {type_name}; key: {key:02X?}") 47 | } 48 | 49 | #[cfg(test)] 50 | mod test { 51 | use super::*; 52 | use cosmwasm_std::Uint128; 53 | use serde::{Deserialize, Serialize}; 54 | 55 | #[derive(Serialize, Deserialize, PartialEq, Debug)] 56 | struct Person { 57 | pub name: String, 58 | pub age: i32, 59 | } 60 | 61 | #[test] 62 | fn not_found_object_info_works() { 63 | assert_eq!( 64 | not_found_object_info::(&[0xaa, 0xBB]), 65 | "type: cw_storage_plus::helpers::test::Person; key: [AA, BB]" 66 | ); 67 | assert_eq!( 68 | not_found_object_info::(&[]), 69 | "type: cw_storage_plus::helpers::test::Person; key: []" 70 | ); 71 | assert_eq!( 72 | not_found_object_info::(b"foo"), 73 | "type: cosmwasm_std::math::uint128::Uint128; key: [66, 6F, 6F]" 74 | ); 75 | } 76 | } 77 | -------------------------------------------------------------------------------- /macros/src/newtype.rs: -------------------------------------------------------------------------------- 1 | use proc_macro::TokenStream; 2 | use quote::quote; 3 | use 
syn::{parse_macro_input, spanned::Spanned, ItemStruct}; 4 | 5 | pub fn cw_storage_newtype_key_derive(input: TokenStream) -> TokenStream { 6 | let input = parse_macro_input!(input as ItemStruct); 7 | 8 | impl_newtype(&input) 9 | .unwrap_or_else(syn::Error::into_compile_error) 10 | .into() 11 | } 12 | 13 | fn impl_newtype(input: &ItemStruct) -> syn::Result { 14 | // Extract the struct name 15 | let name = &input.ident; 16 | 17 | // Extract the inner type 18 | let inner_type = if let syn::Fields::Unnamed(fields) = &input.fields { 19 | if fields.unnamed.len() == 1 { 20 | &fields.unnamed[0].ty 21 | } else { 22 | return Err(syn::Error::new( 23 | input.span(), 24 | format!( 25 | "Too many fields for NewTypeKey. Expected 1, got {}", 26 | fields.unnamed.len() 27 | ), 28 | )); 29 | } 30 | } else { 31 | return Err(syn::Error::new( 32 | input.span(), 33 | "NewTypeKey can only be derived for newtypes (tuple structs with one field)", 34 | )); 35 | }; 36 | 37 | // Implement PrimaryKey 38 | let impl_primary_key = quote! { 39 | impl<'a> cw_storage_plus::PrimaryKey<'a> for #name 40 | where 41 | #inner_type: cw_storage_plus::PrimaryKey<'a>, 42 | { 43 | type Prefix = (); 44 | type SubPrefix = (); 45 | type Suffix = Self; 46 | type SuperSuffix = Self; 47 | 48 | fn key(&self) -> Vec { 49 | self.0.key() 50 | } 51 | } 52 | }; 53 | 54 | // Implement Prefixer 55 | let impl_prefixer = quote! { 56 | impl<'a> cw_storage_plus::Prefixer<'a> for #name 57 | where 58 | #inner_type: cw_storage_plus::Prefixer<'a>, 59 | { 60 | fn prefix(&self) -> Vec { 61 | self.0.prefix() 62 | } 63 | } 64 | }; 65 | 66 | // Implement KeyDeserialize 67 | let impl_key_deserialize = quote! 
{ 68 | impl cw_storage_plus::KeyDeserialize for #name 69 | where 70 | #inner_type: cw_storage_plus::KeyDeserialize, 71 | { 72 | type Output = #name; 73 | const KEY_ELEMS: u16 = 1; 74 | 75 | #[inline(always)] 76 | fn from_vec(value: Vec) -> cosmwasm_std::StdResult { 77 | <#inner_type as cw_storage_plus::KeyDeserialize>::from_vec(value).map(#name) 78 | } 79 | } 80 | }; 81 | 82 | // Combine all implementations 83 | let expanded = quote! { 84 | #impl_primary_key 85 | #impl_prefixer 86 | #impl_key_deserialize 87 | }; 88 | 89 | Ok(expanded) 90 | } 91 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | /*! 2 | After building `cosmwasm-storage`, we realized many of the design decisions were 3 | limiting us and producing a lot of needless boilerplate. The decision was made to leave 4 | those APIs stable for anyone wanting a very basic abstraction on the KV-store and to 5 | build a much more powerful and complex ORM layer that can provide powerful accessors 6 | using complex key types, which are transparently turned into bytes. 7 | 8 | This led to a number of breaking API changes in this package of the course of several 9 | releases as we updated this with lots of experience, user feedback, and deep dives to harness 10 | the full power of generics. 11 | 12 | For more information on this package, please check out the 13 | [README](https://github.com/CosmWasm/cw-plus/blob/main/packages/storage-plus/README.md). 
14 | */ 15 | 16 | mod bound; 17 | mod de; 18 | mod deque; 19 | mod endian; 20 | mod helpers; 21 | mod indexed_map; 22 | mod indexed_snapshot; 23 | mod indexes; 24 | mod int_key; 25 | mod item; 26 | mod iter_helpers; 27 | mod keys; 28 | mod map; 29 | mod namespace; 30 | mod path; 31 | mod prefix; 32 | mod snapshot; 33 | 34 | #[cfg(feature = "iterator")] 35 | pub use bound::{Bound, Bounder, PrefixBound, RawBound}; 36 | pub use de::KeyDeserialize; 37 | pub use deque::Deque; 38 | pub use deque::DequeIter; 39 | pub use endian::Endian; 40 | #[cfg(feature = "iterator")] 41 | pub use indexed_map::{IndexList, IndexedMap}; 42 | #[cfg(feature = "iterator")] 43 | pub use indexed_snapshot::IndexedSnapshotMap; 44 | #[cfg(feature = "iterator")] 45 | pub use indexes::{Index, IndexPrefix, MultiIndex, UniqueIndex}; 46 | pub use int_key::IntKey; 47 | pub use item::Item; 48 | pub use keys::{Key, Prefixer, PrimaryKey}; 49 | pub use map::Map; 50 | pub use namespace::Namespace; 51 | pub use path::Path; 52 | #[cfg(feature = "iterator")] 53 | pub use prefix::{range_with_prefix, Prefix}; 54 | #[cfg(feature = "iterator")] 55 | pub use snapshot::{SnapshotItem, SnapshotMap, Strategy}; 56 | 57 | #[cfg(all(feature = "iterator", feature = "macro"))] 58 | /// Auto generate an `IndexList` impl for your indexes struct. 59 | /// 60 | /// # Example 61 | /// 62 | /// ```rust 63 | /// use cosmwasm_std::Addr; 64 | /// use cw_storage_plus::{MultiIndex, UniqueIndex, index_list}; 65 | /// use serde::{Serialize, Deserialize}; 66 | /// 67 | /// #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] 68 | /// struct TestStruct { 69 | /// id: u64, 70 | /// id2: u32, 71 | /// addr: Addr, 72 | /// } 73 | /// 74 | /// #[index_list(TestStruct)] // <- Add this line right here. 
75 | /// struct TestIndexes<'a> { 76 | /// id: MultiIndex<'a, u32, TestStruct, u64>, 77 | /// addr: UniqueIndex<'a, Addr, TestStruct, ()>, 78 | /// } 79 | /// ``` 80 | /// 81 | pub use cw_storage_macro::index_list; 82 | 83 | #[cfg(all(feature = "iterator", feature = "macro"))] 84 | /// Auto generate the required impls to use a newtype as a key 85 | /// # Example 86 | /// 87 | /// ```rust 88 | /// use cw_storage_plus::NewTypeKey; 89 | /// use serde::{Serialize, Deserialize}; 90 | /// 91 | /// #[derive(Serialize, Deserialize, Clone, Debug, PartialEq)] 92 | /// #[derive(NewTypeKey)] // <- Add this line right here. 93 | /// struct TestKey(u64); 94 | /// 95 | /// // You can now use `TestKey` as a key in `Map` 96 | /// ``` 97 | /// 98 | pub use cw_storage_macro::NewTypeKey; 99 | -------------------------------------------------------------------------------- /.github/workflows/checks.yml: -------------------------------------------------------------------------------- 1 | name: checks 2 | 3 | on: [ push ] 4 | 5 | jobs: 6 | formatting: 7 | runs-on: ubuntu-latest 8 | steps: 9 | - uses: actions/checkout@v5 10 | - uses: dtolnay/rust-toolchain@stable 11 | - run: cargo fmt -- --check 12 | 13 | lint-all-features: 14 | runs-on: ubuntu-latest 15 | steps: 16 | - uses: actions/checkout@v5 17 | - uses: dtolnay/rust-toolchain@stable 18 | - run: cargo clippy --all-features --all-targets -- -D warnings 19 | 20 | lint-default-features: 21 | runs-on: ubuntu-latest 22 | steps: 23 | - uses: actions/checkout@v5 24 | - uses: dtolnay/rust-toolchain@stable 25 | - run: cargo clippy --all-targets -- -D warnings 26 | 27 | lint-no-default-features: 28 | runs-on: ubuntu-latest 29 | steps: 30 | - uses: actions/checkout@v5 31 | - uses: dtolnay/rust-toolchain@stable 32 | - run: cargo clippy --no-default-features --all-targets -- -D warnings 33 | 34 | build-all-features: 35 | runs-on: ubuntu-latest 36 | steps: 37 | - uses: actions/checkout@v5 38 | - uses: dtolnay/rust-toolchain@stable 39 | - run: cargo 
build --release --all-features 40 | 41 | build-default-features: 42 | runs-on: ubuntu-latest 43 | steps: 44 | - uses: actions/checkout@v5 45 | - uses: dtolnay/rust-toolchain@stable 46 | - run: cargo build --release 47 | 48 | build-no-default-features: 49 | runs-on: ubuntu-latest 50 | steps: 51 | - uses: actions/checkout@v5 52 | - uses: dtolnay/rust-toolchain@stable 53 | - run: cargo build --release --no-default-features 54 | 55 | build-wasm-all-features: 56 | runs-on: ubuntu-latest 57 | steps: 58 | - uses: actions/checkout@v5 59 | - uses: dtolnay/rust-toolchain@stable 60 | with: 61 | targets: wasm32-unknown-unknown 62 | - run: cargo +stable build --release --target=wasm32-unknown-unknown --all-features 63 | 64 | build-wasm-default-features: 65 | runs-on: ubuntu-latest 66 | steps: 67 | - uses: actions/checkout@v5 68 | - uses: dtolnay/rust-toolchain@stable 69 | with: 70 | targets: wasm32-unknown-unknown 71 | - run: cargo +stable build --release --target=wasm32-unknown-unknown 72 | 73 | build-wasm-no-default-features: 74 | runs-on: ubuntu-latest 75 | steps: 76 | - uses: actions/checkout@v5 77 | - uses: dtolnay/rust-toolchain@stable 78 | with: 79 | targets: wasm32-unknown-unknown 80 | - run: cargo +stable build --release --target=wasm32-unknown-unknown --no-default-features 81 | 82 | build-maximal: 83 | runs-on: ubuntu-latest 84 | steps: 85 | - uses: actions/checkout@v5 86 | - uses: dtolnay/rust-toolchain@stable 87 | - run: cargo update 88 | - run: cargo build --release --all-features 89 | 90 | test-all-features: 91 | runs-on: ubuntu-latest 92 | steps: 93 | - uses: actions/checkout@v5 94 | - uses: dtolnay/rust-toolchain@stable 95 | - run: cargo test --all-features 96 | 97 | test-default-features: 98 | runs-on: ubuntu-latest 99 | steps: 100 | - uses: actions/checkout@v5 101 | - uses: dtolnay/rust-toolchain@stable 102 | - run: cargo test 103 | 104 | test-no-default-features: 105 | runs-on: ubuntu-latest 106 | steps: 107 | - uses: actions/checkout@v5 108 | - uses: 
dtolnay/rust-toolchain@stable 109 | - run: cargo test --no-default-features 110 | 111 | test-maximal: 112 | runs-on: ubuntu-latest 113 | steps: 114 | - uses: actions/checkout@v5 115 | - uses: dtolnay/rust-toolchain@stable 116 | - run: cargo update 117 | - run: cargo test --all-features 118 | 119 | benchmarking: 120 | runs-on: ubuntu-latest 121 | steps: 122 | - uses: actions/checkout@v5 123 | - uses: dtolnay/rust-toolchain@stable 124 | - run: cargo bench 125 | -------------------------------------------------------------------------------- /src/path.rs: -------------------------------------------------------------------------------- 1 | use cosmwasm_std::storage_keys::namespace_with_key; 2 | use serde::de::DeserializeOwned; 3 | use serde::Serialize; 4 | use std::marker::PhantomData; 5 | 6 | use crate::helpers::not_found_object_info; 7 | use cosmwasm_std::{from_json, to_json_vec, StdError, StdResult, Storage}; 8 | use std::ops::Deref; 9 | 10 | #[derive(Debug, Clone)] 11 | pub struct Path 12 | where 13 | T: Serialize + DeserializeOwned, 14 | { 15 | /// all namespaces prefixes and concatenated with the key 16 | pub(crate) storage_key: Vec, 17 | // see https://doc.rust-lang.org/std/marker/struct.PhantomData.html#unused-type-parameters for why this is needed 18 | data: PhantomData, 19 | } 20 | 21 | impl Deref for Path 22 | where 23 | T: Serialize + DeserializeOwned, 24 | { 25 | type Target = [u8]; 26 | 27 | fn deref(&self) -> &[u8] { 28 | &self.storage_key 29 | } 30 | } 31 | 32 | impl Path 33 | where 34 | T: Serialize + DeserializeOwned, 35 | { 36 | pub fn new(namespace: &[u8], keys: &[&[u8]]) -> Self { 37 | let l = keys.len(); 38 | 39 | // Combine namespace and all but last keys. 40 | // This is a single vector allocation with references as elements. 
41 | let calculated_len = 1 + keys.len() - 1; 42 | let mut combined: Vec<&[u8]> = Vec::with_capacity(calculated_len); 43 | combined.push(namespace); 44 | combined.extend(keys[0..l - 1].iter()); 45 | debug_assert_eq!(calculated_len, combined.len()); // as long as we calculate correctly, we don't need to reallocate 46 | let storage_key = namespace_with_key(&combined, keys[l - 1]); 47 | Path { 48 | storage_key, 49 | data: PhantomData, 50 | } 51 | } 52 | 53 | /// save will serialize the model and store, returns an error on serialization issues 54 | pub fn save(&self, store: &mut dyn Storage, data: &T) -> StdResult<()> { 55 | store.set(&self.storage_key, &to_json_vec(data)?); 56 | Ok(()) 57 | } 58 | 59 | pub fn remove(&self, store: &mut dyn Storage) { 60 | store.remove(&self.storage_key); 61 | } 62 | 63 | /// load will return an error if no data is set at the given key, or on parse error 64 | pub fn load(&self, store: &dyn Storage) -> StdResult { 65 | if let Some(value) = store.get(&self.storage_key) { 66 | from_json(value) 67 | } else { 68 | let object_info = not_found_object_info::(&self.storage_key); 69 | Err(StdError::msg(format!("{object_info} not found"))) 70 | } 71 | } 72 | 73 | /// may_load will parse the data stored at the key if present, returns Ok(None) if no data there. 74 | /// returns an error on issues parsing 75 | pub fn may_load(&self, store: &dyn Storage) -> StdResult> { 76 | let value = store.get(&self.storage_key); 77 | value.map(|v| from_json(v)).transpose() 78 | } 79 | 80 | /// has returns true or false if any data is at this key, without parsing or interpreting the 81 | /// contents. It will returns true for an length-0 byte array (Some(b"")), if you somehow manage to set that. 82 | pub fn has(&self, store: &dyn Storage) -> bool { 83 | store.get(&self.storage_key).is_some() 84 | } 85 | 86 | /// Loads the data, perform the specified action, and store the result 87 | /// in the database. 
This is shorthand for some common sequences, which may be useful. 88 | /// 89 | /// If the data exists, `action(Some(value))` is called. Otherwise, `action(None)` is called. 90 | pub fn update(&self, store: &mut dyn Storage, action: A) -> Result 91 | where 92 | A: FnOnce(Option) -> Result, 93 | E: From, 94 | { 95 | let input = self.may_load(store)?; 96 | let output = action(input)?; 97 | self.save(store, &output)?; 98 | Ok(output) 99 | } 100 | } 101 | -------------------------------------------------------------------------------- /tests/newtype.rs: -------------------------------------------------------------------------------- 1 | #[cfg(all(test, feature = "iterator", feature = "macro"))] 2 | mod test { 3 | use std::ops::Deref; 4 | 5 | use cosmwasm_std::{testing::MockStorage, Addr, Uint128}; 6 | use cw_storage_macro::NewTypeKey; 7 | use cw_storage_plus::Map; 8 | use derive_more::with_trait::Display; 9 | use serde::{Deserialize, Serialize}; 10 | 11 | #[test] 12 | fn newtype_compiles() { 13 | #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, NewTypeKey)] 14 | struct TestKey(u64); 15 | 16 | let _ = TestKey(100); 17 | } 18 | 19 | #[test] 20 | fn newtype_works() { 21 | #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, NewTypeKey, Display)] 22 | struct TestKeyU8(u8); 23 | impl Deref for TestKeyU8 { 24 | type Target = u8; 25 | 26 | fn deref(&self) -> &Self::Target { 27 | &self.0 28 | } 29 | } 30 | 31 | #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, NewTypeKey, Display)] 32 | struct TestKeyU64(u64); 33 | impl Deref for TestKeyU64 { 34 | type Target = u64; 35 | 36 | fn deref(&self) -> &Self::Target { 37 | &self.0 38 | } 39 | } 40 | 41 | #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, NewTypeKey, Display)] 42 | struct TestKeyU128(u128); 43 | impl Deref for TestKeyU128 { 44 | type Target = u128; 45 | 46 | fn deref(&self) -> &Self::Target { 47 | &self.0 48 | } 49 | } 50 | 51 | #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, 
NewTypeKey, Display)] 52 | struct TestKeyUint128(Uint128); 53 | impl Deref for TestKeyUint128 { 54 | type Target = Uint128; 55 | 56 | fn deref(&self) -> &Self::Target { 57 | &self.0 58 | } 59 | } 60 | 61 | #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, NewTypeKey, Display)] 62 | struct TestKeyString(String); 63 | impl Deref for TestKeyString { 64 | type Target = String; 65 | 66 | fn deref(&self) -> &Self::Target { 67 | &self.0 68 | } 69 | } 70 | 71 | #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, NewTypeKey, Display)] 72 | struct TestKeyAddr(Addr); 73 | impl Deref for TestKeyAddr { 74 | type Target = Addr; 75 | 76 | fn deref(&self) -> &Self::Target { 77 | &self.0 78 | } 79 | } 80 | 81 | fn run<'a, T, U>(key: T, map: Map, expected_str: &str) 82 | where 83 | T: cw_storage_plus::PrimaryKey<'a> + Display + Deref, 84 | U: cw_storage_plus::PrimaryKey<'a>, 85 | { 86 | let mut storage = MockStorage::new(); 87 | 88 | // they should serialize to the same string 89 | assert_eq!(key.to_string(), expected_str); 90 | 91 | // they should have the same underlying key 92 | let inner_key: &U = key.deref(); 93 | assert_eq!(inner_key.joined_key(), key.joined_key()); 94 | 95 | // the newtype wrapper should work for maps 96 | let value = "value".to_string(); 97 | map.save(&mut storage, key.clone(), &value).unwrap(); 98 | assert_eq!(map.load(&storage, key.clone()).unwrap(), value); 99 | } 100 | 101 | run::<_, u8>(TestKeyU8(1u8), Map::new("map-1"), "1"); 102 | run::<_, u64>(TestKeyU64(2u64), Map::new("map-2"), "2"); 103 | run::<_, u128>(TestKeyU128(3u128), Map::new("map-3"), "3"); 104 | run::<_, Uint128>(TestKeyUint128(Uint128::new(4u128)), Map::new("map-4"), "4"); 105 | run::<_, String>( 106 | TestKeyString("my_key".to_string()), 107 | Map::new("map-5"), 108 | "my_key", 109 | ); 110 | run::<_, Addr>( 111 | TestKeyAddr(Addr::unchecked("my_addr".to_string())), 112 | Map::new("map-6"), 113 | "my_addr", 114 | ); 115 | } 116 | } 117 | 
-------------------------------------------------------------------------------- /benches/main.rs: -------------------------------------------------------------------------------- 1 | use criterion::{criterion_group, criterion_main, Criterion}; 2 | use cw_storage_plus::IntKey; 3 | use rand::Rng; 4 | use rand_xoshiro::{rand_core::SeedableRng, Xoshiro256PlusPlus}; 5 | use std::hint::black_box; 6 | use std::time::Duration; 7 | 8 | fn bench_signed_int_key(c: &mut Criterion) { 9 | let mut group = c.benchmark_group("Signed int keys"); 10 | 11 | #[inline(always)] 12 | fn k() -> i32 { 13 | Xoshiro256PlusPlus::seed_from_u64(42).random_range(i32::MIN..i32::MAX) 14 | } 15 | // Save a value for future asserts. 16 | let k_check = k(); 17 | 18 | type Buf = [u8; size_of::()]; 19 | 20 | group.bench_function("i32 to_cw_bytes: xored (u32) + to_be_bytes", |b| { 21 | #[inline(always)] 22 | fn to_cw_bytes(value: &i32) -> Buf { 23 | (*value as u32 ^ i32::MIN as u32).to_be_bytes() 24 | } 25 | 26 | assert_eq!(to_cw_bytes(&0), i32::to_cw_bytes(&0)); 27 | assert_eq!(to_cw_bytes(&k_check), i32::to_cw_bytes(&k_check)); 28 | assert_eq!( 29 | to_cw_bytes(&k_check.wrapping_neg()), 30 | i32::to_cw_bytes(&k_check.wrapping_neg()) 31 | ); 32 | 33 | b.iter(|| { 34 | let k = k(); 35 | black_box(to_cw_bytes(&k)); 36 | black_box(to_cw_bytes(&k.wrapping_neg())); 37 | }); 38 | }); 39 | 40 | group.bench_function("i32 to_cw_bytes: xored (u128) + to_be_bytes", |b| { 41 | #[inline(always)] 42 | fn to_cw_bytes(value: &i32) -> Buf { 43 | ((*value as u128 ^ i32::MIN as u128) as i32).to_be_bytes() 44 | } 45 | 46 | assert_eq!(to_cw_bytes(&0), i32::to_cw_bytes(&0)); 47 | assert_eq!(to_cw_bytes(&k_check), i32::to_cw_bytes(&k_check)); 48 | assert_eq!( 49 | to_cw_bytes(&k_check.wrapping_neg()), 50 | i32::to_cw_bytes(&k_check.wrapping_neg()) 51 | ); 52 | 53 | b.iter(|| { 54 | let k = k(); 55 | black_box(to_cw_bytes(&k)); 56 | black_box(to_cw_bytes(&k.wrapping_neg())); 57 | }); 58 | }); 59 | 60 | 
group.bench_function("i32 to_cw_bytes: mut to_be_bytes + xor", |b| { 61 | #[inline(always)] 62 | fn to_cw_bytes(value: &i32) -> Buf { 63 | let mut buf = i32::to_be_bytes(*value); 64 | buf[0] ^= 0x80; 65 | buf 66 | } 67 | 68 | assert_eq!(to_cw_bytes(&0), i32::to_cw_bytes(&0)); 69 | assert_eq!(to_cw_bytes(&k_check), i32::to_cw_bytes(&k_check)); 70 | assert_eq!( 71 | to_cw_bytes(&k_check.wrapping_neg()), 72 | i32::to_cw_bytes(&k_check.wrapping_neg()) 73 | ); 74 | 75 | b.iter(|| { 76 | let k = k(); 77 | black_box(to_cw_bytes(&k)); 78 | black_box(to_cw_bytes(&k.wrapping_neg())); 79 | }); 80 | }); 81 | 82 | group.bench_function("i32 to_cw_bytes: branching plus / minus", |b| { 83 | #[inline(always)] 84 | fn to_cw_bytes(value: &i32) -> Buf { 85 | if value >= &0i32 { 86 | (*value as u32).wrapping_sub(i32::MIN as u32).to_be_bytes() 87 | } else { 88 | (*value as u32).wrapping_add(i32::MIN as u32).to_be_bytes() 89 | } 90 | } 91 | 92 | assert_eq!(to_cw_bytes(&0), i32::to_cw_bytes(&0)); 93 | assert_eq!(to_cw_bytes(&k_check), i32::to_cw_bytes(&k_check)); 94 | assert_eq!( 95 | to_cw_bytes(&k_check.wrapping_neg()), 96 | i32::to_cw_bytes(&k_check.wrapping_neg()) 97 | ); 98 | 99 | b.iter(|| { 100 | let k = k(); 101 | black_box(to_cw_bytes(&k)); 102 | black_box(to_cw_bytes(&k.wrapping_neg())); 103 | }); 104 | }); 105 | 106 | group.finish(); 107 | } 108 | 109 | fn bench_unsigned_int_key(c: &mut Criterion) { 110 | let mut group = c.benchmark_group("Unsigned int keys"); 111 | 112 | #[inline(always)] 113 | fn k() -> u32 { 114 | Xoshiro256PlusPlus::seed_from_u64(42).random_range(u32::MIN..u32::MAX) 115 | } 116 | // Save a value for future asserts. 
117 | let k_check = k(); 118 | 119 | type Buf = [u8; size_of::()]; 120 | 121 | group.bench_function("u32 to_cw_bytes", |b| { 122 | #[inline(always)] 123 | fn to_cw_bytes(value: &u32) -> Buf { 124 | value.to_be_bytes() 125 | } 126 | 127 | assert_eq!(to_cw_bytes(&0), u32::to_cw_bytes(&0)); 128 | assert_eq!(to_cw_bytes(&k_check), u32::to_cw_bytes(&k_check)); 129 | 130 | b.iter(|| { 131 | let k = k(); 132 | // Run twice for comparability with other benchmarks. 133 | black_box(to_cw_bytes(&k)); 134 | black_box(to_cw_bytes(&k)); 135 | }); 136 | }); 137 | 138 | group.finish(); 139 | } 140 | 141 | fn make_config() -> Criterion { 142 | Criterion::default() 143 | .without_plots() 144 | .measurement_time(Duration::new(5, 0)) 145 | .sample_size(10) 146 | .configure_from_args() 147 | } 148 | 149 | criterion_group!( 150 | name = signed_int_key; 151 | config = make_config(); 152 | targets = bench_signed_int_key 153 | ); 154 | 155 | criterion_group!( 156 | name = unsigned_int_key; 157 | config = make_config(); 158 | targets = bench_unsigned_int_key 159 | ); 160 | 161 | criterion_main!(signed_int_key, unsigned_int_key); 162 | -------------------------------------------------------------------------------- /src/int_key.rs: -------------------------------------------------------------------------------- 1 | use std::mem; 2 | 3 | use cosmwasm_std::{Int128, Int64, Uint128, Uint64}; 4 | 5 | /// Our int keys are simply the big-endian representation bytes for unsigned ints, 6 | /// but "sign-flipped" (xored msb) big-endian bytes for signed ints. 7 | /// 8 | /// So that the representation of signed integers is in the right lexicographical order. 9 | pub trait IntKey: Sized + Copy { 10 | type Buf: AsRef<[u8]> + AsMut<[u8]> + Into> + Default; 11 | 12 | fn to_cw_bytes(&self) -> Self::Buf; 13 | fn from_cw_bytes(bytes: Self::Buf) -> Self; 14 | } 15 | 16 | macro_rules! 
cw_uint_keys { 17 | (for $($t:ty),+) => { 18 | $(impl IntKey for $t { 19 | type Buf = [u8; mem::size_of::<$t>()]; 20 | 21 | #[inline] 22 | fn to_cw_bytes(&self) -> Self::Buf { 23 | self.to_be_bytes() 24 | } 25 | 26 | #[inline] 27 | fn from_cw_bytes(bytes: Self::Buf) -> Self { 28 | Self::from_be_bytes(bytes) 29 | } 30 | })* 31 | } 32 | } 33 | 34 | cw_uint_keys!(for u8, u16, u32, u64, u128); 35 | 36 | macro_rules! cw_int_keys { 37 | (for $($t:ty, $ut:ty),+) => { 38 | $(impl IntKey for $t { 39 | type Buf = [u8; mem::size_of::<$t>()]; 40 | 41 | #[inline] 42 | fn to_cw_bytes(&self) -> Self::Buf { 43 | (*self as $ut ^ <$t>::MIN as $ut).to_be_bytes() 44 | } 45 | 46 | #[inline] 47 | fn from_cw_bytes(bytes: Self::Buf) -> Self { 48 | (Self::from_be_bytes(bytes) as $ut ^ <$t>::MIN as $ut) as _ 49 | } 50 | })* 51 | } 52 | } 53 | 54 | cw_int_keys!(for i8, u8, i16, u16, i32, u32, i64, u64, i128, u128); 55 | 56 | macro_rules! cw_uint_std_keys { 57 | (for $($t:ty),+) => { 58 | $(impl IntKey for $t { 59 | type Buf = [u8; mem::size_of::<$t>()]; 60 | 61 | #[inline] 62 | fn to_cw_bytes(&self) -> Self::Buf { 63 | self.to_be_bytes() 64 | } 65 | 66 | #[inline] 67 | fn from_cw_bytes(bytes: Self::Buf) -> Self { 68 | Self::new(IntKey::from_cw_bytes(bytes)) 69 | } 70 | })* 71 | } 72 | } 73 | 74 | cw_uint_std_keys!(for Uint64, Uint128); 75 | 76 | macro_rules! 
cw_int_std_keys { 77 | (for $($t:ty),+) => { 78 | $(impl IntKey for $t { 79 | type Buf = [u8; mem::size_of::<$t>()]; 80 | 81 | #[inline] 82 | fn to_cw_bytes(&self) -> Self::Buf { 83 | let mut bytes = self.to_be_bytes(); 84 | bytes[0] ^= 0x80; 85 | bytes 86 | } 87 | 88 | #[inline] 89 | fn from_cw_bytes(bytes: Self::Buf) -> Self { 90 | Self::new(IntKey::from_cw_bytes(bytes)) 91 | } 92 | })* 93 | } 94 | } 95 | 96 | cw_int_std_keys!(for Int64, Int128); 97 | 98 | #[cfg(test)] 99 | mod test { 100 | use super::*; 101 | 102 | #[test] 103 | fn x8_int_key_works() { 104 | assert_eq!(0x42u8.to_cw_bytes(), [0x42]); 105 | assert_eq!(0x42i8.to_cw_bytes(), [0xc2]); 106 | assert_eq!((-0x3ei8).to_cw_bytes(), [0x42]); 107 | } 108 | 109 | #[test] 110 | fn x16_int_key_works() { 111 | assert_eq!(0x4243u16.to_cw_bytes(), [0x42, 0x43]); 112 | assert_eq!(0x4243i16.to_cw_bytes(), [0xc2, 0x43]); 113 | assert_eq!((-0x3dbdi16).to_cw_bytes(), [0x42, 0x43]); 114 | } 115 | 116 | #[test] 117 | fn x32_int_key_works() { 118 | assert_eq!(0x424344u32.to_cw_bytes(), [0x00, 0x42, 0x43, 0x44]); 119 | assert_eq!(0x424344i32.to_cw_bytes(), [0x80, 0x42, 0x43, 0x44]); 120 | assert_eq!((-0x7fbdbcbci32).to_cw_bytes(), [0x00, 0x42, 0x43, 0x44]); 121 | } 122 | 123 | #[test] 124 | fn x64_int_key_works() { 125 | assert_eq!( 126 | 0x42434445u64.to_cw_bytes(), 127 | [0x00, 0x00, 0x00, 0x00, 0x42, 0x43, 0x44, 0x45] 128 | ); 129 | assert_eq!( 130 | 0x42434445i64.to_cw_bytes(), 131 | [0x80, 0x00, 0x00, 0x00, 0x42, 0x43, 0x44, 0x45] 132 | ); 133 | assert_eq!( 134 | (-0x7fffffffbdbcbbbbi64).to_cw_bytes(), 135 | [0x00, 0x00, 0x00, 0x00, 0x42, 0x43, 0x44, 0x45] 136 | ); 137 | } 138 | 139 | #[test] 140 | fn x128_int_key_works() { 141 | assert_eq!( 142 | 0x4243444546u128.to_cw_bytes(), 143 | [ 144 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x42, 0x43, 0x44, 145 | 0x45, 0x46 146 | ] 147 | ); 148 | assert_eq!( 149 | 0x4243444546i128.to_cw_bytes(), 150 | [ 151 | 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x42, 0x43, 0x44, 152 | 0x45, 0x46 153 | ] 154 | ); 155 | assert_eq!( 156 | (-0x7fffffffffffffffffffffbdbcbbbabai128).to_cw_bytes(), 157 | [ 158 | 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x42, 0x43, 0x44, 159 | 0x45, 0x46 160 | ] 161 | ); 162 | } 163 | 164 | #[test] 165 | fn unsigned_int_key_order() { 166 | assert!(0u32.to_cw_bytes() < 652u32.to_cw_bytes()); 167 | } 168 | 169 | #[test] 170 | fn signed_int_key_order() { 171 | assert!((-321i32).to_cw_bytes() < 0i32.to_cw_bytes()); 172 | assert!(0i32.to_cw_bytes() < 652i32.to_cw_bytes()); 173 | } 174 | } 175 | -------------------------------------------------------------------------------- /src/iter_helpers.rs: -------------------------------------------------------------------------------- 1 | #![cfg(feature = "iterator")] 2 | 3 | use serde::de::DeserializeOwned; 4 | 5 | use cosmwasm_std::{from_json, Record, StdResult}; 6 | 7 | use crate::de::KeyDeserialize; 8 | 9 | #[allow(dead_code)] 10 | pub(crate) fn deserialize_v(kv: Record) -> StdResult> { 11 | let (k, v) = kv; 12 | let t = from_json::(&v)?; 13 | Ok((k, t)) 14 | } 15 | 16 | pub(crate) fn deserialize_kv( 17 | kv: Record, 18 | ) -> StdResult<(K::Output, T)> { 19 | let (k, v) = kv; 20 | let kt = K::from_vec(k)?; 21 | let vt = from_json::(&v)?; 22 | Ok((kt, vt)) 23 | } 24 | 25 | // TODO: add a check here that it is the real prefix? 
26 | #[inline] 27 | pub(crate) fn trim(namespace: &[u8], key: &[u8]) -> Vec { 28 | key[namespace.len()..].to_vec() 29 | } 30 | 31 | #[inline] 32 | pub(crate) fn concat(namespace: &[u8], key: &[u8]) -> Vec { 33 | let mut k = namespace.to_vec(); 34 | k.extend_from_slice(key); 35 | k 36 | } 37 | 38 | // currently disabled tests as they require a bunch of legacy non-sense 39 | // TODO: enable 40 | #[cfg(test)] 41 | #[cfg(not(feature = "iterator"))] 42 | mod namespace_test { 43 | use super::*; 44 | use cosmwasm_std::testing::MockStorage; 45 | 46 | #[test] 47 | fn test_range() { 48 | let mut storage = MockStorage::new(); 49 | let prefix = to_length_prefixed(b"foo"); 50 | let other_prefix = to_length_prefixed(b"food"); 51 | 52 | // set some values in this range 53 | set_with_prefix(&mut storage, &prefix, b"bar", b"none"); 54 | set_with_prefix(&mut storage, &prefix, b"snowy", b"day"); 55 | 56 | // set some values outside this range 57 | set_with_prefix(&mut storage, &other_prefix, b"moon", b"buggy"); 58 | 59 | // ensure we get proper result from prefixed_range iterator 60 | let mut iter = range_with_prefix(&storage, &prefix, None, None, Order::Descending); 61 | let first = iter.next().unwrap(); 62 | assert_eq!(first, (b"snowy".to_vec(), b"day".to_vec())); 63 | let second = iter.next().unwrap(); 64 | assert_eq!(second, (b"bar".to_vec(), b"none".to_vec())); 65 | assert!(iter.next().is_none()); 66 | 67 | // ensure we get raw result from base range 68 | let iter = storage.range(None, None, Order::Ascending); 69 | assert_eq!(3, iter.count()); 70 | 71 | // foo comes first 72 | let mut iter = storage.range(None, None, Order::Ascending); 73 | let first = iter.next().unwrap(); 74 | let expected_key = concat(&prefix, b"bar"); 75 | assert_eq!(first, (expected_key, b"none".to_vec())); 76 | } 77 | 78 | #[test] 79 | fn test_range_with_prefix_wrapover() { 80 | let mut storage = MockStorage::new(); 81 | // if we don't properly wrap over there will be issues here (note 255+1 is used to 
calculate end) 82 | let prefix = to_length_prefixed(b"f\xff\xff"); 83 | let other_prefix = to_length_prefixed(b"f\xff\x44"); 84 | 85 | // set some values in this range 86 | set_with_prefix(&mut storage, &prefix, b"bar", b"none"); 87 | set_with_prefix(&mut storage, &prefix, b"snowy", b"day"); 88 | 89 | // set some values outside this range 90 | set_with_prefix(&mut storage, &other_prefix, b"moon", b"buggy"); 91 | 92 | // ensure we get proper result from prefixed_range iterator 93 | let iter = range_with_prefix(&storage, &prefix, None, None, Order::Descending); 94 | let elements: Vec = iter.collect(); 95 | assert_eq!( 96 | elements, 97 | vec![ 98 | (b"snowy".to_vec(), b"day".to_vec()), 99 | (b"bar".to_vec(), b"none".to_vec()), 100 | ] 101 | ); 102 | } 103 | 104 | #[test] 105 | fn test_range_with_start_end_set() { 106 | let mut storage = MockStorage::new(); 107 | // if we don't properly wrap over there will be issues here (note 255+1 is used to calculate end) 108 | let prefix = to_length_prefixed(b"f\xff\xff"); 109 | let other_prefix = to_length_prefixed(b"f\xff\x44"); 110 | 111 | // set some values in this range 112 | set_with_prefix(&mut storage, &prefix, b"bar", b"none"); 113 | set_with_prefix(&mut storage, &prefix, b"snowy", b"day"); 114 | 115 | // set some values outside this range 116 | set_with_prefix(&mut storage, &other_prefix, b"moon", b"buggy"); 117 | 118 | // make sure start and end are applied properly 119 | let res: Vec = 120 | range_with_prefix(&storage, &prefix, Some(b"b"), Some(b"c"), Order::Ascending) 121 | .collect(); 122 | assert_eq!(res.len(), 1); 123 | assert_eq!(res[0], (b"bar".to_vec(), b"none".to_vec())); 124 | 125 | // make sure start and end are applied properly 126 | let res: Vec = range_with_prefix( 127 | &storage, 128 | &prefix, 129 | Some(b"bas"), 130 | Some(b"sno"), 131 | Order::Ascending, 132 | ) 133 | .collect(); 134 | assert_eq!(res.len(), 0); 135 | 136 | let res: Vec = 137 | range_with_prefix(&storage, &prefix, Some(b"ant"), None, 
Order::Ascending).collect(); 138 | assert_eq!(res.len(), 2); 139 | assert_eq!(res[0], (b"bar".to_vec(), b"none".to_vec())); 140 | assert_eq!(res[1], (b"snowy".to_vec(), b"day".to_vec())); 141 | } 142 | 143 | #[test] 144 | fn test_namespace_upper_bound() { 145 | assert_eq!(namespace_upper_bound(b"bob"), b"boc".to_vec()); 146 | assert_eq!(namespace_upper_bound(b"fo\xfe"), b"fo\xff".to_vec()); 147 | assert_eq!(namespace_upper_bound(b"fo\xff"), b"fp\x00".to_vec()); 148 | // multiple \xff roll over 149 | assert_eq!( 150 | namespace_upper_bound(b"fo\xff\xff\xff"), 151 | b"fp\x00\x00\x00".to_vec() 152 | ); 153 | // \xff not at the end are ignored 154 | assert_eq!(namespace_upper_bound(b"\xffabc"), b"\xffabd".to_vec()); 155 | } 156 | } 157 | -------------------------------------------------------------------------------- /src/bound.rs: -------------------------------------------------------------------------------- 1 | #![cfg(feature = "iterator")] 2 | 3 | use cosmwasm_std::Addr; 4 | use std::marker::PhantomData; 5 | 6 | use crate::de::KeyDeserialize; 7 | use crate::{Prefixer, PrimaryKey}; 8 | 9 | /// `RawBound` is used to define the two ends of a range, more explicit than `Option`. 10 | /// `None` means that we don't limit that side of the range at all. 11 | /// `Inclusive` means we use the given bytes as a limit and *include* anything at that exact key. 12 | /// `Exclusive` means we use the given bytes as a limit and *exclude* anything at that exact key. 13 | /// See `Bound` for a type safe way to build these bounds. 14 | #[derive(Clone, Debug)] 15 | pub enum RawBound { 16 | Inclusive(Vec), 17 | Exclusive(Vec), 18 | } 19 | 20 | /// `Bound` is used to define the two ends of a range. 21 | /// `None` means that we don't limit that side of the range at all. 22 | /// `Inclusive` means we use the given value as a limit and *include* anything at that exact key. 23 | /// `Exclusive` means we use the given value as a limit and *exclude* anything at that exact key. 
24 | #[derive(Clone, Debug)] 25 | pub enum Bound<'a, K: PrimaryKey<'a>> { 26 | Inclusive((K, PhantomData<&'a bool>)), 27 | Exclusive((K, PhantomData<&'a bool>)), 28 | InclusiveRaw(Vec), 29 | ExclusiveRaw(Vec), 30 | } 31 | 32 | impl<'a, K: PrimaryKey<'a>> Bound<'a, K> { 33 | pub fn inclusive>(k: T) -> Self { 34 | Self::Inclusive((k.into(), PhantomData)) 35 | } 36 | 37 | pub fn exclusive>(k: T) -> Self { 38 | Self::Exclusive((k.into(), PhantomData)) 39 | } 40 | 41 | pub fn to_raw_bound(&self) -> RawBound { 42 | match self { 43 | Bound::Inclusive((k, _)) => RawBound::Inclusive(k.joined_key()), 44 | Bound::Exclusive((k, _)) => RawBound::Exclusive(k.joined_key()), 45 | Bound::ExclusiveRaw(raw_k) => RawBound::Exclusive(raw_k.clone()), 46 | Bound::InclusiveRaw(raw_k) => RawBound::Inclusive(raw_k.clone()), 47 | } 48 | } 49 | } 50 | 51 | #[derive(Clone, Debug)] 52 | pub enum PrefixBound<'a, K: Prefixer<'a>> { 53 | Inclusive((K, PhantomData<&'a bool>)), 54 | Exclusive((K, PhantomData<&'a bool>)), 55 | } 56 | 57 | impl<'a, K: Prefixer<'a>> PrefixBound<'a, K> { 58 | pub fn inclusive>(k: T) -> Self { 59 | Self::Inclusive((k.into(), PhantomData)) 60 | } 61 | 62 | pub fn exclusive>(k: T) -> Self { 63 | Self::Exclusive((k.into(), PhantomData)) 64 | } 65 | 66 | pub fn to_raw_bound(&self) -> RawBound { 67 | match self { 68 | PrefixBound::Exclusive((k, _)) => RawBound::Exclusive(k.joined_prefix()), 69 | PrefixBound::Inclusive((k, _)) => RawBound::Inclusive(k.joined_prefix()), 70 | } 71 | } 72 | } 73 | 74 | pub trait Bounder<'a>: PrimaryKey<'a> + Sized { 75 | fn inclusive_bound(self) -> Option>; 76 | fn exclusive_bound(self) -> Option>; 77 | } 78 | 79 | impl<'a> Bounder<'a> for () { 80 | fn inclusive_bound(self) -> Option> { 81 | None 82 | } 83 | fn exclusive_bound(self) -> Option> { 84 | None 85 | } 86 | } 87 | 88 | impl<'a> Bounder<'a> for &'a [u8] { 89 | fn inclusive_bound(self) -> Option> { 90 | Some(Bound::inclusive(self)) 91 | } 92 | fn exclusive_bound(self) -> Option> { 93 | 
Some(Bound::exclusive(self)) 94 | } 95 | } 96 | 97 | impl< 98 | 'a, 99 | T: PrimaryKey<'a> + KeyDeserialize + Prefixer<'a> + Clone, 100 | U: PrimaryKey<'a> + KeyDeserialize + Clone, 101 | > Bounder<'a> for (T, U) 102 | { 103 | fn inclusive_bound(self) -> Option> { 104 | Some(Bound::inclusive(self)) 105 | } 106 | fn exclusive_bound(self) -> Option> { 107 | Some(Bound::exclusive(self)) 108 | } 109 | } 110 | 111 | impl< 112 | 'a, 113 | T: PrimaryKey<'a> + Prefixer<'a> + Clone, 114 | U: PrimaryKey<'a> + Prefixer<'a> + KeyDeserialize + Clone, 115 | V: PrimaryKey<'a> + KeyDeserialize + Clone, 116 | > Bounder<'a> for (T, U, V) 117 | { 118 | fn inclusive_bound(self) -> Option> { 119 | Some(Bound::inclusive(self)) 120 | } 121 | fn exclusive_bound(self) -> Option> { 122 | Some(Bound::exclusive(self)) 123 | } 124 | } 125 | 126 | impl<'a> Bounder<'a> for &'a str { 127 | fn inclusive_bound(self) -> Option> { 128 | Some(Bound::inclusive(self)) 129 | } 130 | fn exclusive_bound(self) -> Option> { 131 | Some(Bound::exclusive(self)) 132 | } 133 | } 134 | 135 | impl<'a> Bounder<'a> for String { 136 | fn inclusive_bound(self) -> Option> { 137 | Some(Bound::inclusive(self)) 138 | } 139 | fn exclusive_bound(self) -> Option> { 140 | Some(Bound::exclusive(self)) 141 | } 142 | } 143 | 144 | impl<'a> Bounder<'a> for Vec { 145 | fn inclusive_bound(self) -> Option> { 146 | Some(Bound::inclusive(self)) 147 | } 148 | fn exclusive_bound(self) -> Option> { 149 | Some(Bound::exclusive(self)) 150 | } 151 | } 152 | 153 | impl<'a> Bounder<'a> for &'a Addr { 154 | fn inclusive_bound(self) -> Option> { 155 | Some(Bound::inclusive(self)) 156 | } 157 | fn exclusive_bound(self) -> Option> { 158 | Some(Bound::exclusive(self)) 159 | } 160 | } 161 | 162 | impl<'a> Bounder<'a> for Addr { 163 | fn inclusive_bound(self) -> Option> { 164 | Some(Bound::inclusive(self)) 165 | } 166 | fn exclusive_bound(self) -> Option> { 167 | Some(Bound::exclusive(self)) 168 | } 169 | } 170 | 171 | macro_rules! 
integer_bound { 172 | (for $($t:ty),+) => { 173 | $(impl<'a> Bounder<'a> for $t { 174 | fn inclusive_bound(self) -> Option> { 175 | Some(Bound::inclusive(self)) 176 | } 177 | fn exclusive_bound(self) -> Option> { 178 | Some(Bound::exclusive(self)) 179 | } 180 | })* 181 | } 182 | } 183 | 184 | integer_bound!(for i8, u8, i16, u16, i32, u32, i64, u64); 185 | -------------------------------------------------------------------------------- /Taskfile.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | silent: true 4 | 5 | tasks: 6 | 7 | all: 8 | desc: Runs all checks 9 | summary: | 10 | Runs all viable checks for cw-storage-plus. 11 | Execute this task before pushing any changes. 12 | This task takes a significant amount of time to complete. 13 | cmds: 14 | - task: clean 15 | - task: build 16 | - task: clippy 17 | - task: test 18 | - task: bench 19 | - task: hack 20 | 21 | bench: 22 | desc: Runs all benchmarks 23 | cmds: 24 | - cmd: cargo +stable bench 25 | 26 | build: 27 | desc: Runs multiple building variants 28 | cmds: 29 | - task: build-all-features 30 | - task: build-default-features 31 | - task: build-no-default-features 32 | - task: build-wasm-all-features 33 | - task: build-wasm-default-features 34 | - task: build-wasm-no-default-features 35 | 36 | build-all-features: 37 | desc: Builds in debug mode with all features enabled 38 | cmds: 39 | - cmd: cargo +stable build --all-features --workspace 40 | 41 | build-default-features: 42 | desc: Builds in debug mode with default features 43 | cmds: 44 | - cmd: cargo +stable build --workspace 45 | 46 | build-no-default-features: 47 | desc: Builds in debug mode without default features 48 | cmds: 49 | - cmd: cargo +stable build --no-default-features --workspace 50 | 51 | build-wasm-all-features: 52 | desc: Builds WASM in debug mode with all features enabled 53 | cmds: 54 | - cmd: cargo +stable build --target wasm32-unknown-unknown --all-features --workspace 55 | 56 
| build-wasm-default-features: 57 | desc: Builds WASM in debug mode with default features 58 | cmds: 59 | - cmd: cargo +stable build --target wasm32-unknown-unknown --workspace 60 | 61 | build-wasm-no-default-features: 62 | desc: Builds WASM in debug mode without default features 63 | cmds: 64 | - cmd: cargo +stable build --target wasm32-unknown-unknown --no-default-features --workspace 65 | 66 | clean: 67 | desc: Cleans target output 68 | cmds: 69 | - cmd: cargo clean 70 | 71 | clippy: 72 | desc: Runs multiple clippy variants 73 | cmds: 74 | - task: clippy-all-features 75 | - task: clippy-default-features 76 | - task: clippy-no-default-features 77 | 78 | clippy-all-features: 79 | desc: Runs clippy with all features enabled 80 | cmds: 81 | - cmd: cargo +stable clippy --all-features --all-targets --workspace 82 | 83 | clippy-default-features: 84 | desc: Runs clippy with default features 85 | cmds: 86 | - cmd: cargo +stable clippy --all-targets --workspace 87 | 88 | clippy-no-default-features: 89 | desc: Runs clippy without default features 90 | cmds: 91 | - cmd: cargo +stable clippy --no-default-features --all-targets --workspace 92 | 93 | cov: 94 | desc: Generates workspace code coverage report in text format and prints it to stdout 95 | cmds: 96 | - cmd: cargo +stable llvm-cov clean 97 | - cmd: cargo +stable llvm-cov --no-cfg-coverage --all-features --workspace 98 | 99 | cov-html: 100 | desc: Generates workspace code coverage report in HTML format and opens it in a browser 101 | cmds: 102 | - cmd: cargo +stable llvm-cov clean 103 | - cmd: cargo +stable llvm-cov --no-cfg-coverage --all-features --workspace --html --open 104 | 105 | cov-badge-cw-storage-plus: 106 | desc: Generates code coverage badge for cw-storage-plus 107 | cmds: 108 | - cmd: cargo +stable llvm-cov clean 109 | - cmd: cargo +stable llvm-cov -p cw-storage-plus --no-cfg-coverage --all-features --json --summary-only | coverio 110 | 111 | cov-badge-cw-storage-macro: 112 | desc: Generates code coverage 
badge for cw-storage-macro 113 | cmds: 114 | - cmd: cargo +stable llvm-cov clean 115 | - cmd: cargo +stable llvm-cov -p cw-storage-macro --no-cfg-coverage --all-features --json --summary-only | coverio 116 | 117 | fmt: 118 | desc: Runs code formatter 119 | cmds: 120 | - cmd: cargo +stable fmt 121 | 122 | doc: 123 | desc: Generates documentation 124 | cmds: 125 | - cmd: cargo +stable doc --no-deps 126 | 127 | doc-open: 128 | desc: Generates documentation and opens it in a browser 129 | cmds: 130 | - cmd: cargo +stable doc --no-deps --open 131 | 132 | test: 133 | desc: Runs multiple test variants 134 | cmds: 135 | - task: test-all-features 136 | - task: test-default-features 137 | - task: test-no-default-features 138 | 139 | test-all-features: 140 | desc: Runs all tests in debug mode with all features enabled 141 | cmds: 142 | - cmd: cargo +stable test --all-features --workspace 143 | 144 | test-default-features: 145 | desc: Runs all tests in debug mode with default features 146 | cmds: 147 | - cmd: cargo +stable test --workspace 148 | 149 | test-no-default-features: 150 | desc: Runs all tests in debug mode without default features 151 | cmds: 152 | - cmd: cargo +stable test --no-default-features --workspace 153 | 154 | testn: 155 | desc: Runs all tests in debug mode using nextest 156 | cmds: 157 | - task: testn-all-features 158 | - task: testn-default-features 159 | - task: testn-no-default-features 160 | 161 | testn-all-features: 162 | desc: Runs all tests in debug mode with all features enabled using nextest 163 | cmds: 164 | - cmd: cargo +stable nextest run --all-features --workspace 165 | 166 | testn-default-features: 167 | desc: Runs all tests in debug mode with default features using nextest 168 | cmds: 169 | - cmd: cargo +stable nextest run --workspace 170 | 171 | testn-no-default-features: 172 | desc: Runs all tests in debug mode without default features using nextest 173 | cmds: 174 | - cmd: cargo +stable nextest run --no-default-features --workspace 175 | 
176 | hack: 177 | desc: Runs checks based on all feature combinations 178 | cmds: 179 | - task: hack-build 180 | - task: hack-clippy 181 | - task: hack-test 182 | 183 | hack-build: 184 | desc: Builds in debug mode using all feature combinations 185 | cmds: 186 | - cmd: cargo +stable hack build --feature-powerset --workspace 187 | 188 | hack-clippy: 189 | desc: Runs clippy using all feature combinations 190 | cmds: 191 | - cmd: cargo +stable hack clippy --feature-powerset --workspace 192 | 193 | hack-test: 194 | desc: Runs tests using all feature combinations 195 | cmds: 196 | - cmd: cargo +stable hack test --feature-powerset --workspace 197 | -------------------------------------------------------------------------------- /src/indexes/unique.rs: -------------------------------------------------------------------------------- 1 | // this module requires iterator to be useful at all 2 | #![cfg(feature = "iterator")] 3 | 4 | use std::marker::PhantomData; 5 | 6 | use serde::de::DeserializeOwned; 7 | use serde::{Deserialize, Serialize}; 8 | 9 | use cosmwasm_std::{from_json, Binary, Order, Record, StdError, StdResult, Storage}; 10 | 11 | use crate::bound::PrefixBound; 12 | use crate::de::KeyDeserialize; 13 | use crate::indexes::IndexPrefix; 14 | use crate::iter_helpers::deserialize_kv; 15 | use crate::map::Map; 16 | use crate::prefix::namespaced_prefix_range; 17 | use crate::{Bound, Index, Prefixer, PrimaryKey}; 18 | 19 | /// UniqueRef stores Binary(Vec[u8]) representation of private key and index value 20 | #[derive(Deserialize, Serialize)] 21 | pub(crate) struct UniqueRef { 22 | // note, we collapse the pk - combining everything under the namespace - even if it is composite 23 | pk: Binary, 24 | value: T, 25 | } 26 | 27 | /// UniqueIndex stores (namespace, index_name, idx_value) -> {key, value} 28 | /// Allows one value per index (i.e. unique) and copies pk and data 29 | /// The PK type defines the type of Primary Key deserialization. 
30 | pub struct UniqueIndex<'a, IK, T, PK> { 31 | index: fn(&T) -> IK, 32 | idx_map: Map>, 33 | idx_namespace: &'a [u8], 34 | phantom: PhantomData, 35 | } 36 | 37 | impl UniqueIndex<'_, IK, T, PK> { 38 | /// Create a new UniqueIndex 39 | /// 40 | /// idx_fn - lambda creating index key from index value 41 | /// idx_namespace - prefix for the index value 42 | /// 43 | /// ## Example: 44 | /// 45 | /// ```rust 46 | /// use cw_storage_plus::UniqueIndex; 47 | /// 48 | /// struct Data { 49 | /// pub name: String, 50 | /// pub age: u32, 51 | /// } 52 | /// 53 | /// UniqueIndex::<_, _, ()>::new(|d: &Data| d.age, "data__age"); 54 | /// ``` 55 | pub const fn new(idx_fn: fn(&T) -> IK, idx_namespace: &'static str) -> Self { 56 | UniqueIndex { 57 | index: idx_fn, 58 | idx_map: Map::new(idx_namespace), 59 | idx_namespace: idx_namespace.as_bytes(), 60 | phantom: PhantomData, 61 | } 62 | } 63 | } 64 | 65 | impl<'a, IK, T, PK> Index for UniqueIndex<'a, IK, T, PK> 66 | where 67 | T: Serialize + DeserializeOwned + Clone, 68 | IK: PrimaryKey<'a>, 69 | { 70 | fn save(&self, store: &mut dyn Storage, pk: &[u8], data: &T) -> StdResult<()> { 71 | let idx = (self.index)(data); 72 | // error if this is already set 73 | self.idx_map 74 | .update(store, idx, |existing| -> StdResult<_> { 75 | match existing { 76 | Some(_) => Err(StdError::msg("Violates unique constraint on index")), 77 | None => Ok(UniqueRef:: { 78 | pk: pk.into(), 79 | value: data.clone(), 80 | }), 81 | } 82 | })?; 83 | Ok(()) 84 | } 85 | 86 | fn remove(&self, store: &mut dyn Storage, _pk: &[u8], old_data: &T) -> StdResult<()> { 87 | let idx = (self.index)(old_data); 88 | self.idx_map.remove(store, idx); 89 | Ok(()) 90 | } 91 | } 92 | 93 | fn deserialize_unique_v(kv: Record) -> StdResult> { 94 | let (_, v) = kv; 95 | let t = from_json::>(&v)?; 96 | Ok((t.pk.into(), t.value)) 97 | } 98 | 99 | fn deserialize_unique_kv( 100 | kv: Record, 101 | ) -> StdResult<(K::Output, T)> { 102 | let (_, v) = kv; 103 | let t = 
from_json::>(&v)?; 104 | Ok((K::from_vec(t.pk.into())?, t.value)) 105 | } 106 | 107 | impl<'a, IK, T, PK> UniqueIndex<'a, IK, T, PK> 108 | where 109 | T: Serialize + DeserializeOwned + Clone, 110 | IK: PrimaryKey<'a>, 111 | { 112 | pub fn index_key(&self, k: IK) -> Vec { 113 | k.joined_key() 114 | } 115 | 116 | fn no_prefix_raw(&self) -> IndexPrefix, T, IK> { 117 | IndexPrefix::with_deserialization_functions( 118 | self.idx_namespace, 119 | &[], 120 | &[], 121 | |_, _, kv| deserialize_unique_v(kv), 122 | |_, _, kv| deserialize_unique_v(kv), 123 | ) 124 | } 125 | 126 | /// returns all items that match this secondary index, always by pk Ascending 127 | pub fn item(&self, store: &dyn Storage, idx: IK) -> StdResult>> { 128 | let data = self 129 | .idx_map 130 | .may_load(store, idx)? 131 | .map(|i| (i.pk.into(), i.value)); 132 | Ok(data) 133 | } 134 | } 135 | 136 | // short-cut for simple keys, rather than .prefix(()).range_raw(...) 137 | impl<'a, IK, T, PK> UniqueIndex<'a, IK, T, PK> 138 | where 139 | T: Serialize + DeserializeOwned + Clone, 140 | IK: PrimaryKey<'a>, 141 | { 142 | // I would prefer not to copy code from Prefix, but no other way 143 | // with lifetimes (create Prefix inside function and return ref = no no) 144 | pub fn range_raw<'c>( 145 | &self, 146 | store: &'c dyn Storage, 147 | min: Option>, 148 | max: Option>, 149 | order: Order, 150 | ) -> Box>> + 'c> 151 | where 152 | T: 'c, 153 | { 154 | self.no_prefix_raw().range_raw(store, min, max, order) 155 | } 156 | 157 | pub fn keys_raw<'c>( 158 | &self, 159 | store: &'c dyn Storage, 160 | min: Option>, 161 | max: Option>, 162 | order: Order, 163 | ) -> Box> + 'c> { 164 | self.no_prefix_raw().keys_raw(store, min, max, order) 165 | } 166 | } 167 | 168 | #[cfg(feature = "iterator")] 169 | impl<'a, IK, T, PK> UniqueIndex<'a, IK, T, PK> 170 | where 171 | PK: PrimaryKey<'a> + KeyDeserialize, 172 | T: Serialize + DeserializeOwned + Clone, 173 | IK: PrimaryKey<'a>, 174 | { 175 | /// While `range` over a 
`prefix` fixes the prefix to one element and iterates over the 176 | /// remaining, `prefix_range` accepts bounds for the lowest and highest elements of the 177 | /// `Prefix` itself, and iterates over those (inclusively or exclusively, depending on 178 | /// `PrefixBound`). 179 | /// There are some issues that distinguish these two, and blindly casting to `Vec` doesn't 180 | /// solve them. 181 | pub fn prefix_range<'c>( 182 | &self, 183 | store: &'c dyn Storage, 184 | min: Option>, 185 | max: Option>, 186 | order: Order, 187 | ) -> Box> + 'c> 188 | where 189 | T: 'c, 190 | 'a: 'c, 191 | IK: 'c, 192 | PK: 'c, 193 | PK::Output: 'static, 194 | { 195 | let mapped = namespaced_prefix_range(store, self.idx_namespace, min, max, order) 196 | .map(deserialize_kv::); 197 | Box::new(mapped) 198 | } 199 | 200 | pub fn range<'c>( 201 | &self, 202 | store: &'c dyn Storage, 203 | min: Option>, 204 | max: Option>, 205 | order: Order, 206 | ) -> Box> + 'c> 207 | where 208 | T: 'c, 209 | PK::Output: 'static, 210 | { 211 | self.no_prefix().range(store, min, max, order) 212 | } 213 | 214 | pub fn keys<'c>( 215 | &self, 216 | store: &'c dyn Storage, 217 | min: Option>, 218 | max: Option>, 219 | order: Order, 220 | ) -> Box> + 'c> 221 | where 222 | T: 'c, 223 | PK::Output: 'static, 224 | { 225 | self.no_prefix().keys(store, min, max, order) 226 | } 227 | 228 | pub fn prefix(&self, p: IK::Prefix) -> IndexPrefix { 229 | IndexPrefix::with_deserialization_functions( 230 | self.idx_namespace, 231 | &p.prefix(), 232 | &[], 233 | |_, _, kv| deserialize_unique_kv::(kv), 234 | |_, _, kv| deserialize_unique_v(kv), 235 | ) 236 | } 237 | 238 | pub fn sub_prefix(&self, p: IK::SubPrefix) -> IndexPrefix { 239 | IndexPrefix::with_deserialization_functions( 240 | self.idx_namespace, 241 | &p.prefix(), 242 | &[], 243 | |_, _, kv| deserialize_unique_kv::(kv), 244 | |_, _, kv| deserialize_unique_v(kv), 245 | ) 246 | } 247 | 248 | fn no_prefix(&self) -> IndexPrefix { 249 | 
IndexPrefix::with_deserialization_functions( 250 | self.idx_namespace, 251 | &[], 252 | &[], 253 | |_, _, kv| deserialize_unique_kv::(kv), 254 | |_, _, kv| deserialize_unique_v(kv), 255 | ) 256 | } 257 | } 258 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 
35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. 
In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. 
We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /src/indexes/multi.rs: -------------------------------------------------------------------------------- 1 | // this module requires iterator to be useful at all 2 | #![cfg(feature = "iterator")] 3 | 4 | use cosmwasm_std::storage_keys::namespace_with_key; 5 | use serde::de::DeserializeOwned; 6 | use serde::Serialize; 7 | 8 | use cosmwasm_std::{from_json, Order, Record, StdError, StdResult, Storage}; 9 | 10 | use crate::bound::PrefixBound; 11 | use crate::de::KeyDeserialize; 12 | use crate::indexes::IndexPrefix; 13 | use crate::iter_helpers::deserialize_kv; 14 | use crate::map::Map; 15 | use crate::prefix::namespaced_prefix_range; 16 | use crate::{Bound, Index, Prefixer, PrimaryKey}; 17 | use std::marker::PhantomData; 18 | 19 | /// MultiIndex stores (namespace, index_name, idx_value, pk) -> b"pk_len". 20 | /// Allows many values per index, and references pk. 21 | /// The associated primary key value is stored in the main (pk_namespace) map, 22 | /// which stores (namespace, pk_namespace, pk) -> value. 
23 | /// 24 | /// The stored pk_len is used to recover the pk from the index namespace, and perform 25 | /// the secondary load of the associated value from the main map. 26 | /// 27 | /// The PK type defines the type of Primary Key, both for deserialization, and 28 | /// more important, as the type-safe bound key type. 29 | /// This type must match the encompassing `IndexedMap` primary key type, 30 | /// or its owned variant. 31 | pub struct MultiIndex<'a, IK, T, PK> { 32 | index: fn(&[u8], &T) -> IK, 33 | idx_namespace: &'a [u8], 34 | // note, we collapse the ik - combining everything under the namespace - and concatenating the pk 35 | idx_map: Map, u32>, 36 | pk_namespace: &'a [u8], 37 | phantom: PhantomData, 38 | } 39 | 40 | impl<'a, IK, T, PK> MultiIndex<'a, IK, T, PK> 41 | where 42 | T: Serialize + DeserializeOwned + Clone, 43 | { 44 | /// Create a new MultiIndex 45 | /// 46 | /// idx_fn - lambda creating index key from value 47 | /// pk_namespace - prefix for the primary key 48 | /// idx_namespace - prefix for the index value 49 | /// 50 | /// ## Example: 51 | /// 52 | /// ```rust 53 | /// use cw_storage_plus::MultiIndex; 54 | /// use serde::{Deserialize, Serialize}; 55 | /// 56 | /// #[derive(Deserialize, Serialize, Clone)] 57 | /// struct Data { 58 | /// pub name: String, 59 | /// pub age: u32, 60 | /// } 61 | /// 62 | /// let index: MultiIndex<_, _, String> = MultiIndex::new( 63 | /// |_pk: &[u8], d: &Data| d.age, 64 | /// "age", 65 | /// "age__owner", 66 | /// ); 67 | /// ``` 68 | pub const fn new( 69 | idx_fn: fn(&[u8], &T) -> IK, 70 | pk_namespace: &'a str, 71 | idx_namespace: &'static str, 72 | ) -> Self { 73 | MultiIndex { 74 | index: idx_fn, 75 | idx_namespace: idx_namespace.as_bytes(), 76 | idx_map: Map::new(idx_namespace), 77 | pk_namespace: pk_namespace.as_bytes(), 78 | phantom: PhantomData, 79 | } 80 | } 81 | } 82 | 83 | fn deserialize_multi_v( 84 | store: &dyn Storage, 85 | pk_namespace: &[u8], 86 | kv: Record, 87 | ) -> StdResult> { 88 | let 
(key, pk_len) = kv; 89 | 90 | // Deserialize pk_len 91 | let pk_len = from_json::(pk_len.as_slice())?; 92 | 93 | // Recover pk from last part of k 94 | let offset = key.len() - pk_len as usize; 95 | let pk = &key[offset..]; 96 | 97 | let full_key = namespace_with_key(&[pk_namespace], pk); 98 | 99 | let v = store 100 | .get(&full_key) 101 | .ok_or_else(|| StdError::msg("pk not found"))?; 102 | let v = from_json::(&v)?; 103 | 104 | Ok((pk.to_vec(), v)) 105 | } 106 | 107 | fn deserialize_multi_kv( 108 | store: &dyn Storage, 109 | pk_namespace: &[u8], 110 | kv: Record, 111 | ) -> StdResult<(K::Output, T)> { 112 | let (key, pk_len) = kv; 113 | 114 | // Deserialize pk_len 115 | let pk_len = from_json::(pk_len.as_slice())?; 116 | 117 | // Recover pk from last part of k 118 | let offset = key.len() - pk_len as usize; 119 | let pk = &key[offset..]; 120 | 121 | let full_key = namespace_with_key(&[pk_namespace], pk); 122 | 123 | let v = store 124 | .get(&full_key) 125 | .ok_or_else(|| StdError::msg("pk not found"))?; 126 | let v = from_json::(&v)?; 127 | 128 | // We return deserialized `pk` here for consistency 129 | Ok((K::from_slice(pk)?, v)) 130 | } 131 | 132 | impl<'a, IK, T, PK> Index for MultiIndex<'a, IK, T, PK> 133 | where 134 | T: Serialize + DeserializeOwned + Clone, 135 | IK: PrimaryKey<'a>, 136 | { 137 | fn save(&self, store: &mut dyn Storage, pk: &[u8], data: &T) -> StdResult<()> { 138 | let idx = (self.index)(pk, data).joined_extra_key(pk); 139 | self.idx_map.save(store, idx, &(pk.len() as u32)) 140 | } 141 | 142 | fn remove(&self, store: &mut dyn Storage, pk: &[u8], old_data: &T) -> StdResult<()> { 143 | let idx = (self.index)(pk, old_data).joined_extra_key(pk); 144 | self.idx_map.remove(store, idx); 145 | Ok(()) 146 | } 147 | } 148 | 149 | impl<'a, IK, T, PK> MultiIndex<'a, IK, T, PK> 150 | where 151 | T: Serialize + DeserializeOwned + Clone, 152 | IK: PrimaryKey<'a> + Prefixer<'a>, 153 | { 154 | fn no_prefix_raw(&self) -> IndexPrefix, T, (IK, PK)> { 155 | 
IndexPrefix::with_deserialization_functions( 156 | self.idx_namespace, 157 | &[], 158 | self.pk_namespace, 159 | deserialize_multi_v, 160 | deserialize_multi_v, 161 | ) 162 | } 163 | } 164 | 165 | impl<'a, IK, T, PK> MultiIndex<'a, IK, T, PK> 166 | where 167 | PK: PrimaryKey<'a> + KeyDeserialize, 168 | T: Serialize + DeserializeOwned + Clone, 169 | IK: PrimaryKey<'a> + Prefixer<'a>, 170 | { 171 | pub fn index_key(&self, k: IK) -> Vec { 172 | k.joined_extra_key(b"") 173 | } 174 | 175 | #[cfg(test)] 176 | pub fn count(&self, store: &dyn Storage, p: IK) -> usize { 177 | let prefix = self.prefix(p); 178 | prefix.keys_raw(store, None, None, Order::Ascending).count() 179 | } 180 | 181 | #[cfg(test)] 182 | pub fn all_pks(&self, store: &dyn Storage, p: IK) -> Vec> { 183 | let prefix = self.prefix(p); 184 | prefix 185 | .keys_raw(store, None, None, Order::Ascending) 186 | .collect::>>() 187 | } 188 | 189 | #[cfg(test)] 190 | pub fn all_items(&self, store: &dyn Storage, p: IK) -> StdResult>> { 191 | let prefix = self.prefix(p); 192 | prefix 193 | .range_raw(store, None, None, Order::Ascending) 194 | .collect() 195 | } 196 | } 197 | 198 | // short-cut for simple keys, rather than .prefix(()).range_raw(...) 
199 | impl<'a, IK, T, PK> MultiIndex<'a, IK, T, PK> 200 | where 201 | T: Serialize + DeserializeOwned + Clone, 202 | IK: PrimaryKey<'a> + Prefixer<'a> + KeyDeserialize, 203 | PK: PrimaryKey<'a> + KeyDeserialize, 204 | { 205 | // I would prefer not to copy code from Prefix, but no other way 206 | // with lifetimes (create Prefix inside function and return ref = no no) 207 | pub fn range_raw<'c>( 208 | &'c self, 209 | store: &'c dyn Storage, 210 | min: Option>, 211 | max: Option>, 212 | order: Order, 213 | ) -> Box>> + 'c> 214 | where 215 | T: 'c, 216 | { 217 | self.no_prefix_raw().range_raw(store, min, max, order) 218 | } 219 | 220 | pub fn keys_raw<'c>( 221 | &'c self, 222 | store: &'c dyn Storage, 223 | min: Option>, 224 | max: Option>, 225 | order: Order, 226 | ) -> Box> + 'c> { 227 | self.no_prefix_raw().keys_raw(store, min, max, order) 228 | } 229 | 230 | /// While `range_raw` over a `prefix` fixes the prefix to one element and iterates over the 231 | /// remaining, `prefix_range_raw` accepts bounds for the lowest and highest elements of the 232 | /// `Prefix` itself, and iterates over those (inclusively or exclusively, depending on 233 | /// `PrefixBound`). 234 | /// There are some issues that distinguish these two, and blindly casting to `Vec` doesn't 235 | /// solve them. 
236 | pub fn prefix_range_raw<'c>( 237 | &'c self, 238 | store: &'c dyn Storage, 239 | min: Option>, 240 | max: Option>, 241 | order: cosmwasm_std::Order, 242 | ) -> Box>> + 'c> 243 | where 244 | T: 'c, 245 | 'a: 'c, 246 | { 247 | let mapped = namespaced_prefix_range(store, self.idx_namespace, min, max, order) 248 | .map(move |kv| (deserialize_multi_v)(store, self.pk_namespace, kv)); 249 | Box::new(mapped) 250 | } 251 | } 252 | 253 | #[cfg(feature = "iterator")] 254 | impl<'a, IK, T, PK> MultiIndex<'a, IK, T, PK> 255 | where 256 | PK: PrimaryKey<'a> + KeyDeserialize, 257 | T: Serialize + DeserializeOwned + Clone, 258 | IK: PrimaryKey<'a> + Prefixer<'a>, 259 | { 260 | pub fn prefix(&self, p: IK) -> IndexPrefix { 261 | IndexPrefix::with_deserialization_functions( 262 | self.idx_namespace, 263 | &p.prefix(), 264 | self.pk_namespace, 265 | deserialize_multi_kv::, 266 | deserialize_multi_v, 267 | ) 268 | } 269 | 270 | pub fn sub_prefix(&self, p: IK::Prefix) -> IndexPrefix { 271 | IndexPrefix::with_deserialization_functions( 272 | self.idx_namespace, 273 | &p.prefix(), 274 | self.pk_namespace, 275 | deserialize_multi_kv::, 276 | deserialize_multi_v, 277 | ) 278 | } 279 | } 280 | 281 | #[cfg(feature = "iterator")] 282 | impl<'a, IK, T, PK> MultiIndex<'a, IK, T, PK> 283 | where 284 | PK: PrimaryKey<'a> + KeyDeserialize, 285 | T: Serialize + DeserializeOwned + Clone, 286 | IK: PrimaryKey<'a> + KeyDeserialize + Prefixer<'a>, 287 | { 288 | /// While `range` over a `prefix` fixes the prefix to one element and iterates over the 289 | /// remaining, `prefix_range` accepts bounds for the lowest and highest elements of the 290 | /// `Prefix` itself, and iterates over those (inclusively or exclusively, depending on 291 | /// `PrefixBound`). 292 | /// There are some issues that distinguish these two, and blindly casting to `Vec` doesn't 293 | /// solve them. 
294 | pub fn prefix_range<'c>( 295 | &self, 296 | store: &'c dyn Storage, 297 | min: Option>, 298 | max: Option>, 299 | order: cosmwasm_std::Order, 300 | ) -> Box> + 'c> 301 | where 302 | T: 'c, 303 | 'a: 'c, 304 | IK: 'c, 305 | PK: 'c, 306 | PK::Output: 'static, 307 | { 308 | let mapped = namespaced_prefix_range(store, self.idx_namespace, min, max, order) 309 | .map(deserialize_kv::); 310 | Box::new(mapped) 311 | } 312 | 313 | pub fn range<'c>( 314 | &self, 315 | store: &'c dyn Storage, 316 | min: Option>, 317 | max: Option>, 318 | order: cosmwasm_std::Order, 319 | ) -> Box> + 'c> 320 | where 321 | T: 'c, 322 | PK::Output: 'static, 323 | { 324 | self.no_prefix().range(store, min, max, order) 325 | } 326 | 327 | pub fn keys<'c>( 328 | &self, 329 | store: &'c dyn Storage, 330 | min: Option>, 331 | max: Option>, 332 | order: cosmwasm_std::Order, 333 | ) -> Box> + 'c> 334 | where 335 | T: 'c, 336 | PK::Output: 'static, 337 | { 338 | self.no_prefix().keys(store, min, max, order) 339 | } 340 | 341 | fn no_prefix(&self) -> IndexPrefix { 342 | IndexPrefix::with_deserialization_functions( 343 | self.idx_namespace, 344 | &[], 345 | self.pk_namespace, 346 | deserialize_multi_kv::, 347 | deserialize_multi_v, 348 | ) 349 | } 350 | } 351 | -------------------------------------------------------------------------------- /src/item.rs: -------------------------------------------------------------------------------- 1 | use serde::de::DeserializeOwned; 2 | use serde::Serialize; 3 | use std::marker::PhantomData; 4 | 5 | use cosmwasm_std::{ 6 | from_json, to_json_vec, Addr, CustomQuery, QuerierWrapper, StdError, StdResult, Storage, 7 | WasmQuery, 8 | }; 9 | 10 | use crate::{helpers::not_found_object_info, namespace::Namespace}; 11 | 12 | /// Item stores one typed item at the given key. 13 | /// This is an analog of Singleton. 14 | /// It functions the same way as Path does but doesn't use a Vec and thus has a const fn constructor. 
15 | pub struct Item { 16 | // this is full key - no need to length-prefix it, we only store one item 17 | storage_key: Namespace, 18 | // see https://doc.rust-lang.org/std/marker/struct.PhantomData.html#unused-type-parameters for why this is needed 19 | data_type: PhantomData, 20 | } 21 | 22 | impl Item { 23 | /// Creates a new [`Item`] with the given storage key. This is a const fn only suitable 24 | /// when you have a static string slice. 25 | pub const fn new(storage_key: &'static str) -> Self { 26 | Item { 27 | storage_key: Namespace::from_static_str(storage_key), 28 | data_type: PhantomData, 29 | } 30 | } 31 | 32 | /// Creates a new [`Item`] with the given storage key. Use this if you might need to handle 33 | /// a dynamic string. Otherwise, you might prefer [`Item::new`]. 34 | pub fn new_dyn(storage_key: impl Into) -> Self { 35 | Item { 36 | storage_key: storage_key.into(), 37 | data_type: PhantomData, 38 | } 39 | } 40 | } 41 | 42 | impl Item 43 | where 44 | T: Serialize + DeserializeOwned, 45 | { 46 | // this gets the path of the data to use elsewhere 47 | pub fn as_slice(&self) -> &[u8] { 48 | self.storage_key.as_slice() 49 | } 50 | 51 | /// save will serialize the model and store, returns an error on serialization issues 52 | pub fn save(&self, store: &mut dyn Storage, data: &T) -> StdResult<()> { 53 | store.set(self.storage_key.as_slice(), &to_json_vec(data)?); 54 | Ok(()) 55 | } 56 | 57 | pub fn remove(&self, store: &mut dyn Storage) { 58 | store.remove(self.storage_key.as_slice()); 59 | } 60 | 61 | /// load will return an error if no data is set at the given key, or on parse error 62 | pub fn load(&self, store: &dyn Storage) -> StdResult { 63 | if let Some(value) = store.get(self.storage_key.as_slice()) { 64 | from_json(value) 65 | } else { 66 | let object_info = not_found_object_info::(self.storage_key.as_slice()); 67 | Err(StdError::msg(format!("{object_info} not found"))) 68 | } 69 | } 70 | 71 | /// may_load will parse the data stored at the key 
if present, returns `Ok(None)` if no data there. 72 | /// returns an error on issues parsing 73 | pub fn may_load(&self, store: &dyn Storage) -> StdResult> { 74 | let value = store.get(self.storage_key.as_slice()); 75 | value.map(|v| from_json(v)).transpose() 76 | } 77 | 78 | /// Returns `true` if data is stored at the key, `false` otherwise. 79 | pub fn exists(&self, store: &dyn Storage) -> bool { 80 | store.get(self.storage_key.as_slice()).is_some() 81 | } 82 | 83 | /// Loads the data, perform the specified action, and store the result 84 | /// in the database. This is shorthand for some common sequences, which may be useful. 85 | /// 86 | /// It assumes, that data was initialized before, and if it doesn't exist, `Err(StdError::NotFound)` 87 | /// is returned. 88 | pub fn update(&self, store: &mut dyn Storage, action: A) -> Result 89 | where 90 | A: FnOnce(T) -> Result, 91 | E: From, 92 | { 93 | let input = self.load(store)?; 94 | let output = action(input)?; 95 | self.save(store, &output)?; 96 | Ok(output) 97 | } 98 | 99 | /// If you import the proper Item from the remote contract, this will let you read the data 100 | /// from a remote contract in a type-safe way using WasmQuery::RawQuery. 
101 | /// 102 | /// Note that we expect an Item to be set, and error if there is no data there 103 | pub fn query( 104 | &self, 105 | querier: &QuerierWrapper, 106 | remote_contract: Addr, 107 | ) -> StdResult { 108 | let request = WasmQuery::Raw { 109 | contract_addr: remote_contract.into(), 110 | key: (self.storage_key.as_slice()).into(), 111 | }; 112 | querier.query(&request.into()) 113 | } 114 | } 115 | 116 | #[cfg(test)] 117 | mod test { 118 | use super::*; 119 | use cosmwasm_std::testing::MockStorage; 120 | use cosmwasm_std::{to_json_vec, StdError}; 121 | use serde::{Deserialize, Serialize}; 122 | 123 | #[derive(Serialize, Deserialize, PartialEq, Debug)] 124 | struct Config { 125 | pub owner: String, 126 | pub max_tokens: i32, 127 | } 128 | 129 | // note const constructor rather than 2 funcs with Singleton 130 | const CONFIG: Item = Item::new("config"); 131 | 132 | #[test] 133 | fn save_and_load() { 134 | let mut store = MockStorage::new(); 135 | 136 | assert!(CONFIG.load(&store).is_err()); 137 | assert_eq!(CONFIG.may_load(&store).unwrap(), None); 138 | 139 | let cfg = Config { 140 | owner: "admin".to_string(), 141 | max_tokens: 1234, 142 | }; 143 | CONFIG.save(&mut store, &cfg).unwrap(); 144 | 145 | assert_eq!(cfg, CONFIG.load(&store).unwrap()); 146 | } 147 | 148 | #[test] 149 | fn owned_key_works() { 150 | let mut store = MockStorage::new(); 151 | 152 | for i in 0..3 { 153 | let key = format!("key{i}"); 154 | let item = Item::new_dyn(key); 155 | item.save(&mut store, &i).unwrap(); 156 | } 157 | 158 | assert_eq!(store.get(b"key0").unwrap(), b"0"); 159 | assert_eq!(store.get(b"key1").unwrap(), b"1"); 160 | assert_eq!(store.get(b"key2").unwrap(), b"2"); 161 | } 162 | 163 | #[test] 164 | fn exists_works() { 165 | let mut store = MockStorage::new(); 166 | 167 | assert!(!CONFIG.exists(&store)); 168 | 169 | let cfg = Config { 170 | owner: "admin".to_string(), 171 | max_tokens: 1234, 172 | }; 173 | CONFIG.save(&mut store, &cfg).unwrap(); 174 | 175 | 
assert!(CONFIG.exists(&store));

        // An Item<Option<T>> storing None still counts as "exists":
        // the key holds the JSON encoding of null.
        const OPTIONAL: Item<Option<u32>> = Item::new("optional");

        assert!(!OPTIONAL.exists(&store));

        OPTIONAL.save(&mut store, &None).unwrap();

        assert!(OPTIONAL.exists(&store));
    }

    #[test]
    fn remove_works() {
        let mut store = MockStorage::new();

        // store data
        let cfg = Config {
            owner: "admin".to_string(),
            max_tokens: 1234,
        };
        CONFIG.save(&mut store, &cfg).unwrap();
        assert_eq!(cfg, CONFIG.load(&store).unwrap());

        // remove it and loads None
        CONFIG.remove(&mut store);
        assert!(!CONFIG.exists(&store));

        // safe to remove 2 times
        CONFIG.remove(&mut store);
        assert!(!CONFIG.exists(&store));
    }

    #[test]
    fn isolated_reads() {
        let mut store = MockStorage::new();

        let cfg = Config {
            owner: "admin".to_string(),
            max_tokens: 1234,
        };
        CONFIG.save(&mut store, &cfg).unwrap();

        // a second Item with the same key reads the same data
        let reader = Item::<Config>::new("config");
        assert_eq!(cfg, reader.load(&store).unwrap());

        // a different key sees nothing
        let other_reader = Item::<Config>::new("config2");
        assert_eq!(other_reader.may_load(&store).unwrap(), None);
    }

    #[test]
    fn update_success() {
        let mut store = MockStorage::new();

        let cfg = Config {
            owner: "admin".to_string(),
            max_tokens: 1234,
        };
        CONFIG.save(&mut store, &cfg).unwrap();

        let output = CONFIG.update(&mut store, |mut c| -> StdResult<_> {
            c.max_tokens *= 2;
            Ok(c)
        });
        let expected = Config {
            owner: "admin".to_string(),
            max_tokens: 2468,
        };
        assert_eq!(output.unwrap(), expected);
        assert_eq!(CONFIG.load(&store).unwrap(), expected);
    }

    #[test]
    fn update_can_change_variable_from_outer_scope() {
        let mut store = MockStorage::new();
        let cfg = Config {
            owner: "admin".to_string(),
            max_tokens: 1234,
        };
        CONFIG.save(&mut store, &cfg).unwrap();

        // the FnOnce closure may capture and mutate outer state
        let mut old_max_tokens = 0i32;
        CONFIG
            .update(&mut store, |mut c| -> StdResult<_> {
                old_max_tokens = c.max_tokens;
                c.max_tokens *= 2;
                Ok(c)
            })
            .unwrap();
        assert_eq!(old_max_tokens, 1234);
    }

    #[test]
    fn update_does_not_change_data_on_error() {
        let mut store = MockStorage::new();

        let cfg = Config {
            owner: "admin".to_string(),
            max_tokens: 1234,
        };
        CONFIG.save(&mut store, &cfg).unwrap();

        let output = CONFIG.update(&mut store, |_c| Err(StdError::msg("overflow")));
        assert_eq!(
            "kind: Other, error: overflow",
            output.unwrap_err().to_string()
        );
        assert_eq!(CONFIG.load(&store).unwrap(), cfg);
    }

    #[test]
    fn update_supports_custom_errors() {
        #[derive(Debug)]
        enum MyError {
            Std(StdError),
            Foo,
        }

        impl From<StdError> for MyError {
            fn from(original: StdError) -> MyError {
                MyError::Std(original)
            }
        }

        let mut store = MockStorage::new();

        let cfg = Config {
            owner: "admin".to_string(),
            max_tokens: 1234,
        };
        CONFIG.save(&mut store, &cfg).unwrap();

        let res = CONFIG.update(&mut store, |mut c| {
            if c.max_tokens > 5000 {
                return Err(MyError::Foo);
            }
            if c.max_tokens > 20 {
                return Err(StdError::msg("broken stuff").into()); // Uses Into to convert StdError to MyError
            }
            if c.max_tokens > 10 {
                to_json_vec(&c)?; // Uses From to convert StdError to MyError
            }
            c.max_tokens += 20;
            Ok(c)
        });

        match res.unwrap_err() {
            MyError::Std(std) if std.to_string() == "kind: Other, error: broken stuff" => {}
            err => panic!("Unexpected error: {err:?}"),
        }
        assert_eq!(CONFIG.load(&store).unwrap(), cfg);
    }

    #[test]
    fn readme_works() -> StdResult<()> {
        let mut store = MockStorage::new();

        // may_load returns Option<T>, so None if data is missing
        // load returns T and Err(StdError::NotFound{}) if data is missing
        let empty = CONFIG.may_load(&store)?;
        assert_eq!(None, empty);
        let cfg = Config {
            owner: "admin".to_string(),
            max_tokens: 1234,
        };
        CONFIG.save(&mut store, &cfg)?;
        let loaded = CONFIG.load(&store)?;
        assert_eq!(cfg, loaded);

        // update an item with a closure (includes read and write)
        // returns the newly saved value
        let output = CONFIG.update(&mut store, |mut c| -> StdResult<_> {
            c.max_tokens *= 2;
            Ok(c)
        })?;
        assert_eq!(2468, output.max_tokens);

        // you can error in an update and nothing is saved
        let failed = CONFIG.update(&mut store, |_| -> StdResult<_> {
            Err(StdError::msg("failure mode"))
        });
        assert!(failed.is_err());

        // loading data will show the first update was saved
        let loaded = CONFIG.load(&store)?;
        let expected = Config {
            owner: "admin".to_string(),
            max_tokens: 2468,
        };
        assert_eq!(expected, loaded);

        // we can remove data as well
        CONFIG.remove(&mut store);
        let empty = CONFIG.may_load(&store)?;
        assert_eq!(None, empty);

        Ok(())
    }
}

// ===== src/de.rs =====

use std::array::TryFromSliceError;
use std::convert::TryInto;

use cosmwasm_std::{Addr, Int128, Int64, StdError, StdResult, Uint128, Uint64};

use crate::int_key::IntKey;

/// Deserialization of raw storage key bytes back into typed key values.
pub trait KeyDeserialize {
    type Output: Sized;

    /// The number of key elements is used for the deserialization of compound keys.
/// It should be equal to PrimaryKey::key().len()
    const KEY_ELEMS: u16;

    fn from_vec(value: Vec<u8>) -> StdResult<Self::Output>;

    fn from_slice(value: &[u8]) -> StdResult<Self::Output> {
        Self::from_vec(value.to_vec())
    }
}

impl KeyDeserialize for () {
    type Output = ();

    const KEY_ELEMS: u16 = 0;

    #[inline(always)]
    fn from_vec(_value: Vec<u8>) -> StdResult<Self::Output> {
        Ok(())
    }
}

impl KeyDeserialize for Vec<u8> {
    type Output = Vec<u8>;

    const KEY_ELEMS: u16 = 1;

    #[inline(always)]
    fn from_vec(value: Vec<u8>) -> StdResult<Self::Output> {
        Ok(value)
    }
}

impl KeyDeserialize for &Vec<u8> {
    type Output = Vec<u8>;

    const KEY_ELEMS: u16 = 1;

    #[inline(always)]
    fn from_vec(value: Vec<u8>) -> StdResult<Self::Output> {
        Ok(value)
    }
}

impl KeyDeserialize for &[u8] {
    type Output = Vec<u8>;

    const KEY_ELEMS: u16 = 1;

    #[inline(always)]
    fn from_vec(value: Vec<u8>) -> StdResult<Self::Output> {
        Ok(value)
    }
}

impl<const N: usize> KeyDeserialize for [u8; N] {
    type Output = [u8; N];

    const KEY_ELEMS: u16 = 1;

    #[inline(always)]
    fn from_vec(value: Vec<u8>) -> StdResult<Self::Output> {
        // fails if the stored key is not exactly N bytes long
        <[u8; N]>::try_from(value).map_err(|v: Vec<_>| {
            StdError::msg(format!(
                "invalid_data_size: expected {}, actual {}",
                N,
                v.len()
            ))
        })
    }
}

impl<const N: usize> KeyDeserialize for &[u8; N] {
    type Output = [u8; N];

    const KEY_ELEMS: u16 = 1;

    #[inline(always)]
    fn from_vec(value: Vec<u8>) -> StdResult<Self::Output> {
        <[u8; N]>::from_vec(value)
    }
}

impl KeyDeserialize for String {
    type Output = String;

    const KEY_ELEMS: u16 = 1;

    #[inline(always)]
    fn from_vec(value: Vec<u8>) -> StdResult<Self::Output> {
        Ok(String::from_utf8(value)?)
    }
}

impl KeyDeserialize for &String {
    type Output = String;

    const KEY_ELEMS: u16 = 1;

    #[inline(always)]
    fn from_vec(value: Vec<u8>) -> StdResult<Self::Output> {
        Self::Output::from_vec(value)
    }
}

impl KeyDeserialize for &str {
    type Output = String;

    const KEY_ELEMS: u16 = 1;

    #[inline(always)]
    fn from_vec(value: Vec<u8>) -> StdResult<Self::Output> {
        Self::Output::from_vec(value)
    }
}

impl KeyDeserialize for Addr {
    type Output = Addr;

    const KEY_ELEMS: u16 = 1;

    #[inline(always)]
    fn from_vec(value: Vec<u8>) -> StdResult<Self::Output> {
        // keys were validated when stored, so unchecked is safe here
        Ok(Addr::unchecked(String::from_vec(value)?))
    }
}

impl KeyDeserialize for &Addr {
    type Output = Addr;

    const KEY_ELEMS: u16 = 1;

    #[inline(always)]
    fn from_vec(value: Vec<u8>) -> StdResult<Self::Output> {
        Self::Output::from_vec(value)
    }
}

// Implements KeyDeserialize for every listed integer type via IntKey decoding.
macro_rules! integer_de {
    (for $($t:ty),+) => {
        $(impl KeyDeserialize for $t {
            type Output = $t;

            const KEY_ELEMS: u16 = 1;

            #[inline(always)]
            fn from_vec(value: Vec<u8>) -> StdResult<Self::Output> {
                Ok(<$t>::from_cw_bytes(value.as_slice().try_into()
                    .map_err(|err: TryFromSliceError| StdError::msg(err.to_string()))?))
            }
        })*
    }
}

integer_de!(for i8, u8, i16, u16, i32, u32, i64, u64, i128, u128, Uint64, Uint128, Int64, Int128);

/// Parses a 2-byte big-endian length prefix.
fn parse_length(value: &[u8]) -> StdResult<usize> {
    Ok(u16::from_be_bytes(
        value
            .try_into()
            .map_err(|_| StdError::msg("Could not read 2 byte length"))?,
    )
    .into())
}

/// Splits the first key from the value based on the provided number of key elements.
/// The return value is ordered as (first_key, remainder).
///
fn split_first_key(key_elems: u16, value: &[u8]) -> StdResult<(Vec<u8>, &[u8])> {
    let mut index = 0;
    let mut first_key = Vec::new();

    // Iterate over the sub keys
    for i in 0..key_elems {
        let len_slice = &value[index..index + 2];
        index += 2;
        let is_last_key = i == key_elems - 1;

        // the last sub-key is stored without a length prefix
        if !is_last_key {
            first_key.extend_from_slice(len_slice);
        }

        let subkey_len = parse_length(len_slice)?;
        first_key.extend_from_slice(&value[index..index + subkey_len]);
        index += subkey_len;
    }

    let remainder = &value[index..];
    Ok((first_key, remainder))
}

impl<T: KeyDeserialize, U: KeyDeserialize> KeyDeserialize for (T, U) {
    type Output = (T::Output, U::Output);

    const KEY_ELEMS: u16 = T::KEY_ELEMS + U::KEY_ELEMS;

    #[inline(always)]
    fn from_vec(value: Vec<u8>) -> StdResult<Self::Output> {
        let (t, u) = split_first_key(T::KEY_ELEMS, value.as_ref())?;
        Ok((T::from_vec(t)?, U::from_vec(u.to_vec())?))
    }
}

impl<T: KeyDeserialize, U: KeyDeserialize, V: KeyDeserialize> KeyDeserialize for (T, U, V) {
    type Output = (T::Output, U::Output, V::Output);

    const KEY_ELEMS: u16 = T::KEY_ELEMS + U::KEY_ELEMS + V::KEY_ELEMS;

    #[inline(always)]
    fn from_vec(value: Vec<u8>) -> StdResult<Self::Output> {
        let (t, remainder) = split_first_key(T::KEY_ELEMS, value.as_ref())?;
        let (u, v) = split_first_key(U::KEY_ELEMS, remainder)?;
        Ok((T::from_vec(t)?, U::from_vec(u)?, V::from_vec(v.to_vec())?))
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use crate::PrimaryKey;

    const BYTES: &[u8] = b"Hello";
    const STRING: &str = "Hello";

    #[test]
    #[allow(clippy::unit_cmp)]
    fn deserialize_empty_works() {
        assert_eq!(<()>::from_slice(BYTES).unwrap(), ());
    }

    #[test]
    fn deserialize_bytes_works() {
        assert_eq!(<Vec<u8>>::from_slice(BYTES).unwrap(), BYTES);
assert_eq!(<&Vec<u8>>::from_slice(BYTES).unwrap(), BYTES);
        assert_eq!(<&[u8]>::from_slice(BYTES).unwrap(), BYTES);
        assert_eq!(<[u8; 5]>::from_slice(BYTES).unwrap(), BYTES);
        assert_eq!(<&[u8; 5]>::from_slice(BYTES).unwrap(), BYTES);
    }

    #[test]
    fn deserialize_string_works() {
        assert_eq!(<String>::from_slice(BYTES).unwrap(), STRING);
        assert_eq!(<&String>::from_slice(BYTES).unwrap(), STRING);
        assert_eq!(<&str>::from_slice(BYTES).unwrap(), STRING);
    }

    #[test]
    fn deserialize_broken_string_errs() {
        assert_eq!(
            "kind: Parsing, error: incomplete utf-8 byte sequence from index 0",
            <String>::from_slice(b"\xc3").unwrap_err().to_string()
        );
    }

    #[test]
    fn deserialize_addr_works() {
        assert_eq!(<Addr>::from_slice(BYTES).unwrap(), Addr::unchecked(STRING));
        assert_eq!(<&Addr>::from_slice(BYTES).unwrap(), Addr::unchecked(STRING));
    }

    #[test]
    fn deserialize_broken_addr_errs() {
        assert_eq!(
            "kind: Parsing, error: incomplete utf-8 byte sequence from index 0",
            <Addr>::from_slice(b"\xc3").unwrap_err().to_string()
        );
    }

    #[test]
    fn deserialize_naked_integer_works() {
        // NOTE(review): these values demonstrate the IntKey byte encoding
        // (sign-flipped for signed ints so keys sort correctly) — see int_key.rs.
        assert_eq!(u8::from_slice(&[1]).unwrap(), 1u8);
        assert_eq!(i8::from_slice(&[127]).unwrap(), -1i8);
        assert_eq!(i8::from_slice(&[128]).unwrap(), 0i8);

        assert_eq!(u16::from_slice(&[1, 0]).unwrap(), 256u16);
        assert_eq!(i16::from_slice(&[128, 0]).unwrap(), 0i16);
        assert_eq!(i16::from_slice(&[127, 255]).unwrap(), -1i16);

        assert_eq!(u32::from_slice(&[1, 0, 0, 0]).unwrap(), 16777216u32);
        assert_eq!(i32::from_slice(&[128, 0, 0, 0]).unwrap(), 0i32);
        assert_eq!(i32::from_slice(&[127, 255, 255, 255]).unwrap(), -1i32);

        assert_eq!(
            u64::from_slice(&[1, 0, 0, 0, 0, 0, 0, 0]).unwrap(),
            72057594037927936u64
        );
        assert_eq!(i64::from_slice(&[128, 0, 0, 0, 0, 0, 0, 0]).unwrap(), 0i64);
        assert_eq!(
            i64::from_slice(&[127, 255, 255, 255, 255, 255, 255, 255]).unwrap(),
            -1i64
        );

        assert_eq!(
            u128::from_slice(&[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]).unwrap(),
            1329227995784915872903807060280344576u128
        );
        assert_eq!(
            i128::from_slice(&[128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]).unwrap(),
            0i128
        );
        assert_eq!(
            i128::from_slice(&[
                127, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255
            ])
            .unwrap(),
            -1i128
        );
        assert_eq!(
            i128::from_slice(&[
                255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255
            ])
            .unwrap(),
            170141183460469231731687303715884105727i128,
        );
    }

    #[test]
    fn deserialize_tuple_works() {
        assert_eq!(
            <(&[u8], &str)>::from_slice((BYTES, STRING).joined_key().as_slice()).unwrap(),
            (BYTES.to_vec(), STRING.to_string())
        );
    }

    #[test]
    fn deserialize_tuple_of_tuples_works() {
        assert_eq!(
            <((&[u8], &str), (&[u8], &str))>::from_slice(
                ((BYTES, STRING), (BYTES, STRING)).joined_key().as_slice()
            )
            .unwrap(),
            (
                (BYTES.to_vec(), STRING.to_string()),
                (BYTES.to_vec(), STRING.to_string())
            )
        );
    }

    #[test]
    fn deserialize_tuple_of_triples_works() {
        assert_eq!(
            <((&[u8], &str, u32), (&[u8], &str, u16))>::from_slice(
                ((BYTES, STRING, 1234u32), (BYTES, STRING, 567u16))
                    .joined_key()
                    .as_slice()
            )
            .unwrap(),
            (
                (BYTES.to_vec(), STRING.to_string(), 1234),
                (BYTES.to_vec(), STRING.to_string(), 567)
            )
        );
    }

    #[test]
    fn deserialize_triple_of_tuples_works() {
        assert_eq!(
            <((u32, &str), (&str, &[u8]), (i32, i32))>::from_slice(
                ((1234u32, STRING), (STRING, BYTES), (1234i32, 567i32))
                    .joined_key()
                    .as_slice()
            )
            .unwrap(),
            (
                (1234, STRING.to_string()),
                (STRING.to_string(), BYTES.to_vec()),
                (1234, 567)
            )
        );
    }

    #[test]
    fn deserialize_triple_of_triples_works() {
        assert_eq!(
            <((u32, &str, &str), (&str, &[u8], u8), (i32, u8, i32))>::from_slice(
                (
                    (1234u32, STRING, STRING),
                    (STRING, BYTES, 123u8),
                    (4567i32, 89u8, 10i32)
                )
                    .joined_key()
                    .as_slice()
            )
            .unwrap(),
            (
                (1234, STRING.to_string(), STRING.to_string()),
                (STRING.to_string(), BYTES.to_vec(), 123),
                (4567, 89, 10)
            )
        );
    }

    #[test]
    fn deserialize_triple_works() {
        assert_eq!(
            <(&[u8], u32, &str)>::from_slice((BYTES, 1234u32, STRING).joined_key().as_slice())
                .unwrap(),
            (BYTES.to_vec(), 1234, STRING.to_string())
        );
    }
}

// ===== src/indexes/prefix.rs =====

#![cfg(feature = "iterator")]
use core::fmt;
use serde::de::DeserializeOwned;
use serde::Serialize;
use std::fmt::Debug;

use cosmwasm_std::{Order, Record, StdResult, Storage};
use std::ops::Deref;

use crate::de::KeyDeserialize;
use crate::iter_helpers::{deserialize_kv, deserialize_v};
use crate::keys::Key;
use crate::{Bound, PrimaryKey};

// Deserializer for values only: (storage, pk_name, raw record) -> typed record.
type DeserializeVFn<T> = fn(&dyn Storage, &[u8], Record) -> StdResult<Record<T>>;

// Deserializer for key/value pairs: also decodes the key via KeyDeserialize.
type DeserializeKvFn<K, T> =
    fn(&dyn Storage, &[u8], Record) -> StdResult<(<K as KeyDeserialize>::Output, T)>;

pub fn default_deserializer_v<T: DeserializeOwned>(
    _: &dyn Storage,
    _: &[u8],
    raw: Record,
) -> StdResult<Record<T>> {
    deserialize_v(raw)
}

pub fn default_deserializer_kv<K: KeyDeserialize, T: DeserializeOwned>(
    _: &dyn Storage,
    _: &[u8],
    raw: Record,
) -> StdResult<(K::Output, T)> {
deserialize_kv::<K, T>(raw)
}

/// A prefix over an index namespace, supporting typed iteration over the
/// indexed records. `B` is the (sub)key type used for range bounds.
#[derive(Clone)]
pub struct IndexPrefix<K, T, B = Vec<u8>>
where
    K: KeyDeserialize,
    T: Serialize + DeserializeOwned,
{
    inner: crate::prefix::Prefix<K, T, B>,
    pk_name: Vec<u8>,
    de_fn_kv: DeserializeKvFn<K, T>,
    de_fn_v: DeserializeVFn<T>,
}

impl<K, T, B> Debug for IndexPrefix<K, T, B>
where
    K: KeyDeserialize,
    T: Serialize + DeserializeOwned,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // function pointers are skipped; hence finish_non_exhaustive
        f.debug_struct("IndexPrefix")
            .field("storage_prefix", &self.inner.storage_prefix)
            .field("pk_name", &self.pk_name)
            .finish_non_exhaustive()
    }
}

impl<K, T, B> Deref for IndexPrefix<K, T, B>
where
    K: KeyDeserialize,
    T: Serialize + DeserializeOwned,
{
    type Target = [u8];

    fn deref(&self) -> &[u8] {
        &self.inner.storage_prefix
    }
}

impl<K, T, B> IndexPrefix<K, T, B>
where
    K: KeyDeserialize,
    T: Serialize + DeserializeOwned,
{
    pub fn new(top_name: &[u8], sub_names: &[Key]) -> Self {
        IndexPrefix::with_deserialization_functions(
            top_name,
            sub_names,
            &[],
            default_deserializer_kv::<K, T>,
            default_deserializer_v,
        )
    }

    pub fn with_deserialization_functions(
        top_name: &[u8],
        sub_names: &[Key],
        pk_name: &[u8],
        de_fn_kv: DeserializeKvFn<K, T>,
        de_fn_v: DeserializeVFn<T>,
    ) -> Self {
        IndexPrefix {
            inner: crate::prefix::Prefix::new(top_name, sub_names),
            pk_name: pk_name.to_vec(),
            de_fn_kv,
            de_fn_v,
        }
    }
}

impl<'b, K, T, B> IndexPrefix<K, T, B>
where
    B: PrimaryKey<'b>,
    K: KeyDeserialize,
    T: Serialize + DeserializeOwned,
{
    pub fn range_raw<'a>(
        &self,
        store: &'a dyn Storage,
        min: Option<Bound<'b, B>>,
        max: Option<Bound<'b, B>>,
        order: Order,
    ) -> Box<dyn Iterator<Item = StdResult<Record<T>>> + 'a>
    where
        T: 'a,
    {
        let de_fn = self.de_fn_v;
        let pk_name = self.pk_name.clone();
        let mapped = crate::prefix::range_with_prefix(
            store,
            &self.inner.storage_prefix,
            min.map(|b| b.to_raw_bound()),
            max.map(|b| b.to_raw_bound()),
            order,
        )
        .map(move |kv| (de_fn)(store, &pk_name, kv));
        Box::new(mapped)
    }

    pub fn keys_raw<'a>(
        &self,
        store: &'a dyn Storage,
        min: Option<Bound<'b, B>>,
        max: Option<Bound<'b, B>>,
        order: Order,
    ) -> Box<dyn Iterator<Item = Vec<u8>> + 'a> {
        crate::prefix::keys_with_prefix(
            store,
            &self.inner.storage_prefix,
            min.map(|b| b.to_raw_bound()),
            max.map(|b| b.to_raw_bound()),
            order,
        )
    }

    /// Clears the prefix, removing the first `limit` elements (or all if `limit == None`).
    pub fn clear(&self, store: &mut dyn Storage, limit: Option<usize>) {
        self.inner.clear(store, limit);
    }

    /// Returns `true` if the prefix is empty.
    pub fn is_empty(&self, store: &dyn Storage) -> bool {
        crate::prefix::keys_full(
            store,
            &self.inner.storage_prefix,
            None,
            None,
            Order::Ascending,
        )
        .next()
        .is_none()
    }

    pub fn range<'a>(
        &self,
        store: &'a dyn Storage,
        min: Option<Bound<'b, B>>,
        max: Option<Bound<'b, B>>,
        order: Order,
    ) -> Box<dyn Iterator<Item = StdResult<(K::Output, T)>> + 'a>
    where
        T: 'a,
        K::Output: 'static,
    {
        let de_fn = self.de_fn_kv;
        let pk_name = self.pk_name.clone();
        let mapped = crate::prefix::range_with_prefix(
            store,
            &self.inner.storage_prefix,
            min.map(|b| b.to_raw_bound()),
            max.map(|b| b.to_raw_bound()),
            order,
        )
        .map(move |kv| (de_fn)(store, &pk_name, kv));
        Box::new(mapped)
    }

    pub fn keys<'a>(
        &self,
        store: &'a dyn Storage,
        min: Option<Bound<'b, B>>,
        max: Option<Bound<'b, B>>,
        order: Order,
    ) -> Box<dyn Iterator<Item = StdResult<K::Output>> + 'a>
    where
        T: 'a,
        K::Output: 'static,
    {
        let de_fn = self.de_fn_kv;
        let pk_name = self.pk_name.clone();
        let mapped =
crate::prefix::range_with_prefix(
            store,
            &self.inner.storage_prefix,
            min.map(|b| b.to_raw_bound()),
            max.map(|b| b.to_raw_bound()),
            order,
        )
        // decode the full kv, then drop the value to yield keys only
        .map(move |kv| (de_fn)(store, &pk_name, kv).map(|(k, _)| k));
        Box::new(mapped)
    }
}

#[cfg(test)]
mod test {
    use super::*;

    use std::marker::PhantomData;

    use cosmwasm_std::testing::MockStorage;

    #[test]
    fn ensure_proper_range_bounds() {
        let mut store = MockStorage::new();
        // manually create this - not testing nested prefixes here
        let prefix: IndexPrefix<Vec<u8>, u64> = IndexPrefix {
            inner: crate::prefix::Prefix {
                storage_prefix: b"foo".to_vec(),
                data: PhantomData::<(u64, _, _)>,
            },
            pk_name: vec![],
            de_fn_kv: |_, _, kv| deserialize_kv::<Vec<u8>, u64>(kv),
            de_fn_v: |_, _, kv| deserialize_v(kv),
        };

        // set some data, we care about "foo" prefix
        store.set(b"foobar", b"1");
        store.set(b"foora", b"2");
        store.set(b"foozi", b"3");
        // these shouldn't match
        store.set(b"foply", b"100");
        store.set(b"font", b"200");

        let expected = vec![
            (b"bar".to_vec(), 1u64),
            (b"ra".to_vec(), 2u64),
            (b"zi".to_vec(), 3u64),
        ];
        let expected_reversed: Vec<(Vec<u8>, u64)> = expected.iter().rev().cloned().collect();

        // let's do the basic sanity check
        let res: StdResult<Vec<_>> = prefix
            .range_raw(&store, None, None, Order::Ascending)
            .collect();
        assert_eq!(&expected, &res.unwrap());
        let res: StdResult<Vec<_>> = prefix
            .range_raw(&store, None, None, Order::Descending)
            .collect();
        assert_eq!(&expected_reversed, &res.unwrap());

        // now let's check some ascending ranges
        let res: StdResult<Vec<_>> = prefix
            .range_raw(
                &store,
                Some(Bound::inclusive(b"ra".to_vec())),
                None,
                Order::Ascending,
            )
            .collect();
        assert_eq!(&expected[1..], res.unwrap().as_slice());
        // skip excluded
        let res: StdResult<Vec<_>> = prefix
            .range_raw(
                &store,
                Some(Bound::exclusive(b"ra".to_vec())),
                None,
                Order::Ascending,
            )
            .collect();
        assert_eq!(&expected[2..], res.unwrap().as_slice());
        // if we exclude something a little lower, we get matched
        let res: StdResult<Vec<_>> = prefix
            .range_raw(
                &store,
                Some(Bound::exclusive(b"r".to_vec())),
                None,
                Order::Ascending,
            )
            .collect();
        assert_eq!(&expected[1..], res.unwrap().as_slice());

        // now let's check some descending ranges
        let res: StdResult<Vec<_>> = prefix
            .range_raw(
                &store,
                None,
                Some(Bound::inclusive(b"ra".to_vec())),
                Order::Descending,
            )
            .collect();
        assert_eq!(&expected_reversed[1..], res.unwrap().as_slice());
        // skip excluded
        let res: StdResult<Vec<_>> = prefix
            .range_raw(
                &store,
                None,
                Some(Bound::exclusive(b"ra".to_vec())),
                Order::Descending,
            )
            .collect();
        assert_eq!(&expected_reversed[2..], res.unwrap().as_slice());
        // if we exclude something a little higher, we get matched
        let res: StdResult<Vec<_>> = prefix
            .range_raw(
                &store,
                None,
                Some(Bound::exclusive(b"rb".to_vec())),
                Order::Descending,
            )
            .collect();
        assert_eq!(&expected_reversed[1..], res.unwrap().as_slice());

        // now test when both sides are set
        let res: StdResult<Vec<_>> = prefix
            .range_raw(
                &store,
                Some(Bound::inclusive(b"ra".to_vec())),
                Some(Bound::exclusive(b"zi".to_vec())),
                Order::Ascending,
            )
            .collect();
        assert_eq!(&expected[1..2], res.unwrap().as_slice());
        // and descending
        let res: StdResult<Vec<_>> = prefix
            .range_raw(
                &store,
                Some(Bound::inclusive(b"ra".to_vec())),
                Some(Bound::exclusive(b"zi".to_vec())),
                Order::Descending,
            )
            .collect();
        assert_eq!(&expected[1..2], res.unwrap().as_slice());
        // Include both sides
        let res: StdResult<Vec<_>> = prefix
            .range_raw(
                &store,
                Some(Bound::inclusive(b"ra".to_vec())),
                Some(Bound::inclusive(b"zi".to_vec())),
                Order::Descending,
            )
            .collect();
        assert_eq!(&expected_reversed[..2], res.unwrap().as_slice());
        // Exclude both sides
        let res: StdResult<Vec<_>> = prefix
            .range_raw(
                &store,
                Some(Bound::exclusive(b"ra".to_vec())),
                Some(Bound::exclusive(b"zi".to_vec())),
                Order::Ascending,
            )
            .collect();
        assert_eq!(res.unwrap().as_slice(), &[]);
    }

    #[test]
    fn prefix_debug() {
        let prefix: IndexPrefix<u64, u64> = IndexPrefix::new(b"lol", &[Key::Val8([8; 1])]);
        assert_eq!(
            format!("{prefix:?}"),
            "IndexPrefix { storage_prefix: [0, 3, 108, 111, 108, 0, 1, 8], pk_name: [], .. }"
        );
    }

    #[test]
    fn is_empty_works() {
        // manually create this - not testing nested prefixes here
        let prefix: IndexPrefix<Vec<u8>, u64> = IndexPrefix {
            inner: crate::prefix::Prefix {
                storage_prefix: b"foo".to_vec(),
                data: PhantomData::<(u64, _, _)>,
            },
            pk_name: vec![],
            de_fn_kv: |_, _, kv| deserialize_kv::<Vec<u8>, u64>(kv),
            de_fn_v: |_, _, kv| deserialize_v(kv),
        };

        let mut storage = MockStorage::new();

        assert!(prefix.is_empty(&storage));

        storage.set(b"fookey1", b"1");
        storage.set(b"fookey2", b"2");

        assert!(!prefix.is_empty(&storage));
    }
}

// ===== src/snapshot/item.rs =====

use serde::de::DeserializeOwned;
use serde::Serialize;

use cosmwasm_std::{StdError, StdResult, Storage};

use crate::namespace::Namespace;
7 | use crate::snapshot::{ChangeSet, Snapshot}; 8 | use crate::{Item, Map, Strategy}; 9 | 10 | /// Item that maintains a snapshot of one or more checkpoints. 11 | /// We can query historical data as well as current state. 12 | /// What data is snapshotted depends on the Strategy. 13 | pub struct SnapshotItem { 14 | primary: Item, 15 | changelog_namespace: Namespace, 16 | snapshots: Snapshot<(), T>, 17 | } 18 | 19 | impl SnapshotItem { 20 | /// Creates a new [`SnapshotItem`] with the given storage keys and strategy. 21 | /// This is a const fn only suitable when all the storage keys provided are 22 | /// static strings. 23 | /// 24 | /// Example: 25 | /// 26 | /// ```rust 27 | /// use cw_storage_plus::{SnapshotItem, Strategy}; 28 | /// 29 | /// SnapshotItem::::new( 30 | /// "every", 31 | /// "every__check", 32 | /// "every__change", 33 | /// Strategy::EveryBlock); 34 | /// ``` 35 | pub const fn new( 36 | storage_key: &'static str, 37 | checkpoints: &'static str, 38 | changelog: &'static str, 39 | strategy: Strategy, 40 | ) -> Self { 41 | SnapshotItem { 42 | primary: Item::new(storage_key), 43 | changelog_namespace: Namespace::from_static_str(changelog), 44 | snapshots: Snapshot::new(checkpoints, changelog, strategy), 45 | } 46 | } 47 | 48 | /// Creates a new [`SnapshotItem`] with the given storage keys and strategy. 49 | /// Use this if you might need to handle dynamic strings. Otherwise, you might 50 | /// prefer [`SnapshotItem::new`]. 
51 | /// 52 | /// Example: 53 | /// 54 | /// ```rust 55 | /// use cw_storage_plus::{SnapshotItem, Strategy}; 56 | /// 57 | /// let key = "every"; 58 | /// let checkpoints_key = format!("{}_check", key); 59 | /// let changelog_key = format!("{}_change", key); 60 | /// 61 | /// SnapshotItem::::new_dyn( 62 | /// key, 63 | /// checkpoints_key, 64 | /// changelog_key, 65 | /// Strategy::EveryBlock); 66 | /// ``` 67 | pub fn new_dyn( 68 | storage_key: impl Into, 69 | checkpoints: impl Into, 70 | changelog: impl Into, 71 | strategy: Strategy, 72 | ) -> Self { 73 | let changelog = changelog.into(); 74 | SnapshotItem { 75 | primary: Item::new_dyn(storage_key), 76 | changelog_namespace: changelog.clone(), 77 | snapshots: Snapshot::new_dyn(checkpoints, changelog, strategy), 78 | } 79 | } 80 | 81 | pub fn add_checkpoint(&self, store: &mut dyn Storage, height: u64) -> StdResult<()> { 82 | self.snapshots.add_checkpoint(store, height) 83 | } 84 | 85 | pub fn remove_checkpoint(&self, store: &mut dyn Storage, height: u64) -> StdResult<()> { 86 | self.snapshots.remove_checkpoint(store, height) 87 | } 88 | 89 | pub fn changelog(&self) -> Map> { 90 | // Build and return a compatible Map with the proper key type 91 | Map::new_dyn(self.changelog_namespace.clone()) 92 | } 93 | } 94 | 95 | impl SnapshotItem 96 | where 97 | T: Serialize + DeserializeOwned + Clone, 98 | { 99 | /// load old value and store changelog 100 | fn write_change(&self, store: &mut dyn Storage, height: u64) -> StdResult<()> { 101 | // if there is already data in the changelog for this block, do not write more 102 | if self.snapshots.has_changelog(store, (), height)? { 103 | return Ok(()); 104 | } 105 | // otherwise, store the previous value 106 | let old = self.primary.may_load(store)?; 107 | self.snapshots.write_changelog(store, (), height, old) 108 | } 109 | 110 | pub fn save(&self, store: &mut dyn Storage, data: &T, height: u64) -> StdResult<()> { 111 | if self.snapshots.should_checkpoint(store, &())? 
{ 112 | self.write_change(store, height)?; 113 | } 114 | self.primary.save(store, data) 115 | } 116 | 117 | pub fn remove(&self, store: &mut dyn Storage, height: u64) -> StdResult<()> { 118 | if self.snapshots.should_checkpoint(store, &())? { 119 | self.write_change(store, height)?; 120 | } 121 | self.primary.remove(store); 122 | Ok(()) 123 | } 124 | 125 | /// load will return an error if no data is set, or on parse error 126 | pub fn load(&self, store: &dyn Storage) -> StdResult { 127 | self.primary.load(store) 128 | } 129 | 130 | /// may_load will parse the data stored if present, returns Ok(None) if no data there. 131 | /// returns an error on parsing issues 132 | pub fn may_load(&self, store: &dyn Storage) -> StdResult> { 133 | self.primary.may_load(store) 134 | } 135 | 136 | pub fn may_load_at_height(&self, store: &dyn Storage, height: u64) -> StdResult> { 137 | let snapshot = self.snapshots.may_load_at_height(store, (), height)?; 138 | 139 | if let Some(r) = snapshot { 140 | Ok(r) 141 | } else { 142 | // otherwise, return current value 143 | self.may_load(store) 144 | } 145 | } 146 | 147 | // If there is no checkpoint for that height, then we return StdError::NotFound 148 | pub fn assert_checkpointed(&self, store: &dyn Storage, height: u64) -> StdResult<()> { 149 | self.snapshots.assert_checkpointed(store, height) 150 | } 151 | 152 | /// Loads the data, perform the specified action, and store the result in the database. 153 | /// This is a shorthand for some common sequences, which may be useful. 154 | /// 155 | /// If the data exists, `action(Some(value))` is called. Otherwise `action(None)` is called. 
156 | /// 157 | /// This is a bit more customized than needed to only read "old" value 1 time, not 2 per naive approach 158 | pub fn update(&self, store: &mut dyn Storage, height: u64, action: A) -> Result 159 | where 160 | A: FnOnce(Option) -> Result, 161 | E: From, 162 | { 163 | let input = self.may_load(store)?; 164 | let output = action(input)?; 165 | self.save(store, &output, height)?; 166 | Ok(output) 167 | } 168 | } 169 | 170 | #[cfg(test)] 171 | mod tests { 172 | use super::*; 173 | use crate::bound::Bound; 174 | use cosmwasm_std::testing::MockStorage; 175 | 176 | type TestItem = SnapshotItem; 177 | 178 | const NEVER: TestItem = 179 | SnapshotItem::new("never", "never__check", "never__change", Strategy::Never); 180 | const EVERY: TestItem = SnapshotItem::new( 181 | "every", 182 | "every__check", 183 | "every__change", 184 | Strategy::EveryBlock, 185 | ); 186 | const SELECT: TestItem = SnapshotItem::new( 187 | "select", 188 | "select__check", 189 | "select__change", 190 | Strategy::Selected, 191 | ); 192 | 193 | // Fills an item (u64) with the following writes: 194 | // 1: 5 195 | // 2: 7 196 | // 3: 8 197 | // 4: 1 198 | // 5: None 199 | // 6: 13 200 | // 7: None 201 | // 8: 22 202 | // Final value: 22 203 | // Value at beginning of 3 -> 7 204 | // Value at beginning of 5 -> 1 205 | fn init_data(item: &TestItem, storage: &mut dyn Storage) { 206 | item.save(storage, &5, 1).unwrap(); 207 | item.save(storage, &7, 2).unwrap(); 208 | 209 | // checkpoint 3 210 | item.add_checkpoint(storage, 3).unwrap(); 211 | 212 | // also use update to set - to ensure this works 213 | item.save(storage, &1, 3).unwrap(); 214 | item.update(storage, 3, |_| -> StdResult { Ok(8) }) 215 | .unwrap(); 216 | 217 | item.remove(storage, 4).unwrap(); 218 | item.save(storage, &13, 4).unwrap(); 219 | 220 | // checkpoint 5 221 | item.add_checkpoint(storage, 5).unwrap(); 222 | item.remove(storage, 5).unwrap(); 223 | item.update(storage, 5, |_| -> StdResult { Ok(22) }) 224 | .unwrap(); 225 | // 
and delete it later (unknown if all data present) 226 | item.remove_checkpoint(storage, 5).unwrap(); 227 | } 228 | 229 | const FINAL_VALUE: Option = Some(22); 230 | 231 | const VALUE_START_3: Option = Some(7); 232 | 233 | const VALUE_START_5: Option = Some(13); 234 | 235 | fn assert_final_value(item: &TestItem, storage: &dyn Storage) { 236 | assert_eq!(FINAL_VALUE, item.may_load(storage).unwrap()); 237 | } 238 | 239 | #[track_caller] 240 | fn assert_value_at_height( 241 | item: &TestItem, 242 | storage: &dyn Storage, 243 | height: u64, 244 | value: Option, 245 | ) { 246 | assert_eq!(value, item.may_load_at_height(storage, height).unwrap()); 247 | } 248 | 249 | fn assert_missing_checkpoint(item: &TestItem, storage: &dyn Storage, height: u64) { 250 | assert!(item.may_load_at_height(storage, height).is_err()); 251 | } 252 | 253 | #[test] 254 | fn never_works_like_normal_item() { 255 | let mut storage = MockStorage::new(); 256 | init_data(&NEVER, &mut storage); 257 | assert_final_value(&NEVER, &storage); 258 | 259 | // historical queries return error 260 | assert_missing_checkpoint(&NEVER, &storage, 3); 261 | assert_missing_checkpoint(&NEVER, &storage, 5); 262 | } 263 | 264 | #[test] 265 | fn every_blocks_stores_present_and_past() { 266 | let mut storage = MockStorage::new(); 267 | init_data(&EVERY, &mut storage); 268 | assert_final_value(&EVERY, &storage); 269 | 270 | // historical queries return historical values 271 | assert_value_at_height(&EVERY, &storage, 3, VALUE_START_3); 272 | assert_value_at_height(&EVERY, &storage, 5, VALUE_START_5); 273 | } 274 | 275 | #[test] 276 | fn selected_shows_3_not_5() { 277 | let mut storage = MockStorage::new(); 278 | init_data(&SELECT, &mut storage); 279 | assert_final_value(&SELECT, &storage); 280 | 281 | // historical queries return historical values 282 | assert_value_at_height(&SELECT, &storage, 3, VALUE_START_3); 283 | // never checkpointed 284 | assert_missing_checkpoint(&NEVER, &storage, 1); 285 | // deleted checkpoint 286 
| assert_missing_checkpoint(&NEVER, &storage, 5); 287 | } 288 | 289 | #[test] 290 | fn handle_multiple_writes_in_one_block() { 291 | let mut storage = MockStorage::new(); 292 | 293 | println!("SETUP"); 294 | EVERY.save(&mut storage, &5, 1).unwrap(); 295 | EVERY.save(&mut storage, &7, 2).unwrap(); 296 | EVERY.save(&mut storage, &2, 2).unwrap(); 297 | 298 | // update and save - query at 3 => 2, at 4 => 12 299 | EVERY 300 | .update(&mut storage, 3, |_| -> StdResult { Ok(9) }) 301 | .unwrap(); 302 | EVERY.save(&mut storage, &12, 3).unwrap(); 303 | assert_eq!(Some(5), EVERY.may_load_at_height(&storage, 2).unwrap()); 304 | assert_eq!(Some(2), EVERY.may_load_at_height(&storage, 3).unwrap()); 305 | assert_eq!(Some(12), EVERY.may_load_at_height(&storage, 4).unwrap()); 306 | 307 | // save and remove - query at 4 => 1, at 5 => None 308 | EVERY.save(&mut storage, &17, 4).unwrap(); 309 | EVERY.remove(&mut storage, 4).unwrap(); 310 | assert_eq!(Some(12), EVERY.may_load_at_height(&storage, 4).unwrap()); 311 | assert_eq!(None, EVERY.may_load_at_height(&storage, 5).unwrap()); 312 | 313 | // remove and update - query at 5 => 2, at 6 => 13 314 | EVERY.remove(&mut storage, 5).unwrap(); 315 | EVERY 316 | .update(&mut storage, 5, |_| -> StdResult { Ok(2) }) 317 | .unwrap(); 318 | assert_eq!(None, EVERY.may_load_at_height(&storage, 5).unwrap()); 319 | assert_eq!(Some(2), EVERY.may_load_at_height(&storage, 6).unwrap()); 320 | } 321 | 322 | #[test] 323 | #[cfg(feature = "iterator")] 324 | fn changelog_range_works() { 325 | use cosmwasm_std::Order; 326 | 327 | let mut store = MockStorage::new(); 328 | 329 | // simple data for testing 330 | EVERY.save(&mut store, &5, 1u64).unwrap(); 331 | EVERY.save(&mut store, &7, 2u64).unwrap(); 332 | EVERY 333 | .update(&mut store, 3u64, |_| -> StdResult { Ok(8) }) 334 | .unwrap(); 335 | EVERY.remove(&mut store, 4u64).unwrap(); 336 | 337 | // let's try to iterate over the changelog 338 | let all: StdResult> = EVERY 339 | .changelog() 340 | .range(&store, 
None, None, Order::Ascending) 341 | .collect(); 342 | let all = all.unwrap(); 343 | assert_eq!(4, all.len()); 344 | assert_eq!( 345 | all, 346 | vec![ 347 | (1, ChangeSet { old: None }), 348 | (2, ChangeSet { old: Some(5) }), 349 | (3, ChangeSet { old: Some(7) }), 350 | (4, ChangeSet { old: Some(8) }) 351 | ] 352 | ); 353 | 354 | // let's try to iterate over a changelog range 355 | let all: StdResult> = EVERY 356 | .changelog() 357 | .range(&store, Some(Bound::exclusive(3u64)), None, Order::Ascending) 358 | .collect(); 359 | let all = all.unwrap(); 360 | assert_eq!(1, all.len()); 361 | assert_eq!(all, vec![(4, ChangeSet { old: Some(8) }),]); 362 | } 363 | } 364 | -------------------------------------------------------------------------------- /src/snapshot/mod.rs: -------------------------------------------------------------------------------- 1 | #![cfg(feature = "iterator")] 2 | mod item; 3 | mod map; 4 | 5 | pub use item::SnapshotItem; 6 | pub use map::SnapshotMap; 7 | 8 | use crate::bound::Bound; 9 | use crate::de::KeyDeserialize; 10 | use crate::namespace::Namespace; 11 | use crate::{Map, Prefixer, PrimaryKey}; 12 | use cosmwasm_std::{Order, StdError, StdResult, Storage}; 13 | use serde::de::DeserializeOwned; 14 | use serde::{Deserialize, Serialize}; 15 | 16 | /// Structure holding a map of checkpoints composited from 17 | /// height (as u64) and counter of how many times it has 18 | /// been checkpointed (as u32). 19 | /// Stores all changes in changelog. 20 | #[derive(Debug, Clone)] 21 | pub(crate) struct Snapshot { 22 | checkpoints: Map, 23 | 24 | // this stores all changes (key, height). Must differentiate between no data written, 25 | // and explicit None (just inserted) 26 | pub changelog: Map<(K, u64), ChangeSet>, 27 | 28 | // How aggressive we are about checkpointing all data 29 | strategy: Strategy, 30 | } 31 | 32 | impl Snapshot { 33 | /// Creates a new [`Snapshot`] with the given storage keys and strategy. 
34 | /// This is a const fn only suitable when all the storage keys provided are 35 | /// static strings. 36 | pub const fn new( 37 | checkpoints: &'static str, 38 | changelog: &'static str, 39 | strategy: Strategy, 40 | ) -> Snapshot { 41 | Snapshot { 42 | checkpoints: Map::new(checkpoints), 43 | changelog: Map::new(changelog), 44 | strategy, 45 | } 46 | } 47 | 48 | /// Creates a new [`Snapshot`] with the given storage keys and strategy. 49 | /// Use this if you might need to handle dynamic strings. Otherwise, you might 50 | /// prefer [`Snapshot::new`]. 51 | pub fn new_dyn( 52 | checkpoints: impl Into, 53 | changelog: impl Into, 54 | strategy: Strategy, 55 | ) -> Snapshot { 56 | Snapshot { 57 | checkpoints: Map::new_dyn(checkpoints), 58 | changelog: Map::new_dyn(changelog), 59 | strategy, 60 | } 61 | } 62 | 63 | pub fn add_checkpoint(&self, store: &mut dyn Storage, height: u64) -> StdResult<()> { 64 | self.checkpoints 65 | .update::<_, StdError>(store, height, |count| Ok(count.unwrap_or_default() + 1))?; 66 | Ok(()) 67 | } 68 | 69 | pub fn remove_checkpoint(&self, store: &mut dyn Storage, height: u64) -> StdResult<()> { 70 | let count = self 71 | .checkpoints 72 | .may_load(store, height)? 
73 | .unwrap_or_default(); 74 | if count <= 1 { 75 | self.checkpoints.remove(store, height); 76 | Ok(()) 77 | } else { 78 | self.checkpoints.save(store, height, &(count - 1)) 79 | } 80 | } 81 | } 82 | 83 | impl<'a, K, T> Snapshot 84 | where 85 | T: Serialize + DeserializeOwned + Clone, 86 | K: PrimaryKey<'a> + Prefixer<'a> + KeyDeserialize, 87 | { 88 | /// should_checkpoint looks at the strategy and determines if we want to checkpoint 89 | pub fn should_checkpoint(&self, store: &dyn Storage, k: &K) -> StdResult { 90 | match self.strategy { 91 | Strategy::EveryBlock => Ok(true), 92 | Strategy::Never => Ok(false), 93 | Strategy::Selected => self.should_checkpoint_selected(store, k), 94 | } 95 | } 96 | 97 | /// this is just pulled out from above for the selected block 98 | fn should_checkpoint_selected(&self, store: &dyn Storage, k: &K) -> StdResult { 99 | // most recent checkpoint 100 | let checkpoint = self 101 | .checkpoints 102 | .range(store, None, None, Order::Descending) 103 | .next() 104 | .transpose()?; 105 | if let Some((height, _)) = checkpoint { 106 | // any changelog for the given key since then? 
107 | let start = Bound::inclusive(height); 108 | let first = self 109 | .changelog 110 | .prefix(k.clone()) 111 | .range_raw(store, Some(start), None, Order::Ascending) 112 | .next() 113 | .transpose()?; 114 | if first.is_none() { 115 | // there must be at least one open checkpoint and no changelog for the given height since then 116 | return Ok(true); 117 | } 118 | } 119 | // otherwise, we don't save this 120 | Ok(false) 121 | } 122 | 123 | // If there is no checkpoint for that height, then we return StdError::NotFound 124 | pub fn assert_checkpointed(&self, store: &dyn Storage, height: u64) -> StdResult<()> { 125 | let has = match self.strategy { 126 | Strategy::EveryBlock => true, 127 | Strategy::Never => false, 128 | Strategy::Selected => self.checkpoints.may_load(store, height)?.is_some(), 129 | }; 130 | match has { 131 | true => Ok(()), 132 | false => Err(StdError::msg("checkpoint not found")), 133 | } 134 | } 135 | 136 | pub fn has_changelog(&self, store: &mut dyn Storage, key: K, height: u64) -> StdResult { 137 | Ok(self.changelog.may_load(store, (key, height))?.is_some()) 138 | } 139 | 140 | pub fn write_changelog( 141 | &self, 142 | store: &mut dyn Storage, 143 | key: K, 144 | height: u64, 145 | old: Option, 146 | ) -> StdResult<()> { 147 | self.changelog 148 | .save(store, (key, height), &ChangeSet { old }) 149 | } 150 | 151 | // may_load_at_height reads historical data from given checkpoints. 152 | // Returns StdError::NotFound if we have no checkpoint, and can give no data. 153 | // Returns Ok(None) if there is a checkpoint, but no cached data (no changes since the 154 | // checkpoint. Caller should query current state). 
155 | // Return Ok(Some(x)) if there is a checkpoint and data written to changelog, returning the state at that time 156 | pub fn may_load_at_height( 157 | &self, 158 | store: &dyn Storage, 159 | key: K, 160 | height: u64, 161 | ) -> StdResult>> { 162 | self.assert_checkpointed(store, height)?; 163 | 164 | // this will look for the first snapshot of height >= given height 165 | // If None, there is no snapshot since that time. 166 | let start = Bound::inclusive(height); 167 | let first = self 168 | .changelog 169 | .prefix(key) 170 | .range_raw(store, Some(start), None, Order::Ascending) 171 | .next(); 172 | 173 | if let Some(r) = first { 174 | // if we found a match, return this last one 175 | r.map(|(_, v)| Some(v.old)) 176 | } else { 177 | Ok(None) 178 | } 179 | } 180 | } 181 | 182 | #[derive(Clone, Copy, PartialEq, Eq, Debug, Serialize, Deserialize)] 183 | pub enum Strategy { 184 | EveryBlock, 185 | Never, 186 | /// Only writes for linked blocks - does a few more reads to save some writes. 187 | /// Probably uses more gas, but less total disk usage. 188 | /// 189 | /// Note that you need a trusted source (e.g. own contract) to set/remove checkpoints. 190 | /// Useful when the checkpoint setting happens in the same contract as the snapshotting. 
191 | Selected, 192 | } 193 | 194 | #[derive(Clone, Copy, PartialEq, Eq, Debug, Serialize, Deserialize)] 195 | pub struct ChangeSet { 196 | pub old: Option, 197 | } 198 | 199 | #[cfg(test)] 200 | mod tests { 201 | use super::*; 202 | use cosmwasm_std::testing::MockStorage; 203 | 204 | type TestSnapshot = Snapshot<&'static str, u64>; 205 | 206 | const NEVER: TestSnapshot = Snapshot::new("never__check", "never__change", Strategy::Never); 207 | const EVERY: TestSnapshot = 208 | Snapshot::new("every__check", "every__change", Strategy::EveryBlock); 209 | const SELECT: TestSnapshot = 210 | Snapshot::new("select__check", "select__change", Strategy::Selected); 211 | 212 | const DUMMY_KEY: &str = "dummy"; 213 | 214 | #[test] 215 | fn should_checkpoint() { 216 | let storage = MockStorage::new(); 217 | assert!(!NEVER.should_checkpoint(&storage, &DUMMY_KEY).unwrap()); 218 | assert!(EVERY.should_checkpoint(&storage, &DUMMY_KEY).unwrap()); 219 | assert!(!SELECT.should_checkpoint(&storage, &DUMMY_KEY).unwrap()); 220 | } 221 | 222 | #[test] 223 | fn assert_checkpointed() { 224 | let mut storage = MockStorage::new(); 225 | 226 | assert_eq!( 227 | "kind: Other, error: checkpoint not found", 228 | NEVER 229 | .assert_checkpointed(&storage, 1) 230 | .unwrap_err() 231 | .to_string() 232 | ); 233 | assert!(EVERY.assert_checkpointed(&storage, 1).is_ok()); 234 | assert_eq!( 235 | "kind: Other, error: checkpoint not found", 236 | SELECT 237 | .assert_checkpointed(&storage, 1) 238 | .unwrap_err() 239 | .to_string() 240 | ); 241 | 242 | // Add a checkpoint at 1 243 | NEVER.add_checkpoint(&mut storage, 1).unwrap(); 244 | EVERY.add_checkpoint(&mut storage, 1).unwrap(); 245 | SELECT.add_checkpoint(&mut storage, 1).unwrap(); 246 | 247 | assert_eq!( 248 | "kind: Other, error: checkpoint not found", 249 | NEVER 250 | .assert_checkpointed(&storage, 1) 251 | .unwrap_err() 252 | .to_string() 253 | ); 254 | assert!(EVERY.assert_checkpointed(&storage, 1).is_ok()); 255 | 
assert!(SELECT.assert_checkpointed(&storage, 1).is_ok()); 256 | 257 | // Remove checkpoint 258 | NEVER.remove_checkpoint(&mut storage, 1).unwrap(); 259 | EVERY.remove_checkpoint(&mut storage, 1).unwrap(); 260 | SELECT.remove_checkpoint(&mut storage, 1).unwrap(); 261 | 262 | assert_eq!( 263 | "kind: Other, error: checkpoint not found", 264 | NEVER 265 | .assert_checkpointed(&storage, 1) 266 | .unwrap_err() 267 | .to_string() 268 | ); 269 | assert!(EVERY.assert_checkpointed(&storage, 1).is_ok()); 270 | assert_eq!( 271 | "kind: Other, error: checkpoint not found", 272 | SELECT 273 | .assert_checkpointed(&storage, 1) 274 | .unwrap_err() 275 | .to_string() 276 | ); 277 | } 278 | 279 | #[test] 280 | fn has_changelog() { 281 | let mut storage = MockStorage::new(); 282 | 283 | assert!(!NEVER.has_changelog(&mut storage, DUMMY_KEY, 1).unwrap()); 284 | assert!(!EVERY.has_changelog(&mut storage, DUMMY_KEY, 1).unwrap()); 285 | assert!(!SELECT.has_changelog(&mut storage, DUMMY_KEY, 1).unwrap()); 286 | assert!(!NEVER.has_changelog(&mut storage, DUMMY_KEY, 2).unwrap()); 287 | assert!(!EVERY.has_changelog(&mut storage, DUMMY_KEY, 2).unwrap()); 288 | assert!(!SELECT.has_changelog(&mut storage, DUMMY_KEY, 2).unwrap()); 289 | assert!(!NEVER.has_changelog(&mut storage, DUMMY_KEY, 3).unwrap()); 290 | assert!(!EVERY.has_changelog(&mut storage, DUMMY_KEY, 3).unwrap()); 291 | assert!(!SELECT.has_changelog(&mut storage, DUMMY_KEY, 3).unwrap()); 292 | 293 | // Write a changelog at 2 294 | NEVER 295 | .write_changelog(&mut storage, DUMMY_KEY, 2, Some(3)) 296 | .unwrap(); 297 | EVERY 298 | .write_changelog(&mut storage, DUMMY_KEY, 2, Some(4)) 299 | .unwrap(); 300 | SELECT 301 | .write_changelog(&mut storage, DUMMY_KEY, 2, Some(5)) 302 | .unwrap(); 303 | 304 | assert!(!NEVER.has_changelog(&mut storage, DUMMY_KEY, 1).unwrap()); 305 | assert!(!EVERY.has_changelog(&mut storage, DUMMY_KEY, 1).unwrap()); 306 | assert!(!SELECT.has_changelog(&mut storage, DUMMY_KEY, 1).unwrap()); 307 | 
assert!(NEVER.has_changelog(&mut storage, DUMMY_KEY, 2).unwrap()); 308 | assert!(EVERY.has_changelog(&mut storage, DUMMY_KEY, 2).unwrap()); 309 | assert!(SELECT.has_changelog(&mut storage, DUMMY_KEY, 2).unwrap()); 310 | assert!(!NEVER.has_changelog(&mut storage, DUMMY_KEY, 3).unwrap()); 311 | assert!(!EVERY.has_changelog(&mut storage, DUMMY_KEY, 3).unwrap()); 312 | assert!(!SELECT.has_changelog(&mut storage, DUMMY_KEY, 3).unwrap()); 313 | } 314 | 315 | #[test] 316 | fn may_load_at_height() { 317 | let mut storage = MockStorage::new(); 318 | 319 | assert_eq!( 320 | "kind: Other, error: checkpoint not found", 321 | NEVER 322 | .may_load_at_height(&storage, DUMMY_KEY, 3) 323 | .unwrap_err() 324 | .to_string() 325 | ); 326 | assert_eq!( 327 | None, 328 | EVERY.may_load_at_height(&storage, DUMMY_KEY, 3).unwrap() 329 | ); 330 | assert_eq!( 331 | "kind: Other, error: checkpoint not found", 332 | SELECT 333 | .may_load_at_height(&storage, DUMMY_KEY, 3) 334 | .unwrap_err() 335 | .to_string() 336 | ); 337 | 338 | // Add a checkpoint at 3 339 | NEVER.add_checkpoint(&mut storage, 3).unwrap(); 340 | EVERY.add_checkpoint(&mut storage, 3).unwrap(); 341 | SELECT.add_checkpoint(&mut storage, 3).unwrap(); 342 | 343 | assert_eq!( 344 | "kind: Other, error: checkpoint not found", 345 | NEVER 346 | .may_load_at_height(&storage, DUMMY_KEY, 3) 347 | .unwrap_err() 348 | .to_string() 349 | ); 350 | assert_eq!( 351 | None, 352 | EVERY.may_load_at_height(&storage, DUMMY_KEY, 3).unwrap() 353 | ); 354 | assert_eq!( 355 | None, 356 | SELECT.may_load_at_height(&storage, DUMMY_KEY, 3).unwrap() 357 | ); 358 | 359 | // Write a changelog at 3 360 | NEVER 361 | .write_changelog(&mut storage, DUMMY_KEY, 3, Some(100)) 362 | .unwrap(); 363 | EVERY 364 | .write_changelog(&mut storage, DUMMY_KEY, 3, Some(101)) 365 | .unwrap(); 366 | SELECT 367 | .write_changelog(&mut storage, DUMMY_KEY, 3, Some(102)) 368 | .unwrap(); 369 | 370 | assert_eq!( 371 | "kind: Other, error: checkpoint not found", 372 | NEVER 373 
| .may_load_at_height(&storage, DUMMY_KEY, 3) 374 | .unwrap_err() 375 | .to_string() 376 | ); 377 | assert_eq!( 378 | Some(Some(101)), 379 | EVERY.may_load_at_height(&storage, DUMMY_KEY, 3).unwrap() 380 | ); 381 | assert_eq!( 382 | Some(Some(102)), 383 | SELECT.may_load_at_height(&storage, DUMMY_KEY, 3).unwrap() 384 | ); 385 | // Check that may_load_at_height at a previous value will return the first change after that. 386 | // (Only with EVERY). 387 | assert_eq!( 388 | "kind: Other, error: checkpoint not found", 389 | NEVER 390 | .may_load_at_height(&storage, DUMMY_KEY, 2) 391 | .unwrap_err() 392 | .to_string() 393 | ); 394 | assert_eq!( 395 | Some(Some(101)), 396 | EVERY.may_load_at_height(&storage, DUMMY_KEY, 2).unwrap() 397 | ); 398 | assert_eq!( 399 | "kind: Other, error: checkpoint not found", 400 | SELECT 401 | .may_load_at_height(&storage, DUMMY_KEY, 2) 402 | .unwrap_err() 403 | .to_string() 404 | ); 405 | 406 | // Write a changelog at 4, removing the value 407 | NEVER 408 | .write_changelog(&mut storage, DUMMY_KEY, 4, None) 409 | .unwrap(); 410 | EVERY 411 | .write_changelog(&mut storage, DUMMY_KEY, 4, None) 412 | .unwrap(); 413 | SELECT 414 | .write_changelog(&mut storage, DUMMY_KEY, 4, None) 415 | .unwrap(); 416 | // And add a checkpoint at 4 417 | NEVER.add_checkpoint(&mut storage, 4).unwrap(); 418 | EVERY.add_checkpoint(&mut storage, 4).unwrap(); 419 | SELECT.add_checkpoint(&mut storage, 4).unwrap(); 420 | 421 | assert_eq!( 422 | "kind: Other, error: checkpoint not found", 423 | NEVER 424 | .may_load_at_height(&storage, DUMMY_KEY, 4) 425 | .unwrap_err() 426 | .to_string() 427 | ); 428 | assert_eq!( 429 | Some(None), 430 | EVERY.may_load_at_height(&storage, DUMMY_KEY, 4).unwrap() 431 | ); 432 | assert_eq!( 433 | Some(None), 434 | SELECT.may_load_at_height(&storage, DUMMY_KEY, 4).unwrap() 435 | ); 436 | 437 | // Confirm old value at 3 438 | assert_eq!( 439 | "kind: Other, error: checkpoint not found", 440 | NEVER 441 | .may_load_at_height(&storage, 
DUMMY_KEY, 3) 442 | .unwrap_err() 443 | .to_string() 444 | ); 445 | assert_eq!( 446 | Some(Some(101)), 447 | EVERY.may_load_at_height(&storage, DUMMY_KEY, 3).unwrap() 448 | ); 449 | assert_eq!( 450 | Some(Some(102)), 451 | SELECT.may_load_at_height(&storage, DUMMY_KEY, 3).unwrap() 452 | ); 453 | } 454 | } 455 | -------------------------------------------------------------------------------- /src/keys.rs: -------------------------------------------------------------------------------- 1 | use cosmwasm_std::{storage_keys::namespace_with_key, Addr}; 2 | use cosmwasm_std::{Int128, Int64, Uint128, Uint64}; 3 | 4 | use crate::de::KeyDeserialize; 5 | use crate::int_key::IntKey; 6 | 7 | #[derive(Debug)] 8 | pub enum Key<'a> { 9 | Ref(&'a [u8]), 10 | Val8([u8; 1]), 11 | Val16([u8; 2]), 12 | Val32([u8; 4]), 13 | Val64([u8; 8]), 14 | Val128([u8; 16]), 15 | } 16 | 17 | impl AsRef<[u8]> for Key<'_> { 18 | fn as_ref(&self) -> &[u8] { 19 | match self { 20 | Key::Ref(r) => r, 21 | Key::Val8(v) => v, 22 | Key::Val16(v) => v, 23 | Key::Val32(v) => v, 24 | Key::Val64(v) => v, 25 | Key::Val128(v) => v, 26 | } 27 | } 28 | } 29 | 30 | impl PartialEq<&[u8]> for Key<'_> { 31 | fn eq(&self, other: &&[u8]) -> bool { 32 | self.as_ref() == *other 33 | } 34 | } 35 | 36 | /// `PrimaryKey` needs to be implemented for types that want to be a `Map` (or `Map`-like) key, 37 | /// or part of a key. 38 | /// 39 | /// In particular, it defines a series of types that help iterating over parts of a (composite) key: 40 | /// 41 | /// `Prefix`: Prefix is eager. That is, except for empty keys, it's always "one less" than the full key. 42 | /// `Suffix`: Suffix is the complement of prefix. 43 | /// `SubPrefix`: Sub-prefix is "one less" than prefix. 44 | /// `SuperSuffix`: Super-suffix is "one more" than suffix. The complement of sub-prefix. 45 | /// 46 | /// By example, for a 2-tuple `(T, U)`: 47 | /// 48 | /// `T`: Prefix. 49 | /// `U`: Suffix. 50 | /// `()`: Sub-prefix. 51 | /// `(T, U)`: Super-suffix. 
/// `SubPrefix` and `SuperSuffix` only make real sense in the case of triples. Still, they need to be
/// consistently defined for all types.
pub trait PrimaryKey<'a>: Clone {
    /// These associated types need to implement `Prefixer`, so that they can be useful arguments
    /// for `prefix()`, `sub_prefix()`, and their key-deserializable variants.
    type Prefix: Prefixer<'a>;
    type SubPrefix: Prefixer<'a>;

    /// These associated types need to implement `KeyDeserialize`, so that they can be returned from
    /// `range_de()` and friends.
    type Suffix: KeyDeserialize;
    type SuperSuffix: KeyDeserialize;

    /// returns a slice of key steps, which can be optionally combined
    fn key(&self) -> Vec<Key>;

    /// Length-prefixes all but the last step and appends the last one raw,
    /// producing the full storage key.
    fn joined_key(&self) -> Vec<u8> {
        let keys = self.key();
        let l = keys.len();
        namespace_with_key(
            &keys[0..l - 1].iter().map(Key::as_ref).collect::<Vec<_>>(),
            keys[l - 1].as_ref(),
        )
    }

    /// Like [`Self::joined_key`], but appends an extra raw `key` after all
    /// (length-prefixed) steps of `self`.
    fn joined_extra_key(&self, key: &[u8]) -> Vec<u8> {
        let keys = self.key();
        namespace_with_key(&keys.iter().map(Key::as_ref).collect::<Vec<_>>(), key)
    }
}

// Empty / no primary key
impl PrimaryKey<'_> for () {
    type Prefix = Self;
    type SubPrefix = Self;
    type Suffix = Self;
    type SuperSuffix = Self;

    fn key(&self) -> Vec<Key> {
        vec![]
    }
}

impl<'a> PrimaryKey<'a> for &'a [u8] {
    type Prefix = ();
    type SubPrefix = ();
    type Suffix = Self;
    type SuperSuffix = Self;

    fn key(&self) -> Vec<Key> {
        // this is simple, we don't add more prefixes
        vec![Key::Ref(self)]
    }
}

impl<const N: usize> PrimaryKey<'_> for [u8; N] {
    type Prefix = ();
    type SubPrefix = ();
    type Suffix = Self;
    type SuperSuffix = Self;

    fn key(&self) -> Vec<Key> {
        // this is simple, we don't add more prefixes
        vec![Key::Ref(self.as_slice())]
    }
}

// Provide a string version of this to raw encode strings
impl<'a> PrimaryKey<'a> for &'a str {
    type Prefix = ();
    type SubPrefix = ();
    type Suffix = Self;
    type SuperSuffix = Self;

    fn key(&self) -> Vec<Key> {
        // this is simple, we don't add more prefixes
        vec![Key::Ref(self.as_bytes())]
    }
}

// use generics for combining there - so we can use &[u8], Vec<u8>, or IntKey
impl<'a, T: PrimaryKey<'a> + Prefixer<'a> + KeyDeserialize, U: PrimaryKey<'a> + KeyDeserialize>
    PrimaryKey<'a> for (T, U)
{
    type Prefix = T;
    type SubPrefix = ();
    type Suffix = U;
    type SuperSuffix = Self;

    fn key(&self) -> Vec<Key> {
        // concatenate the steps of both components
        let mut keys = self.0.key();
        keys.extend(self.1.key());
        keys
    }
}

// implements PrimaryKey for all &T where T implements PrimaryKey.
impl<'a, T> PrimaryKey<'a> for &'a T
where
    T: PrimaryKey<'a>,
{
    type Prefix = <T as PrimaryKey<'a>>::Prefix;
    type SubPrefix = <T as PrimaryKey<'a>>::SubPrefix;
    type Suffix = T::Suffix;
    type SuperSuffix = T::SuperSuffix;

    fn key(&self) -> Vec<Key> {
        <T as PrimaryKey<'a>>::key(self)
    }
}

// use generics for combining there - so we can use &[u8], Vec<u8>, or IntKey
impl<
        'a,
        T: PrimaryKey<'a> + Prefixer<'a>,
        U: PrimaryKey<'a> + Prefixer<'a> + KeyDeserialize,
        V: PrimaryKey<'a> + KeyDeserialize,
    > PrimaryKey<'a> for (T, U, V)
{
    type Prefix = (T, U);
    type SubPrefix = T;
    type Suffix = V;
    type SuperSuffix = (U, V);

    fn key(&self) -> Vec<Key> {
        // concatenate the steps of all three components
        let mut keys = self.0.key();
        keys.extend(self.1.key());
        keys.extend(self.2.key());
        keys
    }
}

pub trait Prefixer<'a> {
    /// returns 0 or more namespaces that should be length-prefixed and concatenated for range searches
    fn prefix(&self) -> Vec<Key>;

    /// All prefix steps, length-prefixed and concatenated, with an empty
    /// trailing key.
    fn joined_prefix(&self) -> Vec<u8> {
        let prefixes = self.prefix();
        namespace_with_key(&prefixes.iter().map(Key::as_ref).collect::<Vec<_>>(), &[])
    }
}

impl Prefixer<'_> for () {
    fn prefix(&self) -> Vec<Key> {
        vec![]
    }
}

impl<'a> Prefixer<'a> for &'a [u8] {
    fn prefix(&self) -> Vec<Key> {
        vec![Key::Ref(self)]
    }
}

impl<'a, T: Prefixer<'a>, U: Prefixer<'a>> Prefixer<'a> for (T, U) {
    fn prefix(&self) -> Vec<Key> {
        let mut res = self.0.prefix();
        res.extend(self.1.prefix());
        res
    }
}

impl<'a, T: Prefixer<'a>, U: Prefixer<'a>, V: Prefixer<'a>> Prefixer<'a> for (T, U, V) {
    fn prefix(&self) -> Vec<Key> {
        let mut res = self.0.prefix();
        res.extend(self.1.prefix());
        res.extend(self.2.prefix());
        res
    }
}

impl<'a, T> Prefixer<'a> for &'a T
where
    T: Prefixer<'a>,
{
    fn prefix(&self) -> Vec<Key> {
        <T as Prefixer<'a>>::prefix(self)
    }
}

// Provide a string version of this to raw encode strings
impl<'a> Prefixer<'a> for &'a str {
    fn prefix(&self) -> Vec<Key> {
        vec![Key::Ref(self.as_bytes())]
    }
}

impl PrimaryKey<'_> for Vec<u8> {
    type Prefix = ();
    type SubPrefix = ();
    type Suffix = Self;
    type SuperSuffix = Self;

    fn key(&self) -> Vec<Key> {
        vec![Key::Ref(self)]
    }
}

impl Prefixer<'_> for Vec<u8> {
    fn prefix(&self) -> Vec<Key> {
        vec![Key::Ref(self.as_ref())]
    }
}

impl PrimaryKey<'_> for String {
    type Prefix = ();
    type SubPrefix = ();
    type Suffix = Self;
    type SuperSuffix = Self;

    fn key(&self) -> Vec<Key> {
        vec![Key::Ref(self.as_bytes())]
    }
}

impl Prefixer<'_> for String {
    fn prefix(&self) -> Vec<Key> {
        vec![Key::Ref(self.as_bytes())]
    }
}

/// owned variant.
impl PrimaryKey<'_> for Addr {
    type Prefix = ();
    type SubPrefix = ();
    type Suffix = Self;
    type SuperSuffix = Self;

    fn key(&self) -> Vec<Key> {
        // this is simple, we don't add more prefixes
        vec![Key::Ref(self.as_bytes())]
    }
}

impl Prefixer<'_> for Addr {
    fn prefix(&self) -> Vec<Key> {
        vec![Key::Ref(self.as_bytes())]
    }
}

/// Implements `PrimaryKey` for an integer type, storing its fixed-width
/// big-endian (sign-flipped for signed) byte representation inline.
macro_rules! integer_key {
    (for $($t:ty, $v:tt),+) => {
        $(impl<'a> PrimaryKey<'a> for $t {
            type Prefix = ();
            type SubPrefix = ();
            type Suffix = Self;
            type SuperSuffix = Self;

            fn key(&self) -> Vec<Key> {
                vec![Key::$v(self.to_cw_bytes())]
            }
        })*
    }
}

integer_key!(for i8, Val8, u8, Val8, i16, Val16, u16, Val16, i32, Val32, u32, Val32, i64, Val64, u64, Val64, i128, Val128, u128, Val128, Uint64, Val64, Uint128, Val128, Int64, Val64, Int128, Val128);

/// Implements `Prefixer` for an integer type, mirroring `integer_key!`.
macro_rules! integer_prefix {
    (for $($t:ty, $v:tt),+) => {
        $(impl<'a> Prefixer<'a> for $t {
            fn prefix(&self) -> Vec<Key> {
                vec![Key::$v(self.to_cw_bytes())]
            }
        })*
    }
}

integer_prefix!(for i8, Val8, u8, Val8, i16, Val16, u16, Val16, i32, Val32, u32, Val32, i64, Val64, u64, Val64, i128, Val128, u128, Val128, Uint64, Val64, Uint128, Val128, Int64, Val64, Int128, Val128);

#[cfg(test)]
mod test {
    use cosmwasm_std::{Uint256, Uint512};

    use super::*;

    #[test]
    fn naked_8key_works() {
        let k: u8 = 42u8;
        let path = k.key();
        assert_eq!(1, path.len());
        assert_eq!(42u8.to_cw_bytes(), path[0].as_ref());

        let k: i8 = 42i8;
        let path = k.key();
        assert_eq!(1, path.len());
        assert_eq!(42i8.to_cw_bytes(), path[0].as_ref());
    }

    #[test]
    fn naked_16key_works() {
        let k: u16 = 4242u16;
        let path = k.key();
        assert_eq!(1, path.len());
assert_eq!(4242u16.to_cw_bytes(), path[0].as_ref()); 346 | 347 | let k: i16 = 4242i16; 348 | let path = k.key(); 349 | assert_eq!(1, path.len()); 350 | assert_eq!(4242i16.to_cw_bytes(), path[0].as_ref()); 351 | } 352 | 353 | #[test] 354 | fn naked_32key_works() { 355 | let k: u32 = 4242u32; 356 | let path = k.key(); 357 | assert_eq!(1, path.len()); 358 | assert_eq!(4242u32.to_cw_bytes(), path[0].as_ref()); 359 | 360 | let k: i32 = 4242i32; 361 | let path = k.key(); 362 | assert_eq!(1, path.len()); 363 | assert_eq!(4242i32.to_cw_bytes(), path[0].as_ref()); 364 | } 365 | 366 | #[test] 367 | fn naked_64key_works() { 368 | let k: u64 = 4242u64; 369 | let path = k.key(); 370 | assert_eq!(1, path.len()); 371 | assert_eq!(4242u64.to_cw_bytes(), path[0].as_ref()); 372 | 373 | let k: i64 = 4242i64; 374 | let path = k.key(); 375 | assert_eq!(1, path.len()); 376 | assert_eq!(4242i64.to_cw_bytes(), path[0].as_ref()); 377 | } 378 | 379 | #[test] 380 | fn naked_128key_works() { 381 | let k: u128 = 4242u128; 382 | let path = k.key(); 383 | assert_eq!(1, path.len()); 384 | assert_eq!(4242u128.to_cw_bytes(), path[0].as_ref()); 385 | 386 | let k: i128 = 4242i128; 387 | let path = k.key(); 388 | assert_eq!(1, path.len()); 389 | assert_eq!(4242i128.to_cw_bytes(), path[0].as_ref()); 390 | } 391 | 392 | #[test] 393 | fn std_uint64_key_works() { 394 | let k: Uint64 = Uint64::from(4242u64); 395 | let path = k.key(); 396 | assert_eq!(1, path.len()); 397 | assert_eq!(4242u64.to_cw_bytes(), path[0].as_ref()); 398 | } 399 | 400 | #[test] 401 | fn std_uint128_key_works() { 402 | let k: Uint128 = Uint128::from(4242u128); 403 | let path = k.key(); 404 | assert_eq!(1, path.len()); 405 | assert_eq!(4242u128.to_cw_bytes(), path[0].as_ref()); 406 | } 407 | 408 | #[test] 409 | fn std_int64_key_works() { 410 | let k: Int64 = Int64::from(-4242i64); 411 | let path = k.key(); 412 | assert_eq!(1, path.len()); 413 | assert_eq!((-4242i64).to_cw_bytes(), path[0].as_ref()); 414 | } 415 | 416 | #[test] 417 | 
fn std_int128_key_works() { 418 | let k: Int128 = Int128::from(-4242i128); 419 | let path = k.key(); 420 | assert_eq!(1, path.len()); 421 | assert_eq!((-4242i128).to_cw_bytes(), path[0].as_ref()); 422 | } 423 | 424 | #[test] 425 | fn str_key_works() { 426 | type K<'a> = &'a str; 427 | 428 | let k: K = "hello"; 429 | let path = k.key(); 430 | assert_eq!(1, path.len()); 431 | assert_eq!(b"hello", path[0].as_ref()); 432 | 433 | let joined = k.joined_key(); 434 | assert_eq!(joined, b"hello") 435 | } 436 | 437 | #[test] 438 | fn string_key_works() { 439 | type K = String; 440 | 441 | let k: K = "hello".to_string(); 442 | let path = k.key(); 443 | assert_eq!(1, path.len()); 444 | assert_eq!(b"hello", path[0].as_ref()); 445 | 446 | let joined = k.joined_key(); 447 | assert_eq!(joined, b"hello") 448 | } 449 | 450 | #[test] 451 | fn fixed_size_bytes_key_works() { 452 | type K = [u8; 32]; 453 | 454 | let k: K = Uint256::MAX.to_be_bytes(); 455 | let path = k.key(); 456 | assert_eq!(1, path.len()); 457 | assert_eq!(k, path[0].as_ref()); 458 | 459 | let joined = k.joined_key(); 460 | assert_eq!(joined, k); 461 | 462 | // ref also works 463 | type K2<'a> = &'a [u8; 64]; 464 | 465 | let k: K2 = &Uint512::MAX.to_be_bytes(); 466 | let path = k.key(); 467 | assert_eq!(1, path.len()); 468 | assert_eq!(k, path[0].as_ref()); 469 | 470 | let joined = k.joined_key(); 471 | assert_eq!(joined, k); 472 | } 473 | 474 | #[test] 475 | fn nested_str_key_works() { 476 | type K<'a> = (&'a str, &'a [u8]); 477 | 478 | let k: K = ("hello", b"world"); 479 | let path = k.key(); 480 | assert_eq!(2, path.len()); 481 | assert_eq!(b"hello", path[0].as_ref()); 482 | assert_eq!(b"world", path[1].as_ref()); 483 | } 484 | 485 | #[test] 486 | fn composite_byte_key() { 487 | let k: (&[u8], &[u8]) = ("foo".as_bytes(), b"bar"); 488 | let path = k.key(); 489 | assert_eq!(2, path.len()); 490 | assert_eq!(path, vec!["foo".as_bytes(), b"bar"],); 491 | } 492 | 493 | #[test] 494 | fn naked_composite_int_key() { 495 | 
let k: (u32, u64) = (123, 87654); 496 | let path = k.key(); 497 | assert_eq!(2, path.len()); 498 | assert_eq!(4, path[0].as_ref().len()); 499 | assert_eq!(8, path[1].as_ref().len()); 500 | assert_eq!(path[0].as_ref(), 123u32.to_cw_bytes()); 501 | assert_eq!(path[1].as_ref(), 87654u64.to_cw_bytes()); 502 | } 503 | 504 | #[test] 505 | fn nested_composite_keys() { 506 | // use this to ensure proper type-casts below 507 | let first: &[u8] = b"foo"; 508 | // this function tests how well the generics extend to "edge cases" 509 | let k: ((&[u8], &[u8]), &[u8]) = ((first, b"bar"), b"zoom"); 510 | let path = k.key(); 511 | assert_eq!(3, path.len()); 512 | assert_eq!(path, vec![first, b"bar", b"zoom"]); 513 | 514 | // ensure prefix also works 515 | let dir = k.0.prefix(); 516 | assert_eq!(2, dir.len()); 517 | assert_eq!(dir, vec![first, b"bar"]); 518 | } 519 | 520 | #[test] 521 | fn naked_8bit_prefixes() { 522 | let pair: (u8, &[u8]) = (123, b"random"); 523 | let one: Vec = vec![123]; 524 | let two: Vec = b"random".to_vec(); 525 | assert_eq!(pair.prefix(), vec![one.as_slice(), two.as_slice()]); 526 | 527 | let pair: (i8, &[u8]) = (123, b"random"); 528 | // Signed int keys are "sign-flipped" 529 | let one: Vec = vec![123 ^ 0x80]; 530 | let two: Vec = b"random".to_vec(); 531 | assert_eq!(pair.prefix(), vec![one.as_slice(), two.as_slice()]); 532 | } 533 | 534 | #[test] 535 | fn naked_16bit_prefixes() { 536 | let pair: (u16, &[u8]) = (12345, b"random"); 537 | let one: Vec = vec![48, 57]; 538 | let two: Vec = b"random".to_vec(); 539 | assert_eq!(pair.prefix(), vec![one.as_slice(), two.as_slice()]); 540 | 541 | let pair: (i16, &[u8]) = (12345, b"random"); 542 | // Signed int keys are "sign-flipped" 543 | let one: Vec = vec![48 ^ 0x80, 57]; 544 | let two: Vec = b"random".to_vec(); 545 | assert_eq!(pair.prefix(), vec![one.as_slice(), two.as_slice()]); 546 | } 547 | 548 | #[test] 549 | fn naked_64bit_prefixes() { 550 | let pair: (u64, &[u8]) = (12345, b"random"); 551 | let one: Vec = 
vec![0, 0, 0, 0, 0, 0, 48, 57]; 552 | let two: Vec = b"random".to_vec(); 553 | assert_eq!(pair.prefix(), vec![one.as_slice(), two.as_slice()]); 554 | 555 | let pair: (i64, &[u8]) = (12345, b"random"); 556 | // Signed int keys are "sign-flipped" 557 | #[allow(clippy::identity_op)] 558 | let one: Vec = vec![0 ^ 0x80, 0, 0, 0, 0, 0, 48, 57]; 559 | let two: Vec = b"random".to_vec(); 560 | assert_eq!(pair.prefix(), vec![one.as_slice(), two.as_slice()]); 561 | } 562 | 563 | #[test] 564 | fn naked_proper_prefixes() { 565 | let pair: (u32, &[u8]) = (12345, b"random"); 566 | let one: Vec = vec![0, 0, 48, 57]; 567 | let two: Vec = b"random".to_vec(); 568 | assert_eq!(pair.prefix(), vec![one.as_slice(), two.as_slice()]); 569 | 570 | let triple: (&str, u32, &[u8]) = ("begin", 12345, b"end"); 571 | let one: Vec = b"begin".to_vec(); 572 | let two: Vec = vec![0, 0, 48, 57]; 573 | let three: Vec = b"end".to_vec(); 574 | assert_eq!( 575 | triple.prefix(), 576 | vec![one.as_slice(), two.as_slice(), three.as_slice()] 577 | ); 578 | 579 | // same works with owned variants (&str -> String, &[u8] -> Vec) 580 | let owned_triple: (String, u32, Vec) = ("begin".to_string(), 12345, b"end".to_vec()); 581 | assert_eq!( 582 | owned_triple.prefix(), 583 | vec![one.as_slice(), two.as_slice(), three.as_slice()] 584 | ); 585 | } 586 | } 587 | -------------------------------------------------------------------------------- /src/prefix.rs: -------------------------------------------------------------------------------- 1 | #![cfg(feature = "iterator")] 2 | use core::fmt; 3 | use cosmwasm_std::storage_keys::to_length_prefixed_nested; 4 | use serde::de::DeserializeOwned; 5 | use serde::Serialize; 6 | use std::fmt::Debug; 7 | use std::marker::PhantomData; 8 | 9 | use cosmwasm_std::{Order, Record, StdResult, Storage}; 10 | use std::ops::Deref; 11 | 12 | use crate::bound::{PrefixBound, RawBound}; 13 | use crate::de::KeyDeserialize; 14 | use crate::iter_helpers::{concat, deserialize_kv, deserialize_v, 
trim};
use crate::keys::Key;
use crate::{Bound, Prefixer, PrimaryKey};

/// An iterable view over all entries under a fixed namespace prefix.
///
/// `K` is the (deserializable) key type yielded by `range`/`keys`,
/// `T` is the stored value type, and `B` is the bound key type used
/// for `min`/`max` arguments (defaults to raw `Vec<u8>` bounds).
#[derive(Clone)]
pub struct Prefix<K, T, B = Vec<u8>>
where
    K: KeyDeserialize,
    T: Serialize + DeserializeOwned,
{
    /// all namespaces prefixes and concatenated with the key
    pub(crate) storage_prefix: Vec<u8>,
    // see https://doc.rust-lang.org/std/marker/struct.PhantomData.html#unused-type-parameters for why this is needed
    pub(crate) data: PhantomData<(T, K, B)>,
}

impl<K, T, B> Debug for Prefix<K, T, B>
where
    K: KeyDeserialize,
    T: Serialize + DeserializeOwned,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Prefix")
            .field("storage_prefix", &self.storage_prefix)
            .finish_non_exhaustive()
    }
}

impl<K, T, B> Deref for Prefix<K, T, B>
where
    K: KeyDeserialize,
    T: Serialize + DeserializeOwned,
{
    type Target = [u8];

    fn deref(&self) -> &[u8] {
        &self.storage_prefix
    }
}

impl<K, T, B> Prefix<K, T, B>
where
    K: KeyDeserialize,
    T: Serialize + DeserializeOwned,
{
    /// Builds the length-prefixed storage namespace from a top-level name
    /// plus any sub-prefix keys.
    pub fn new(top_name: &[u8], sub_names: &[Key]) -> Self {
        let calculated_len = 1 + sub_names.len();
        let mut combined: Vec<&[u8]> = Vec::with_capacity(calculated_len);
        combined.push(top_name);
        combined.extend(sub_names.iter().map(|sub_name| sub_name.as_ref()));
        debug_assert_eq!(calculated_len, combined.len()); // as long as we calculate correctly, we don't need to reallocate
        let storage_prefix = to_length_prefixed_nested(&combined);
        Prefix {
            storage_prefix,
            data: PhantomData,
        }
    }
}

impl<'b, K, T, B> Prefix<K, T, B>
where
    B: PrimaryKey<'b>,
    K: KeyDeserialize,
    T: Serialize + DeserializeOwned,
{
    /// Iterates over records under this prefix, yielding raw (untyped) keys
    /// and deserialized values.
    pub fn range_raw<'a>(
        &self,
        store: &'a dyn Storage,
        min: Option<Bound<'b, B>>,
        max: Option<Bound<'b, B>>,
        order: Order,
    ) -> Box<dyn Iterator<Item = StdResult<Record<T>>> + 'a>
    where
        T: 'a,
    {
        let mapped = range_with_prefix(
            store,
            &self.storage_prefix,
            min.map(|b| b.to_raw_bound()),
            max.map(|b| b.to_raw_bound()),
            order,
        )
        .map(deserialize_v);
        Box::new(mapped)
    }

    /// Iterates over raw (untyped) keys under this prefix.
    pub fn keys_raw<'a>(
        &self,
        store: &'a dyn Storage,
        min: Option<Bound<'b, B>>,
        max: Option<Bound<'b, B>>,
        order: Order,
    ) -> Box<dyn Iterator<Item = Vec<u8>> + 'a> {
        keys_with_prefix(
            store,
            &self.storage_prefix,
            min.map(|b| b.to_raw_bound()),
            max.map(|b| b.to_raw_bound()),
            order,
        )
    }

    /// Clears the prefix, removing the first `limit` elements (or all if `limit == None`).
    pub fn clear(&self, store: &mut dyn Storage, limit: Option<usize>) {
        const TAKE: usize = 10;
        let mut cleared = false;

        let mut left_to_clear = limit.unwrap_or(usize::MAX);

        while !cleared {
            // Take just TAKE elements to prevent possible heap overflow if the prefix is big,
            // but don't take more than we want to clear.
            let take = TAKE.min(left_to_clear);

            let paths = keys_full(store, &self.storage_prefix, None, None, Order::Ascending)
                .take(take)
                .collect::<Vec<_>>();

            for path in &paths {
                store.remove(path);
            }
            left_to_clear -= paths.len();

            // stop when a short batch signals exhaustion, or the quota is used up
            cleared = paths.len() < take || left_to_clear == 0;
        }
    }

    /// Returns `true` if the prefix is empty.
    pub fn is_empty(&self, store: &dyn Storage) -> bool {
        keys_full(store, &self.storage_prefix, None, None, Order::Ascending)
            .next()
            .is_none()
    }

    /// Iterates over records under this prefix, yielding deserialized
    /// `(key, value)` pairs.
    pub fn range<'a>(
        &self,
        store: &'a dyn Storage,
        min: Option<Bound<'b, B>>,
        max: Option<Bound<'b, B>>,
        order: Order,
    ) -> Box<dyn Iterator<Item = StdResult<(K::Output, T)>> + 'a>
    where
        T: 'a,
        K::Output: 'static,
    {
        let mapped = range_with_prefix(
            store,
            &self.storage_prefix,
            min.map(|b| b.to_raw_bound()),
            max.map(|b| b.to_raw_bound()),
            order,
        )
        .map(|kv| deserialize_kv::<K, T>(kv));
        Box::new(mapped)
    }

    /// Iterates over deserialized keys under this prefix.
    pub fn keys<'a>(
        &self,
        store: &'a dyn Storage,
        min: Option<Bound<'b, B>>,
        max: Option<Bound<'b, B>>,
        order: Order,
    ) -> Box<dyn Iterator<Item = StdResult<K::Output>> + 'a>
    where
        T: 'a,
        K::Output: 'static,
    {
        let mapped = keys_with_prefix(
            store,
            &self.storage_prefix,
            min.map(|b| b.to_raw_bound()),
            max.map(|b| b.to_raw_bound()),
            order,
        )
        .map(|k| K::from_vec(k));
        Box::new(mapped)
    }
}

/// Returns an iterator through all records in storage with the given prefix and
/// within the given bounds, yielding the key without prefix and value.
pub fn range_with_prefix<'a>(
    storage: &'a dyn Storage,
    namespace: &[u8],
    start: Option<RawBound>,
    end: Option<RawBound>,
    order: Order,
) -> Box<dyn Iterator<Item = Record> + 'a> {
    // make a copy for the closure to handle lifetimes safely
    let prefix = namespace.to_vec();
    let mapped =
        range_full(storage, namespace, start, end, order).map(move |(k, v)| (trim(&prefix, &k), v));
    Box::new(mapped)
}

/// Returns an iterator through all keys in storage with the given prefix and
/// within the given bounds, yielding the key without the prefix.
pub fn keys_with_prefix<'a>(
    storage: &'a dyn Storage,
    namespace: &[u8],
    start: Option<RawBound>,
    end: Option<RawBound>,
    order: Order,
) -> Box<dyn Iterator<Item = Vec<u8>> + 'a> {
    // make a copy for the closure to handle lifetimes safely
    let prefix = namespace.to_vec();
    let mapped = keys_full(storage, namespace, start, end, order).map(move |k| trim(&prefix, &k));
    Box::new(mapped)
}

/// Returns an iterator through all records in storage within the given bounds,
/// yielding the full key (including the prefix) and value.
pub(crate) fn range_full<'a>(
    store: &'a dyn Storage,
    namespace: &[u8],
    start: Option<RawBound>,
    end: Option<RawBound>,
    order: Order,
) -> impl Iterator<Item = Record> + 'a {
    let start = calc_start_bound(namespace, start);
    let end = calc_end_bound(namespace, end);

    // get iterator from storage
    store.range(Some(&start), Some(&end), order)
}

/// Returns an iterator through all keys in storage within the given bounds,
/// yielding the full key including the prefix.
pub(crate) fn keys_full<'a>(
    store: &'a dyn Storage,
    namespace: &[u8],
    start: Option<RawBound>,
    end: Option<RawBound>,
    order: Order,
) -> impl Iterator<Item = Vec<u8>> + 'a {
    let start = calc_start_bound(namespace, start);
    let end = calc_end_bound(namespace, end);

    // get iterator from storage
    store.range_keys(Some(&start), Some(&end), order)
}

/// Computes the absolute (namespaced) start key for a bounded range.
fn calc_start_bound(namespace: &[u8], bound: Option<RawBound>) -> Vec<u8> {
    match bound {
        None => namespace.to_vec(),
        // this is the natural limits of the underlying Storage
        Some(RawBound::Inclusive(limit)) => concat(namespace, &limit),
        // appending a zero byte yields the smallest key strictly greater than `limit`
        Some(RawBound::Exclusive(limit)) => concat(namespace, &extend_one_byte(&limit)),
    }
}

/// Computes the absolute (namespaced) end key for a bounded range.
fn calc_end_bound(namespace: &[u8], bound: Option<RawBound>) -> Vec<u8> {
    match bound {
        None => increment_last_byte(namespace),
        // this is the natural limits of the underlying Storage
        Some(RawBound::Exclusive(limit)) => concat(namespace, &limit),
        // appending a zero byte makes the (exclusive) end include `limit` itself
        Some(RawBound::Inclusive(limit)) => concat(namespace, &extend_one_byte(&limit)),
    }
}

/// Returns an iterator over all records under `namespace`, optionally bounded
/// by *prefix* bounds (bounds that cover every key sharing the given prefix),
/// yielding the full key minus the namespace.
pub fn namespaced_prefix_range<'a, 'c, K: Prefixer<'a>>(
    storage: &'c dyn Storage,
    namespace: &[u8],
    start: Option<PrefixBound<'a, K>>,
    end: Option<PrefixBound<'a, K>>,
    order: Order,
) -> Box<dyn Iterator<Item = Record> + 'c> {
    let prefix = to_length_prefixed_nested(&[namespace]);
    let start = calc_prefix_start_bound(&prefix, start);
    let end = calc_prefix_end_bound(&prefix, end);

    // get iterator from storage
    let base_iterator = storage.range(Some(&start), Some(&end), order);

    // make a copy for the closure to handle lifetimes safely
    let mapped = base_iterator.map(move |(k, v)| (trim(&prefix, &k), v));
    Box::new(mapped)
}

/// Computes the absolute start key for a prefix-bounded range.
fn calc_prefix_start_bound<'a, K: Prefixer<'a>>(
    namespace: &[u8],
    bound: Option<PrefixBound<'a, K>>,
) -> Vec<u8> {
    match bound.map(|b| b.to_raw_bound()) {
        None =>
namespace.to_vec(),
        // this is the natural limits of the underlying Storage
        Some(RawBound::Inclusive(limit)) => concat(namespace, &limit),
        // an exclusive *prefix* bound skips every key under that prefix,
        // hence increment rather than extend
        Some(RawBound::Exclusive(limit)) => concat(namespace, &increment_last_byte(&limit)),
    }
}

/// Computes the absolute end key for a prefix-bounded range.
fn calc_prefix_end_bound<'a, K: Prefixer<'a>>(
    namespace: &[u8],
    bound: Option<PrefixBound<'a, K>>,
) -> Vec<u8> {
    match bound.map(|b| b.to_raw_bound()) {
        None => increment_last_byte(namespace),
        // this is the natural limits of the underlying Storage
        Some(RawBound::Exclusive(limit)) => concat(namespace, &limit),
        // an inclusive *prefix* bound must cover every key under that prefix,
        // hence increment rather than extend
        Some(RawBound::Inclusive(limit)) => concat(namespace, &increment_last_byte(&limit)),
    }
}

/// Returns `limit` with a zero byte appended: the smallest key strictly
/// greater than `limit` itself.
pub(crate) fn extend_one_byte(limit: &[u8]) -> Vec<u8> {
    let mut v = limit.to_vec();
    v.push(0);
    v
}

/// Returns a new vec of same length and last byte incremented by one
/// If last bytes are 255, we handle overflow up the chain.
/// If all bytes are 255, this returns wrong data - but that is never possible as a namespace
fn increment_last_byte(input: &[u8]) -> Vec<u8> {
    let mut copy = input.to_vec();
    // zero out all trailing 255, increment first that is not such
    for i in (0..input.len()).rev() {
        if copy[i] == 255 {
            copy[i] = 0;
        } else {
            copy[i] += 1;
            break;
        }
    }
    copy
}

#[cfg(test)]
mod test {
    use super::*;
    use cosmwasm_std::testing::MockStorage;

    #[test]
    fn ensure_proper_range_bounds() {
        let mut store = MockStorage::new();
        // manually create this - not testing nested prefixes here
        let prefix: Prefix<Vec<u8>, u64> = Prefix {
            storage_prefix: b"foo".to_vec(),
            data: PhantomData,
        };

        // set some data, we care about "foo" prefix
        store.set(b"foobar", b"1");
        store.set(b"foora", b"2");
        store.set(b"foozi", b"3");
        // these shouldn't match
        store.set(b"foply", b"100");
        store.set(b"font", b"200");

        let expected = vec![
            (b"bar".to_vec(), 1u64),
            (b"ra".to_vec(), 2u64),
            (b"zi".to_vec(), 3u64),
        ];
        let expected_reversed: Vec<(Vec<u8>, u64)> = expected.iter().rev().cloned().collect();

        // let's do the basic sanity check
        let res: StdResult<Vec<_>> = prefix
            .range_raw(&store, None, None, Order::Ascending)
            .collect();
        assert_eq!(&expected, &res.unwrap());
        let res: StdResult<Vec<_>> = prefix
            .range_raw(&store, None, None, Order::Descending)
            .collect();
        assert_eq!(&expected_reversed, &res.unwrap());

        // now let's check some ascending ranges
        let res: StdResult<Vec<_>> = prefix
            .range_raw(
                &store,
                Some(Bound::inclusive(b"ra".to_vec())),
                None,
                Order::Ascending,
            )
            .collect();
        assert_eq!(&expected[1..], res.unwrap().as_slice());
        // skip excluded
        let res: StdResult<Vec<_>> = prefix
            .range_raw(
                &store,
                Some(Bound::exclusive(b"ra".to_vec())),
                None,
                Order::Ascending,
            )
            .collect();
        assert_eq!(&expected[2..], res.unwrap().as_slice());
        // if we exclude something a little lower, we get matched
        let res: StdResult<Vec<_>> = prefix
            .range_raw(
                &store,
                Some(Bound::exclusive(b"r".to_vec())),
                None,
                Order::Ascending,
            )
            .collect();
        assert_eq!(&expected[1..], res.unwrap().as_slice());

        // now let's check some descending ranges
        let res: StdResult<Vec<_>> = prefix
            .range_raw(
                &store,
                None,
                Some(Bound::inclusive(b"ra".to_vec())),
                Order::Descending,
            )
            .collect();
        assert_eq!(&expected_reversed[1..], res.unwrap().as_slice());
        // skip excluded
        let res: StdResult<Vec<_>> = prefix
            .range_raw(
                &store,
                None,
                Some(Bound::exclusive(b"ra".to_vec())),
                Order::Descending,
            )
            .collect();
        assert_eq!(&expected_reversed[2..], res.unwrap().as_slice());
        // if we exclude something a little higher, we get matched
        let res: StdResult<Vec<_>> = prefix
            .range_raw(
                &store,
                None,
                Some(Bound::exclusive(b"rb".to_vec())),
                Order::Descending,
            )
            .collect();
        assert_eq!(&expected_reversed[1..], res.unwrap().as_slice());

        // now test when both sides are set
        let res: StdResult<Vec<_>> = prefix
            .range_raw(
                &store,
                Some(Bound::inclusive(b"ra".to_vec())),
                Some(Bound::exclusive(b"zi".to_vec())),
                Order::Ascending,
            )
            .collect();
        assert_eq!(&expected[1..2], res.unwrap().as_slice());
        // and descending
        let res: StdResult<Vec<_>> = prefix
            .range_raw(
                &store,
                Some(Bound::inclusive(b"ra".to_vec())),
                Some(Bound::exclusive(b"zi".to_vec())),
                Order::Descending,
            )
            .collect();
        assert_eq!(&expected[1..2], res.unwrap().as_slice());
        // Include both sides
        let res: StdResult<Vec<_>> = prefix
            .range_raw(
                &store,
                Some(Bound::inclusive(b"ra".to_vec())),
                Some(Bound::inclusive(b"zi".to_vec())),
                Order::Descending,
            )
            .collect();
        assert_eq!(&expected_reversed[..2], res.unwrap().as_slice());
        // Exclude both sides
        let res: StdResult<Vec<_>> = prefix
            .range_raw(
                &store,
                Some(Bound::exclusive(b"ra".to_vec())),
                Some(Bound::exclusive(b"zi".to_vec())),
                Order::Ascending,
            )
            .collect();
        assert_eq!(res.unwrap().as_slice(), &[]);
    }

    #[test]
    fn prefix_debug() {
        // NOTE(review): the type parameters were lost in extraction; they do not
        // affect the Debug output being asserted here — confirm against upstream
        let prefix: Prefix<Vec<u8>, u64> = Prefix::new(b"lol", &[Key::Val8([8; 1])]);
        assert_eq!(
            format!("{prefix:?}"),
            "Prefix { storage_prefix: [0, 3, 108, 111, 108, 0, 1, 8], .. }"
        );
    }

    #[test]
    fn prefix_clear_limited() {
        let mut store = MockStorage::new();
        // manually create this - not testing nested prefixes here
        let prefix: Prefix<Vec<u8>, u64> = Prefix {
            storage_prefix: b"foo".to_vec(),
            data: PhantomData,
        };

        // set some data, we care about "foo" prefix
        for i in 0..100u32 {
            store.set(format!("foo{i}").as_bytes(), b"1");
        }

        // clearing less than `TAKE` should work
        prefix.clear(&mut store, Some(1));
        assert_eq!(
            prefix.range(&store, None, None, Order::Ascending).count(),
            99
        );

        // clearing more than `TAKE` should work
        prefix.clear(&mut store, Some(12));
        assert_eq!(
            prefix.range(&store, None, None, Order::Ascending).count(),
            99 - 12
        );

        // clearing an exact multiple of `TAKE` should work
        prefix.clear(&mut store, Some(20));
        assert_eq!(
            prefix.range(&store, None, None, Order::Ascending).count(),
            99 - 12 - 20
        );

        // clearing more than available should work
        prefix.clear(&mut store, Some(1000));
        assert_eq!(
            prefix.range(&store, None, None, Order::Ascending).count(),
            0
        );
    }

    #[test]
    fn prefix_clear_unlimited() {
        let mut store = MockStorage::new();
        // manually create this - not testing nested prefixes here
        let prefix: Prefix<Vec<u8>, u64> = Prefix {
            storage_prefix: b"foo".to_vec(),
            data: PhantomData,
        };

        // set some data, we care about "foo" prefix
        for i in 0..1000u32 {
            store.set(format!("foo{i}").as_bytes(), b"1");
        }

        // clearing all should work
        prefix.clear(&mut store, None);
        assert_eq!(
            prefix.range(&store, None, None, Order::Ascending).count(),
            0
        );

        // set less data
        for i in 0..5u32 {
            store.set(format!("foo{i}").as_bytes(), b"1");
        }

        // clearing all should work
        prefix.clear(&mut store, None);
        assert_eq!(
            prefix.range(&store, None, None, Order::Ascending).count(),
            0
        );
    }

    #[test]
    fn is_empty_works() {
        // manually create this - not testing nested prefixes here
        let prefix: Prefix<Vec<u8>, u64> = Prefix {
            storage_prefix: b"foo".to_vec(),
            data: PhantomData,
        };

        let mut storage = MockStorage::new();

        assert!(prefix.is_empty(&storage));

        storage.set(b"fookey1", b"1");
        storage.set(b"fookey2", b"2");

        assert!(!prefix.is_empty(&storage));
    }

    #[test]
    fn keys_raw_works() {
        // manually create this - not testing nested prefixes here
        let prefix: Prefix<Vec<u8>, u64> = Prefix {
            storage_prefix: b"foo".to_vec(),
            data: PhantomData,
        };

        let mut storage = MockStorage::new();
        storage.set(b"fookey1", b"1");
        storage.set(b"fookey2", b"2");

        let keys: Vec<_> = prefix
            .keys_raw(&storage, None, None, Order::Ascending)
            .collect();
        assert_eq!(keys, vec![b"key1", b"key2"]);

        let keys: Vec<_> = prefix
            .keys_raw(
                &storage,
                Some(Bound::exclusive("key1")),
                None,
                Order::Ascending,
            )
            .collect();
        assert_eq!(keys, vec![b"key2"]);
    }
}
--------------------------------------------------------------------------------