├── .gitignore
├── Cargo.lock
├── Cargo.toml
├── README.md
├── asb-authdb
│   ├── Cargo.toml
│   ├── authdb-trait
│   │   ├── Cargo.toml
│   │   └── src
│   │       └── lib.rs
│   ├── blake2-hasher
│   │   ├── Cargo.toml
│   │   └── src
│   │       └── lib.rs
│   ├── lvmt-db
│   │   ├── Cargo.toml
│   │   ├── benches
│   │   │   └── bench.rs
│   │   ├── lvmt-serde-derive
│   │   │   ├── Cargo.toml
│   │   │   └── src
│   │   │       └── lib.rs
│   │   ├── pp
│   │   │   └── power-tau-ho1sTw-06.bin
│   │   ├── ppot2ark
│   │   │   ├── Cargo.lock
│   │   │   ├── Cargo.toml
│   │   │   └── src
│   │   │       ├── adapter.rs
│   │   │       └── lib.rs
│   │   └── src
│   │       ├── amt
│   │       │   ├── mod.rs
│   │       │   ├── node.rs
│   │       │   ├── test.rs
│   │       │   ├── tree.rs
│   │       │   └── write_guard.rs
│   │       ├── bin
│   │       │   └── bn254pp_from_ppot.rs
│   │       ├── crypto
│   │       │   ├── error.rs
│   │       │   ├── export.rs
│   │       │   ├── mod.rs
│   │       │   ├── power_tau.rs
│   │       │   ├── prove_params.rs
│   │       │   └── utils.rs
│   │       ├── enable_log.rs
│   │       ├── lib.rs
│   │       ├── lvmt_db.rs
│   │       ├── merkle
│   │       │   └── mod.rs
│   │       ├── multi_layer_amt
│   │       │   ├── key.rs
│   │       │   ├── mod.rs
│   │       │   ├── name.rs
│   │       │   ├── node.rs
│   │       │   └── tree.rs
│   │       ├── serde
│   │       │   ├── basic.rs
│   │       │   ├── curves.rs
│   │       │   ├── h256.rs
│   │       │   └── mod.rs
│   │       ├── single_amt
│   │       │   └── mod.rs
│   │       └── storage
│   │           ├── access.rs
│   │           ├── kvdb.rs
│   │           ├── layout.rs
│   │           └── mod.rs
│   ├── parity-journaldb
│   │   ├── Cargo.toml
│   │   ├── README.md
│   │   ├── fastmap
│   │   │   ├── Cargo.toml
│   │   │   └── src
│   │   │       └── lib.rs
│   │   ├── memory-db
│   │   │   ├── Cargo.toml
│   │   │   ├── README.md
│   │   │   └── src
│   │   │       └── lib.rs
│   │   └── src
│   │       ├── archivedb.rs
│   │       ├── as_hash_db_impls.rs
│   │       ├── earlymergedb.rs
│   │       ├── hasher.rs
│   │       ├── lib.rs
│   │       ├── mertics.rs
│   │       ├── overlaydb.rs
│   │       ├── overlayrecentdb.rs
│   │       ├── refcounteddb.rs
│   │       ├── traits.rs
│   │       └── util.rs
│   ├── patricia-trie-ethereum
│   │   ├── Cargo.toml
│   │   └── src
│   │       ├── lib.rs
│   │       └── rlp_node_codec.rs
│   ├── rainblock-trie
│   │   ├── Cargo.toml
│   │   └── src
│   │       ├── child_ref.rs
│   │       ├── lib.rs
│   │       ├── nibble.rs
│   │       ├── rain_mpt.rs
│   │       ├── tests.rs
│   │       ├── thread_non_safe.rs
│   │       ├── thread_safe.rs
│   │       ├── trie_node.rs
│   │       └── trie_node_ext.rs
│   └── src
│       ├── amt.rs
│       ├── lib.rs
│       ├── lmpts.rs
│       ├── lvmt.rs
│       ├── mpt.rs
│       ├── rain_mpt.rs
│       └── raw.rs
├── asb-backend
│   ├── Cargo.toml
│   ├── cfx-kvdb-rocksdb
│   │   ├── Cargo.toml
│   │   └── src
│   │       ├── lib.rs
│   │       └── stats.rs
│   ├── parity-stats
│   │   ├── Cargo.toml
│   │   └── src
│   │       └── lib.rs
│   └── src
│       ├── cfx_kvdb_rocksdb.rs
│       ├── db_with_mertics.rs
│       ├── in_mem_with_metrics.rs
│       ├── lib.rs
│       ├── mdbx.rs
│       └── parity_kvdb_rocksdb.rs
├── asb-options
│   ├── Cargo.toml
│   └── src
│       └── lib.rs
├── asb-profile
│   ├── Cargo.toml
│   └── src
│       ├── counter.rs
│       ├── lib.rs
│       └── profiler.rs
├── asb-tasks
│   ├── Cargo.toml
│   └── src
│       ├── lib.rs
│       ├── read_then_write.rs
│       └── real_trace.rs
├── benchmarks
│   ├── Cargo.toml
│   └── src
│       ├── main.rs
│       └── run.rs
├── run.py
└── rust-toolchain

-------------------------------------------------------------------------------- /.gitignore: --------------------------------------------------------------------------------
1 | /paper_experiment
2 | /target
3 | /lvmt-db/pp
4 | /pp
5 | /trace
6 | /thesis
7 | /results
8 | /warmup
9 | flamegraph.svg
10 | *.pdf
11 | *.log
12 | .vscode

-------------------------------------------------------------------------------- /Cargo.toml: --------------------------------------------------------------------------------
1 | [workspace]
2 | 
3 | members = [
4 |     "benchmarks",
5 |     "asb-backend",
6 |     "asb-options",
7 |     "asb-profile",
8 |     "asb-authdb",
9 |     "asb-tasks",
10 |     "asb-authdb/lvmt-db/ppot2ark"
11 | ]
12 | default-members = ["benchmarks"]
13 | 
14 | [workspace.dependencies]
15 | ################
16 | # ASB modules
17 | ################
18 | asb-authdb = { path = "asb-authdb" }
19 | asb-options = { path = "asb-options" }
20 | asb-backend = { path = "asb-backend" }
21 | asb-profile = { path = "asb-profile" }
22 | asb-tasks = { path = "asb-tasks" }
23 | 
24 | #######################
25 | # Key-value Database
26 | #######################
27 | 
28 | # Interfaces
29 | kvdb = "0.4"
30 | kvdb07 = { package = "kvdb", version = "0.7" }
31 | 
32 | # Backend Implementations
33 | kvdb-memorydb = "0.4.0"
34 | libmdbx = "0.1.12"
35 | 
36 | # Utils
37 | malloc_size_of = { git = "https://github.com/Conflux-Chain/conflux-rust.git", rev = "9de2cc9" }
38 | malloc_size_of_derive = { git = "https://github.com/Conflux-Chain/conflux-rust.git", rev = "9de2cc9" }
39 | parity-util-mem = "0.5.2"
40 | stats = { path = "asb-backend/parity-stats", package = "parity-stats" }
41 | 
42 | 
43 | ###########################
44 | # Authenticated Database
45 | ###########################
46 | 
47 | # Interface
48 | authdb = { path = "asb-authdb/authdb-trait", package = "authdb-trait" }
49 | 
50 | # AuthDB implementations
51 | lvmt-db = { path = "asb-authdb/lvmt-db", features = ["large_lvmt"] }
52 | parity-journaldb = { path = "asb-authdb/parity-journaldb" }
53 | patricia-trie-ethereum = { path = "asb-authdb/patricia-trie-ethereum" }
54 | rainblock-trie = { path = "asb-authdb/rainblock-trie" }
55 | 
56 | # Utils
57 | parity-scale-codec = "1.3.5"
58 | keccak-hasher = { git = "https://github.com/openethereum/openethereum.git", rev = "2ae2949" }
59 | blake2-hasher = { path = "asb-authdb/blake2-hasher" }
60 | hash-db = "0.11.0"
61 | trie-db = "0.11.0"
62 | 
63 | ###########
64 | # Types
65 | ###########
66 | cfx-primitives = { package = "primitives", git = "https://github.com/Conflux-Chain/conflux-rust.git", rev = "9de2cc9", features = ["test_no_account_length_check"] }
67 | cfx-types = { git = "https://github.com/Conflux-Chain/conflux-rust.git", rev = "9de2cc9" }
68 | primitive-types = "0.7.3"
69 | 
70 | ###########
71 | # Others
72 | ###########
73 | lazy_static = "1.4.0"
74 | tokio = "0.2.25"

-------------------------------------------------------------------------------- /asb-authdb/Cargo.toml: --------------------------------------------------------------------------------
1 | [package]
2 | name = "asb-authdb"
3 | version = "0.1.0"
4 | edition = "2021"
5 | 
6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
7 | 
8 | [dependencies]
9 | asb-options = { workspace = true }
10 | asb-backend = { workspace = true }
11 | asb-profile = { workspace = true }
12 | authdb = { workspace = true }
13 | parity-journaldb = { workspace = true }
14 | kvdb = { workspace = true }
15 | lvmt-db = { workspace = true }
16 | patricia-trie-ethereum = { workspace = true }
17 | primitive-types = { workspace = true }
18 | hash-db = { workspace = true }
19 | trie-db = { workspace = true }
20 | parity-scale-codec = { workspace = true }
21 | cfx-primitives = { workspace = true }
22 | rainblock-trie = { workspace = true }
23 | 
24 | [features]
25 | light-hash = ["parity-journaldb/light-hash", "rainblock-trie/light-hash"]
26 | lmpts = ["asb-backend/lmpts-backend"]
27 | thread-safe = ["rainblock-trie/thread-safe"]

-------------------------------------------------------------------------------- /asb-authdb/authdb-trait/Cargo.toml: --------------------------------------------------------------------------------
1 | [package]
2 | name = "authdb-trait"
3 | version = "0.1.0"
4 | edition = "2021"
5 | authors = ["Chenxing Li "]
6 | 
7 | 
8 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
9 | 
10 | [dependencies]
11 | kvdb = { workspace = true }
-------------------------------------------------------------------------------- /asb-authdb/authdb-trait/src/lib.rs: --------------------------------------------------------------------------------
1 | use kvdb::{DBOp, DBTransaction, KeyValueDB};
2 | use std::sync::Arc;
3 | 
4 | pub trait AuthDB {
5 |     fn get(&self, key: Vec<u8>) -> Option<Box<[u8]>>;
6 |     fn set(&mut self, key: Vec<u8>, value: Vec<u8>);
7 |     fn commit(&mut self, index: usize);
8 | 
9 |     fn flush_all(&mut self) {}
10 |     fn backend(&self) -> Option<&dyn KeyValueDB>;
11 | }
12 | 
13 | impl AuthDB for Arc<dyn KeyValueDB> {
14 |     fn get(&self, key: Vec<u8>) -> Option<Box<[u8]>> {
15 |         KeyValueDB::get(&**self, 0, key.as_ref())
16 |             .unwrap()
17 |             .map(|x| x.into_boxed_slice())
18 |     }
19 | 
20 |     fn set(&mut self, key: Vec<u8>, value: Vec<u8>) {
21 |         self.write_buffered(DBTransaction {
22 |             ops: vec![DBOp::Insert {
23 |                 col: 0,
24 |                 key: key.into(),
25 |                 value,
26 |             }],
27 |         });
28 |     }
29 | 
30 |     fn commit(&mut self, _index: usize) {
31 |         self.flush().unwrap()
32 |     }
33 | 
34 |     fn backend(&self) -> Option<&dyn KeyValueDB> {
35 |         Some(&**self)
36 |     }
37 | }
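
A minimal sketch of what an `AuthDB` implementor looks like — hypothetical editor-added code, not a crate in this repository: any store that can get/set byte vectors and flush at an epoch boundary satisfies the interface.

// Hypothetical illustration (not part of the repository): the simplest
// possible AuthDB, a plain in-memory map with no authentication at all.
use std::collections::HashMap;

struct PlainMapDB {
    data: HashMap<Vec<u8>, Vec<u8>>,
}

impl AuthDB for PlainMapDB {
    fn get(&self, key: Vec<u8>) -> Option<Box<[u8]>> {
        self.data.get(&key).map(|v| v.clone().into_boxed_slice())
    }
    fn set(&mut self, key: Vec<u8>, value: Vec<u8>) {
        self.data.insert(key, value);
    }
    // Nothing is buffered, so there is nothing to flush per epoch.
    fn commit(&mut self, _index: usize) {}
    // A plain map has no underlying `KeyValueDB` to expose for metrics.
    fn backend(&self) -> Option<&dyn KeyValueDB> {
        None
    }
}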
authors = ["ChenxingLi "] 5 | edition = "2018" 6 | 7 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 8 | 9 | [dependencies] 10 | ark-ec = { version="^0.2.0", features = ["std"] } 11 | ark-ff = { version="^0.2.0", features = ["std"] } 12 | ark-poly = { version="^0.2.0", features = ["std", "parallel"] } 13 | ark-serialize = { version="^0.2.0", features = ["std"] } 14 | ark-std = { version="^0.2.0" } 15 | ark-bls12-381 = { version = "^0.2.0", features = ["curve"]} 16 | ark-bn254 = { version = "^0.2.0", features = ["curve"] } 17 | ppot2ark = { path = "./ppot2ark" } 18 | 19 | ethereum-types = "0.9" 20 | keccak-hash = "0.5.1" 21 | log = "0.4" 22 | log4rs = { version = "1.0.0", features = ["background_rotation", "gzip"] } 23 | rand = "0.7" 24 | error-chain = { version = "0.12", default-features = false } 25 | base64 = "0.13.0" 26 | static_assertions = "^1.0" 27 | exitcode = "1.1.2" 28 | global = "0.3.0" 29 | integer-encoding = "3.0.2" 30 | hashbrown = "0.11.2" 31 | 32 | lvmt-serde-derive = { path="./lvmt-serde-derive" } 33 | 34 | kvdb="0.4" 35 | rayon=">=1.5" 36 | 37 | kvdb-memorydb = "0.4.0" 38 | 39 | 40 | [dev-dependencies] 41 | unroll = "*" 42 | 43 | [features] 44 | medium_lvmt = [] 45 | large_lvmt = [] 46 | huge_lvmt = [] 47 | no_cache_pow = [] 48 | 49 | [[bench]] 50 | name = "bench" 51 | path = "benches/bench.rs" -------------------------------------------------------------------------------- /asb-authdb/lvmt-db/benches/bench.rs: -------------------------------------------------------------------------------- 1 | #![feature(test)] 2 | #![cfg(test)] 3 | extern crate test; 4 | extern crate unroll; 5 | 6 | use ethereum_types::H256; 7 | use lvmt_db::crypto::export::G1Projective; 8 | use lvmt_db::crypto::{AMTParams, Pairing, TypeDepths, TypeUInt}; 9 | use lvmt_db::lvmt_db::NUM_COLS; 10 | use lvmt_db::{Key, LvmtDB, Proof}; 11 | use rand::Rng; 12 | use std::collections::HashMap; 13 | use std::sync::Arc; 14 | use test::{black_box, Bencher}; 15 | use unroll::unroll_for_loops; 16 | 17 | #[unroll_for_loops] 18 | fn mul_u64(x: &mut [u64; 1000], y: &[u64; 1000]) { 19 | for i in 0..1000 { 20 | x[i] *= y[i]; 21 | } 22 | } 23 | 24 | #[bench] 25 | fn bench_u64(b: &mut Bencher) { 26 | let mut rng = rand::thread_rng(); 27 | let mut x: [u64; 1000] = [(); 1000].map(|_| rng.gen::()); 28 | let y: [u64; 1000] = [(); 1000].map(|_| rng.gen::()); 29 | b.iter(move || mul_u64(&mut x, &y)) 30 | } 31 | 32 | #[bench] 33 | fn bench_prove(b: &mut Bencher) { 34 | let mut rng = rand::thread_rng(); 35 | 36 | let backend = lvmt_db::storage::test_kvdb(NUM_COLS); 37 | let pp = Arc::new(AMTParams::::from_dir( 38 | "./pp", 39 | TypeDepths::USIZE, 40 | true, 41 | )); 42 | let mut db = LvmtDB::new(backend, pp.clone(), false, Some((0, 0))); 43 | 44 | let mut epoch_root_dict = HashMap::new(); 45 | 46 | let mut current_epoch = 0; 47 | let mut _latest_amt_root = G1Projective::default(); 48 | 49 | for i in 0..=255 { 50 | db.set(&Key(vec![1, 2, i, 0]), vec![1, 2, i, 5].into()); 51 | let (amt_root, epoch_root) = db.commit(current_epoch).unwrap(); 52 | _latest_amt_root = amt_root; 53 | epoch_root_dict.insert(current_epoch, epoch_root); 54 | current_epoch += 1; 55 | } 56 | 57 | let prove_key = 58 | |key: Vec, value: Vec, db: &mut LvmtDB, epoch_root_dict: &HashMap| { 59 | // println!("Verify key {:?}", key); 60 | let key = Key(key.to_vec()); 61 | assert_eq!(value, db.get(&key).unwrap().unwrap().into_vec()); 62 | db.prove(&key).unwrap() 63 | // AmtDb::verify(&key, &proof, |epoch| 
epoch_root_dict[&epoch], &pp).unwrap(); 64 | }; 65 | 66 | b.iter(|| { 67 | let i = rng.gen(); 68 | let proof = prove_key( 69 | vec![1, 2, i, 0], 70 | vec![1, 2, i, 5], 71 | &mut db, 72 | &epoch_root_dict, 73 | ); 74 | black_box(proof); 75 | }) 76 | } 77 | 78 | #[bench] 79 | fn bench_verify(b: &mut Bencher) { 80 | let mut rng = rand::thread_rng(); 81 | 82 | let backend = lvmt_db::storage::test_kvdb(NUM_COLS); 83 | let pp = Arc::new(AMTParams::::from_dir( 84 | "./pp", 85 | TypeDepths::USIZE, 86 | true, 87 | )); 88 | let mut db = LvmtDB::new(backend, pp.clone(), false, Some((0, 0))); 89 | 90 | let mut epoch_root_dict = HashMap::new(); 91 | 92 | let mut current_epoch = 0; 93 | let mut _latest_amt_root = G1Projective::default(); 94 | 95 | for i in 0..=255 { 96 | db.set(&Key(vec![1, 2, i, 0]), vec![1, 2, i, 5].into()); 97 | let (amt_root, epoch_root) = db.commit(current_epoch).unwrap(); 98 | _latest_amt_root = amt_root; 99 | epoch_root_dict.insert(current_epoch, epoch_root); 100 | current_epoch += 1; 101 | } 102 | 103 | let prove_key = 104 | |key: Vec, value: Vec, db: &mut LvmtDB, epoch_root_dict: &HashMap| { 105 | // println!("Verify key {:?}", key); 106 | let key = Key(key.to_vec()); 107 | assert_eq!(value, db.get(&key).unwrap().unwrap().into_vec()); 108 | db.prove(&key).unwrap() 109 | // AmtDb::verify(&key, &proof, |epoch| epoch_root_dict[&epoch], &pp).unwrap(); 110 | }; 111 | 112 | let proofs: Vec = (0..=255u8) 113 | .map(|i| { 114 | prove_key( 115 | vec![1, 2, i, 0], 116 | vec![1, 2, i, 5], 117 | &mut db, 118 | &epoch_root_dict, 119 | ) 120 | }) 121 | .collect(); 122 | 123 | b.iter(|| { 124 | let i = rng.gen(); 125 | LvmtDB::verify( 126 | &Key(vec![1, 2, i, 0]), 127 | &proofs[i as usize], 128 | |epoch| epoch_root_dict[&epoch], 129 | &pp, 130 | ) 131 | .unwrap(); 132 | }) 133 | } 134 | -------------------------------------------------------------------------------- /asb-authdb/lvmt-db/lvmt-serde-derive/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "lvmt-serde-derive" 3 | version = "0.1.0" 4 | authors = ["Chenxing Li "] 5 | edition = "2018" 6 | 7 | [lib] 8 | proc-macro = true 9 | 10 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 11 | 12 | [dependencies] 13 | syn = {version="1", features=["derive"]} 14 | proc-macro2 = "1" 15 | quote = "1" 16 | proc-macro-crate="1" 17 | -------------------------------------------------------------------------------- /asb-authdb/lvmt-db/lvmt-serde-derive/src/lib.rs: -------------------------------------------------------------------------------- 1 | extern crate proc_macro; 2 | extern crate proc_macro2; 3 | extern crate syn; 4 | #[macro_use] 5 | extern crate quote; 6 | 7 | use proc_macro::TokenStream; 8 | use proc_macro2::Span; 9 | use proc_macro2::TokenStream as TokenStream2; 10 | use proc_macro_crate::{crate_name, FoundCrate}; 11 | use syn::{Data, DeriveInput, Ident, Index, Member, Type, TypePath}; 12 | 13 | fn parse_members<'a>(ast: &'a DeriveInput) -> impl Iterator + 'a { 14 | if let Data::Struct(ref s) = ast.data { 15 | s.fields.iter().enumerate().map(|(index, field)| { 16 | field 17 | .ident 18 | .as_ref() 19 | .map_or(Member::Unnamed(Index::from(index)), |x: &Ident| { 20 | Member::Named(x.clone()) 21 | }) 22 | }) 23 | } else { 24 | panic!("#[derive(MyFromBytes)] is only defined for structs.") 25 | } 26 | } 27 | 28 | fn is_vec<'a>(ast: &'a DeriveInput) -> impl Iterator + 'a { 29 | if let Data::Struct(ref s) = ast.data { 30 | 
s.fields.iter().map(|field| { 31 | if let Type::Path(ref path) = field.ty { 32 | if path.path.segments.len() != 1 { 33 | false 34 | } else { 35 | let ty = path.path.segments.first().unwrap(); 36 | let is_vec = ty.ident.to_string() == "Vec"; 37 | let vec_u8: TypePath = syn::parse(quote! { Vec }.into()).unwrap(); 38 | let is_u8 = path == &vec_u8; 39 | is_vec && !is_u8 40 | } 41 | } else { 42 | false 43 | } 44 | }) 45 | } else { 46 | panic!("#[derive(MyFromBytes)] is only defined for structs.") 47 | } 48 | } 49 | 50 | fn dummy_impl(input: TokenStream2, trait_name: &'static str, struct_name: &Ident) -> TokenStream2 { 51 | let dummy_const = syn::Ident::new( 52 | &format!("_IMPL_AMT_{}_FOR_{}", trait_name, struct_name), 53 | struct_name.span(), 54 | ); 55 | let amt_crate = find_crate(); 56 | quote! { 57 | #[allow(non_upper_case_globals, unused_attributes, unused_qualifications)] 58 | const #dummy_const: () = { 59 | use #amt_crate::serde::{MyFromBytes, MyToBytes, SerdeType}; 60 | #input 61 | }; 62 | } 63 | } 64 | 65 | fn find_crate() -> TokenStream2 { 66 | let found_crate = crate_name("lvmt-db").expect("lvmt-db is not present in `Cargo.toml`"); 67 | 68 | match found_crate { 69 | FoundCrate::Itself => quote!(crate), 70 | FoundCrate::Name(name) => { 71 | let ident = Ident::new(&name, Span::call_site()); 72 | quote!( #ident ) 73 | } 74 | } 75 | } 76 | 77 | #[proc_macro_derive(MyFromBytes)] 78 | pub fn decodable(input: TokenStream) -> TokenStream { 79 | let ast = syn::parse(input).unwrap(); 80 | let member = parse_members(&ast); 81 | let read_fn = is_vec(&ast).map(|is_vec| { 82 | if is_vec { 83 | quote! { read_vec } 84 | } else { 85 | quote! { read } 86 | } 87 | }); 88 | let name = &ast.ident; 89 | let (impl_generics, ty_generics, where_clause) = ast.generics.split_for_impl(); 90 | 91 | let impl_block = quote! { 92 | impl #impl_generics MyFromBytes for #name #ty_generics #where_clause { 93 | fn read(mut reader: R, ty: SerdeType) -> ::std::io::Result { 94 | Ok(Self { 95 | #(#member: MyFromBytes::#read_fn(&mut reader, ty)?,)* 96 | }) 97 | } 98 | } 99 | }; 100 | 101 | dummy_impl(impl_block, "FROM_BYTES", name).into() 102 | } 103 | 104 | #[proc_macro_derive(MyToBytes)] 105 | pub fn encodable(input: TokenStream) -> TokenStream { 106 | let ast = syn::parse(input).unwrap(); 107 | let member = parse_members(&ast); 108 | let write_fn = is_vec(&ast).map(|is_vec| { 109 | if is_vec { 110 | quote! { write_vec } 111 | } else { 112 | quote! { write } 113 | } 114 | }); 115 | let name = &ast.ident; 116 | let (impl_generics, ty_generics, where_clause) = ast.generics.split_for_impl(); 117 | 118 | let impl_block = quote! 
{ 119 | impl #impl_generics MyToBytes for #name #ty_generics #where_clause { 120 | fn write(&self, mut writer: W, ty: SerdeType) -> ::std::io::Result<()> { 121 | #(MyToBytes::#write_fn(&self.#member, &mut writer, ty)?;)* 122 | Ok(()) 123 | } 124 | } 125 | }; 126 | 127 | dummy_impl(impl_block, "TO_BYTES", name).into() 128 | } 129 | -------------------------------------------------------------------------------- /asb-authdb/lvmt-db/pp/power-tau-ho1sTw-06.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ChenxingLi/authenticated-storage-benchmarks/c22ff827ec72df6bf8855893c49786d2072eaf3c/asb-authdb/lvmt-db/pp/power-tau-ho1sTw-06.bin -------------------------------------------------------------------------------- /asb-authdb/lvmt-db/ppot2ark/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "ppot2ark" 3 | version = "0.1.0" 4 | authors = ["ChenxingLi "] 5 | edition = "2018" 6 | 7 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 8 | 9 | [dependencies] 10 | bellman_ce = { git = "https://github.com/kobigurk/phase2-bn254", rev = "dd6b966"} 11 | powersoftau = { git = "https://github.com/kobigurk/phase2-bn254", rev = "dd6b966"} 12 | ff = {package = "ff_ce", version = "0.7", features = ["derive"]} 13 | memmap = "0.7.0" 14 | 15 | ark-ff = { version="^0.2.0", default-features = false } 16 | ark-ec = { version="^0.2.0", default-features = false } 17 | ark-std = { version="^0.2.0", default-features = false } 18 | ark-bn254 = { version="^0.2.0", features = ["std","curve"] } 19 | -------------------------------------------------------------------------------- /asb-authdb/lvmt-db/ppot2ark/src/adapter.rs: -------------------------------------------------------------------------------- 1 | // use crate::ark::FqRepr; 2 | use std::marker::PhantomData; 3 | 4 | pub use ark_ff::{One as _, PrimeField as _, Zero as _}; 5 | pub use ark_std::str::FromStr; 6 | pub use bellman_ce::pairing::CurveAffine as _; 7 | pub use ff::{Field as _, PrimeField as _}; 8 | use std::fmt::{Debug, Display}; 9 | 10 | mod ppot { 11 | pub use bellman_ce::pairing::bn256::Bn256 as Bn; 12 | pub use bellman_ce::pairing::bn256::{Fq, Fq2, FqRepr, Fr, FrRepr, G1Affine, G2Affine, G1, G2}; 13 | } 14 | 15 | mod ark { 16 | pub use ark_ff::{fields::PrimeField, Field, One}; 17 | 18 | pub use ark_bn254::{Fq, Fq2, Fr, G1Affine, G1Projective, G2Affine, G2Projective}; 19 | pub use ark_ff::biginteger::BigInteger256 as FqRepr; 20 | pub use ark_ff::biginteger::BigInteger256 as FrRepr; 21 | 22 | pub use ark_bn254::{FqParameters, FrParameters}; 23 | pub use ark_ff::fields::Fp256; 24 | } 25 | 26 | pub trait Adapter { 27 | type Output: Debug + PartialEq + Sized + Eq + Copy + Clone + Send + Sync + Display; 28 | fn adapt(self) -> Self::Output; 29 | } 30 | 31 | impl Adapter for ppot::FqRepr { 32 | type Output = ark::FqRepr; 33 | 34 | fn adapt(self) -> Self::Output { 35 | ark::FqRepr(self.0) 36 | } 37 | } 38 | 39 | impl Adapter for ppot::FrRepr { 40 | type Output = ark::FrRepr; 41 | 42 | fn adapt(self) -> Self::Output { 43 | ark::FrRepr(self.0) 44 | } 45 | } 46 | 47 | impl Adapter for ppot::Fq { 48 | type Output = ark::Fq; 49 | 50 | fn adapt(self) -> Self::Output { 51 | ark::Fp256::(self.into_raw_repr().adapt(), PhantomData) 52 | } 53 | } 54 | 55 | impl Adapter for ppot::Fr { 56 | type Output = ark::Fr; 57 | 58 | fn adapt(self) -> Self::Output { 59 | 
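
To make the two derives concrete, here is roughly what `#[derive(MyToBytes)]` emits for a hypothetical struct (an editor's sketch of the expansion, modulo the dummy-const wrapper; `write_vec` is selected exactly when a field is a `Vec<T>` other than `Vec<u8>`):

// #[derive(MyToBytes)]
// struct Example {
//     a: u64,      // plain field          -> MyToBytes::write
//     b: Vec<u32>, // Vec, and not Vec<u8> -> MyToBytes::write_vec
// }
//
// expands (inside the generated dummy const) to:
//
// impl MyToBytes for Example {
//     fn write<W: ::std::io::Write>(&self, mut writer: W, ty: SerdeType) -> ::std::io::Result<()> {
//         MyToBytes::write(&self.a, &mut writer, ty)?;
//         MyToBytes::write_vec(&self.b, &mut writer, ty)?;
//         Ok(())
//     }
// }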
-------------------------------------------------------------------------------- /asb-authdb/lvmt-db/pp/power-tau-ho1sTw-06.bin: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChenxingLi/authenticated-storage-benchmarks/c22ff827ec72df6bf8855893c49786d2072eaf3c/asb-authdb/lvmt-db/pp/power-tau-ho1sTw-06.bin

-------------------------------------------------------------------------------- /asb-authdb/lvmt-db/ppot2ark/Cargo.toml: --------------------------------------------------------------------------------
1 | [package]
2 | name = "ppot2ark"
3 | version = "0.1.0"
4 | authors = ["ChenxingLi "]
5 | edition = "2018"
6 | 
7 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
8 | 
9 | [dependencies]
10 | bellman_ce = { git = "https://github.com/kobigurk/phase2-bn254", rev = "dd6b966" }
11 | powersoftau = { git = "https://github.com/kobigurk/phase2-bn254", rev = "dd6b966" }
12 | ff = { package = "ff_ce", version = "0.7", features = ["derive"] }
13 | memmap = "0.7.0"
14 | 
15 | ark-ff = { version = "^0.2.0", default-features = false }
16 | ark-ec = { version = "^0.2.0", default-features = false }
17 | ark-std = { version = "^0.2.0", default-features = false }
18 | ark-bn254 = { version = "^0.2.0", features = ["std", "curve"] }

-------------------------------------------------------------------------------- /asb-authdb/lvmt-db/ppot2ark/src/adapter.rs: --------------------------------------------------------------------------------
1 | // use crate::ark::FqRepr;
2 | use std::marker::PhantomData;
3 | 
4 | pub use ark_ff::{One as _, PrimeField as _, Zero as _};
5 | pub use ark_std::str::FromStr;
6 | pub use bellman_ce::pairing::CurveAffine as _;
7 | pub use ff::{Field as _, PrimeField as _};
8 | use std::fmt::{Debug, Display};
9 | 
10 | mod ppot {
11 |     pub use bellman_ce::pairing::bn256::Bn256 as Bn;
12 |     pub use bellman_ce::pairing::bn256::{Fq, Fq2, FqRepr, Fr, FrRepr, G1Affine, G2Affine, G1, G2};
13 | }
14 | 
15 | mod ark {
16 |     pub use ark_ff::{fields::PrimeField, Field, One};
17 | 
18 |     pub use ark_bn254::{Fq, Fq2, Fr, G1Affine, G1Projective, G2Affine, G2Projective};
19 |     pub use ark_ff::biginteger::BigInteger256 as FqRepr;
20 |     pub use ark_ff::biginteger::BigInteger256 as FrRepr;
21 | 
22 |     pub use ark_bn254::{FqParameters, FrParameters};
23 |     pub use ark_ff::fields::Fp256;
24 | }
25 | 
26 | pub trait Adapter {
27 |     type Output: Debug + PartialEq + Sized + Eq + Copy + Clone + Send + Sync + Display;
28 |     fn adapt(self) -> Self::Output;
29 | }
30 | 
31 | impl Adapter for ppot::FqRepr {
32 |     type Output = ark::FqRepr;
33 | 
34 |     fn adapt(self) -> Self::Output {
35 |         ark::FqRepr(self.0)
36 |     }
37 | }
38 | 
39 | impl Adapter for ppot::FrRepr {
40 |     type Output = ark::FrRepr;
41 | 
42 |     fn adapt(self) -> Self::Output {
43 |         ark::FrRepr(self.0)
44 |     }
45 | }
46 | 
47 | impl Adapter for ppot::Fq {
48 |     type Output = ark::Fq;
49 | 
50 |     fn adapt(self) -> Self::Output {
51 |         ark::Fp256::<ark::FqParameters>(self.into_raw_repr().adapt(), PhantomData)
52 |     }
53 | }
54 | 
55 | impl Adapter for ppot::Fr {
56 |     type Output = ark::Fr;
57 | 
58 |     fn adapt(self) -> Self::Output {
59 |         ark::Fp256::<ark::FrParameters>(self.into_raw_repr().adapt(), PhantomData)
60 |     }
61 | }
62 | 
63 | impl Adapter for ppot::Fq2 {
64 |     type Output = ark::Fq2;
65 | 
66 |     fn adapt(self) -> Self::Output {
67 |         ark::Fq2::new(self.c0.adapt(), self.c1.adapt())
68 |     }
69 | }
70 | 
71 | impl Adapter for ppot::G1Affine {
72 |     type Output = ark::G1Affine;
73 | 
74 |     fn adapt(self) -> Self::Output {
75 |         if self.is_zero() {
76 |             ark::G1Affine::zero()
77 |         } else {
78 |             ark::G1Affine::new(self.get_x().adapt(), self.get_y().clone().adapt(), false)
79 |         }
80 |     }
81 | }
82 | 
83 | impl Adapter for ppot::G2Affine {
84 |     type Output = ark::G2Affine;
85 | 
86 |     fn adapt(self) -> Self::Output {
87 |         if self.is_zero() {
88 |             ark::G2Affine::zero()
89 |         } else {
90 |             ark::G2Affine::new(self.get_x().adapt(), self.get_y().clone().adapt(), false)
91 |         }
92 |     }
93 | }
94 | 
95 | fn test_eq<P: Adapter>(input: P, answer: P::Output) {
96 |     assert_eq!(input.adapt(), answer);
97 | }
98 | 
99 | #[test]
100 | fn test_fields() {
101 |     test_eq(ppot::Fq::one(), ark::Fq::one());
102 |     test_eq(
103 |         ppot::Fq::from_str("17").unwrap(),
104 |         ark::Fq::from_str("17").unwrap(),
105 |     );
106 |     test_eq(
107 |         ppot::Fq::from_str("17").unwrap().inverse().unwrap(),
108 |         ark::Fq::one() / ark::Fq::from_str("17").unwrap(),
109 |     );
110 | 
111 |     test_eq(ppot::Fr::one(), ark::Fr::one());
112 |     test_eq(
113 |         ppot::Fr::from_str("17").unwrap(),
114 |         ark::Fr::from_str("17").unwrap(),
115 |     );
116 |     test_eq(
117 |         ppot::Fr::from_str("17").unwrap().inverse().unwrap(),
118 |         ark::Fr::one() / ark::Fr::from_str("17").unwrap(),
119 |     );
120 | }
121 | 
122 | // #[test]
123 | // fn test2(){
124 | //     use ark_ff::{One, Field, fields::PrimeField};
125 | //     let mut x = ark::Fq::one();
126 | //     println!("{:?}", x.into_repr());
127 | //     let y = ark::Fq::from_repr(ark::FqRepr([1,0,0,0])).unwrap();
128 | //     println!("{:?}", y.into_repr());
129 | //     let z = ark::Fp256::<ark::FqParameters>(ark::FqRepr([1,0,0,0]), PhantomData);
130 | //     println!("{:?}", z.into_repr());
131 | // }

-------------------------------------------------------------------------------- /asb-authdb/lvmt-db/ppot2ark/src/lib.rs: --------------------------------------------------------------------------------
1 | #![allow(dead_code, unused)]
2 | 
3 | mod adapter;
4 | 
5 | pub use adapter::Adapter;
6 | 
7 | pub use bellman_ce::pairing::bn256::Bn256;
8 | pub use powersoftau::batched_accumulator::BatchedAccumulator;
9 | pub use powersoftau::parameters::{CeremonyParams, CheckForCorrectness, UseCompression};
10 | 
11 | use memmap::MmapOptions;
12 | use std::fs::OpenOptions;
13 | 
14 | pub fn from_challenge<'a>(
15 |     response_filename: &str,
16 |     size: usize,
17 |     parameters: &'a CeremonyParams<Bn256>,
18 | ) -> Box<BatchedAccumulator<'a, Bn256>> {
19 |     let mut accumulator = BatchedAccumulator::empty(&parameters);
20 |     let reader = OpenOptions::new()
21 |         .read(true)
22 |         .open(response_filename)
23 |         .expect("unable open response file in this directory");
24 |     let input_map = unsafe {
25 |         MmapOptions::new()
26 |             .map(&reader)
27 |             .expect("unable to create a memory map for input")
28 |     };
29 |     accumulator
30 |         .read_chunk(
31 |             0,
32 |             1 << size,
33 |             UseCompression::No,
34 |             CheckForCorrectness::Yes,
35 |             &input_map,
36 |         )
37 |         .unwrap();
38 |     Box::new(accumulator)
39 | }
40 | 
41 | // #[test]
42 | // fn test_load_and_pair() {
43 | //     use ark_bn254::Bn254;
44 | //     use ark_ec::PairingEngine;
45 | //
46 | //     println!("adc");
47 | //     let (g1, g2) = from_challenge("/data/chenxing/challenge0072", 6, &CeremonyParams::<Bn256>::new(28, 20));
48 | //     println!("{} {}", g1.len(), g2.len());
49 | //     assert_eq!(Bn254::pairing(g1[0], g2[3]), Bn254::pairing(g1[4], g2[0]));
50 | // }

-------------------------------------------------------------------------------- /asb-authdb/lvmt-db/src/amt/mod.rs: --------------------------------------------------------------------------------
1 | pub mod node;
2 | pub mod tree;
3 | pub mod write_guard;
4 | 
5 | #[cfg(test)]
6 | mod test;
7 | 
8 | pub use self::{
9 |     node::NodeIndex,
10 |     tree::{AMTConfigTrait, AMTData, AMTProof, AMTree},
11 | };

-------------------------------------------------------------------------------- /asb-authdb/lvmt-db/src/amt/node.rs: --------------------------------------------------------------------------------
1 | use crate::crypto::export::{AffineCurve, FromBytes, ProjectiveCurve, Read, ToBytes, Write};
2 | use crate::crypto::TypeUInt;
3 | use crate::serde::{MyFromBytes, MyToBytes, SerdeType};
4 | use std::io::Result as IoResult;
5 | use std::marker::PhantomData;
6 | 
7 | #[derive(Clone, Copy, Default)]
8 | pub struct AMTNode<G: ProjectiveCurve> {
9 |     pub commitment: G,
10 |     pub proof: G,
11 | }
12 | 
13 | impl<G: ProjectiveCurve> MyFromBytes for AMTNode<G> {
14 |     fn read<R: Read>(mut reader: R, ty: SerdeType) -> IoResult<Self> {
15 |         if ty.consistent {
16 |             let g1_aff: <G as ProjectiveCurve>::Affine = FromBytes::read(&mut reader)?;
17 |             let g2_aff: <G as ProjectiveCurve>::Affine = FromBytes::read(&mut reader)?;
18 |             Ok(Self {
19 |                 commitment: g1_aff.into_projective(),
20 |                 proof: g2_aff.into_projective(),
21 |             })
22 |         } else {
23 |             Ok(Self {
24 |                 commitment: FromBytes::read(&mut reader)?,
25 |                 proof: FromBytes::read(&mut reader)?,
26 |             })
27 |         }
28 |     }
29 | }
30 | 
31 | impl<G: ProjectiveCurve> MyToBytes for AMTNode<G> {
32 |     fn write<W: Write>(&self, mut writer: W, ty: SerdeType) -> IoResult<()> {
33 |         if ty.consistent {
34 |             ToBytes::write(&self.commitment.into_affine(), &mut writer)?;
35 |             ToBytes::write(&self.proof.into_affine(), &mut writer)?;
36 |         } else {
37 |             ToBytes::write(&self.commitment, &mut writer)?;
38 |             ToBytes::write(&self.proof, &mut writer)?;
39 |         }
40 |         Ok(())
41 |     }
42 | }
43 | 
44 | #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
45 | pub struct NodeIndex<N: TypeUInt> {
46 |     depth: usize,
47 |     index: usize,
48 |     _phantom: PhantomData<N>,
49 | }
50 | 
51 | impl<N: TypeUInt> NodeIndex<N> {
52 |     #[inline]
53 |     pub(crate) fn new(depth: usize, index: usize) -> Self {
54 |         assert!(index < (1 << depth));
55 |         assert!(depth <= N::USIZE);
56 |         Self {
57 |             depth,
58 |             index,
59 |             _phantom: PhantomData,
60 |         }
61 |     }
62 | 
63 |     pub fn leaf(index: usize) -> Self {
64 |         Self::new(N::USIZE, index)
65 |     }
66 | 
67 |     pub fn root() -> Self {
68 |         Self::new(0, 0)
69 |     }
70 | 
71 |     #[inline]
72 |     pub fn to_sibling(&self) -> Self {
73 |         NodeIndex::new(self.depth, self.index ^ 1)
74 |     }
75 | 
76 |     #[inline]
77 |     pub fn to_ancestor(&self, height: usize) -> Self {
78 |         assert!(height <= self.depth);
79 |         NodeIndex::new(self.depth - height, self.index >> height)
80 |     }
81 | 
82 |     pub fn needs_maintain(&self, shard_root: &Self) -> bool {
83 |         if self == &Self::root() {
84 |             return true;
85 |         }
86 | 
87 |         if self.depth > shard_root.depth {
88 |             let height_diff = self.depth - shard_root.depth;
89 |             let index = self.index >> height_diff;
90 |             return index == shard_root.index;
91 |         } else {
92 |             let sib = self.to_sibling();
93 |             let height_diff = shard_root.depth - sib.depth;
94 |             let index = shard_root.index >> height_diff;
95 |             return sib.index == index;
96 |         }
97 |     }
98 | 
99 |     #[inline]
100 |     pub fn depth(&self) -> usize {
101 |         self.depth
102 |     }
103 | 
104 |     #[inline]
105 |     pub fn index(&self) -> usize {
106 |         self.index
107 |     }
108 | 
109 |     #[inline]
110 |     pub fn total_depth(&self) -> usize {
111 |         N::USIZE
112 |     }
113 | }
114 | 
115 | #[cfg(test)]
116 | mod test {
117 |     use super::*;
118 |     use crate::type_uint;
119 |     type_uint! {
120 |         struct TestUInt(6);
121 |     }
122 |     type Index = NodeIndex<TestUInt>;
123 | 
124 |     #[test]
125 |     fn test_needs_maintain() {
126 |         let shard_root = Index::new(3, 4);
127 |         assert!(Index::new(0, 0).needs_maintain(&shard_root));
128 |         assert!(Index::new(1, 0).needs_maintain(&shard_root));
129 |         assert!(Index::new(2, 3).needs_maintain(&shard_root));
130 |         assert!(Index::new(3, 5).needs_maintain(&shard_root));
131 |         assert!(Index::new(4, 8).needs_maintain(&shard_root));
132 |         assert!(Index::new(4, 9).needs_maintain(&shard_root));
133 |         assert!(Index::new(6, 32).needs_maintain(&shard_root));
134 |         assert!(Index::new(6, 39).needs_maintain(&shard_root));
135 | 
136 |         assert!(!Index::new(1, 1).needs_maintain(&shard_root));
137 |         assert!(!Index::new(2, 0).needs_maintain(&shard_root));
138 |         assert!(!Index::new(2, 2).needs_maintain(&shard_root));
139 |         assert!(!Index::new(3, 2).needs_maintain(&shard_root));
140 |         assert!(!Index::new(3, 4).needs_maintain(&shard_root));
141 |         assert!(!Index::new(4, 2).needs_maintain(&shard_root));
142 |         assert!(!Index::new(4, 7).needs_maintain(&shard_root));
143 |         assert!(!Index::new(4, 10).needs_maintain(&shard_root));
144 |         assert!(!Index::new(6, 7).needs_maintain(&shard_root));
145 |         assert!(!Index::new(6, 31).needs_maintain(&shard_root));
146 |         assert!(!Index::new(6, 40).needs_maintain(&shard_root));
147 |     }
148 | }
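
A concrete reading of the index arithmetic above — an editor-added sketch that would have to live beside the existing test module, since `NodeIndex::new` is crate-private and `TestUInt` is test-local:

#[test]
fn sibling_and_ancestor_demo() {
    // Depth-6 tree, leaf 9 = 0b001001.
    let leaf = NodeIndex::<TestUInt>::new(6, 9);
    assert_eq!(leaf.to_sibling(), NodeIndex::new(6, 8)); // flip the lowest bit
    assert_eq!(leaf.to_ancestor(2), NodeIndex::new(4, 2)); // drop 2 low bits
    // `needs_maintain` then checks whether a node (or its sibling) lies under
    // `shard_root`, exactly what `test_needs_maintain` above exercises.
}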
-------------------------------------------------------------------------------- /asb-authdb/lvmt-db/src/amt/test.rs: --------------------------------------------------------------------------------
1 | use super::tree::{AMTConfigTrait, AMTData, AMTree};
2 | use crate::amt::NodeIndex;
3 | use crate::crypto::{
4 |     export::{Fr, FrInt, Pairing, G1},
5 |     AMTParams, TypeUInt,
6 | };
7 | use crate::storage::{FlattenArray, FlattenTree};
8 | use crate::type_uint;
9 | use std::sync::Arc;
10 | 
11 | struct TestConfig {}
12 | 
13 | type_uint! {
14 |     struct TestDepths(6);
15 | }
16 | 
17 | impl AMTConfigTrait for TestConfig {
18 |     type PE = Pairing;
19 |     type Name = u64;
20 |     type Data = u64;
21 |     type Commitment = G1<Pairing>;
22 |     type DataLayout = FlattenArray;
23 |     type TreeLayout = FlattenTree;
24 |     type Height = TestDepths;
25 | }
26 | 
27 | type TestTree = AMTree<TestConfig>;
28 | 
29 | fn test_all(amt: &mut TestTree, public_parameter: &AMTParams<Pairing>, task: &str) {
30 |     for i in 0..TestConfig::LENGTH {
31 |         let proof = amt.prove(i);
32 |         let value = amt.get(i);
33 | 
34 |         assert!(
35 |             TestTree::verify(
36 |                 i,
37 |                 value.as_fr(),
38 |                 amt.commitment(),
39 |                 proof.unwrap(),
40 |                 public_parameter
41 |             ),
42 |             "fail at task {} pos {}",
43 |             task,
44 |             i
45 |         );
46 |     }
47 | }
48 | 
49 | impl AMTData<Fr<Pairing>> for u64 {
50 |     fn as_fr_int(&self) -> FrInt<Pairing> {
51 |         FrInt::<Pairing>::from(*self)
52 |     }
53 | }
54 | 
55 | #[test]
56 | fn test_amt() {
57 |     let db = crate::storage::test_db_col();
58 | 
59 |     const DEPTHS: usize = TestConfig::DEPTHS;
60 |     const LENGTH: usize = 1 << DEPTHS;
61 | 
62 |     let pp = Arc::new(AMTParams::<Pairing>::from_dir("./pp", DEPTHS, true));
63 | 
64 |     let mut amt = TestTree::new(64, db, pp.clone(), Some(NodeIndex::<TestDepths>::root()));
65 |     amt.set_commitment(&Default::default());
66 | 
67 |     test_all(&mut amt, &pp, "Empty");
68 | 
69 |     *amt.write_versions(0) += 1;
70 |     assert_eq!(amt.get(0), &1);
71 |     assert_eq!(amt.get(1), &0);
72 |     test_all(&mut amt, &pp, "one-hot");
73 | 
74 |     *amt.write_versions(0) += &1;
75 |     *amt.write_versions(LENGTH / 2) += &1;
76 |     test_all(&mut amt, &pp, "sibling pair");
77 | }
78 | 
79 | #[test]
80 | fn test_one() {
81 |     let db = crate::storage::test_db_col();
82 | 
83 |     const DEPTHS: usize = TestConfig::DEPTHS;
84 | 
85 |     let pp = Arc::new(AMTParams::<Pairing>::from_dir("./pp", DEPTHS, true));
86 | 
87 |     let mut amt = TestTree::new(64, db, pp.clone(), Some(NodeIndex::<TestDepths>::root()));
88 |     amt.set_commitment(&Default::default());
89 | 
90 |     *amt.write_versions(0) += 1;
91 |     assert_eq!(amt.get(0), &1);
92 |     assert_eq!(amt.get(1), &0);
93 | 
94 |     let task = "one-hot";
95 |     let i = 1;
96 |     let proof = amt.prove(i);
97 |     let value = amt.get(i);
98 | 
99 |     assert!(
100 |         TestTree::verify(i, value.as_fr(), amt.commitment(), proof.unwrap(), &pp),
101 |         "fail at task {} pos {}",
102 |         task,
103 |         i
104 |     );
105 | }

-------------------------------------------------------------------------------- /asb-authdb/lvmt-db/src/amt/tree.rs: --------------------------------------------------------------------------------
1 | use super::node::{AMTNode, NodeIndex};
2 | use super::write_guard::AMTNodeWriteGuard;
3 | use crate::crypto::export::{PairingEngine, PrimeField, ProjectiveCurve, Zero};
4 | use crate::crypto::{
5 |     export::{Fr, FrInt, G1},
6 |     AMTParams, TypeUInt,
7 | };
8 | use crate::serde::{MyFromBytes, MyToBytes};
9 | use crate::storage::access::PUT_MODE;
10 | use crate::storage::{DBAccess, DBColumn, LayoutTrait};
11 | use std::sync::Arc;
12 | 
13 | pub trait AMTConfigTrait {
14 |     type PE: PairingEngine;
15 |     type Name: MyToBytes;
16 |     type Data: AMTData<Fr<Self::PE>> + Default + Clone + MyToBytes + MyFromBytes;
17 |     type Commitment: ProjectiveCurve + MyToBytes + MyFromBytes;
18 | 
19 |     type DataLayout: LayoutTrait<usize>;
20 |     type TreeLayout: LayoutTrait<NodeIndex<Self::Height>>;
21 |     type Height: TypeUInt;
22 | 
23 |     const DEPTHS: usize = Self::Height::USIZE;
24 |     const LENGTH: usize = 1 << Self::DEPTHS;
25 |     const IDX_MASK: usize = Self::LENGTH - 1;
26 | }
27 | 
28 | pub trait AMTData<P: PrimeField> {
29 |     fn as_fr_int(&self) -> P::BigInt;
30 |     fn as_fr(&self) -> P {
31 |         self.as_fr_int().into()
32 |     }
33 | }
34 | 
35 | #[derive(Clone)]
36 | pub struct AMTree<C: AMTConfigTrait> {
37 |     pub name: C::Name,
38 |     data: DBAccess<usize, C::Data, C::DataLayout>,
39 |     subtree_roots: DBAccess<usize, G1<C::PE>, C::DataLayout>,
40 |     inner_nodes: DBAccess<NodeIndex<C::Height>, AMTNode<G1<C::PE>>, C::TreeLayout>,
41 |     commitment: Option<G1<C::PE>>,
42 | 
43 |     dirty: bool,
44 |     shard_root: Option<NodeIndex<C::Height>>,
45 | 
46 |     pp: Arc<AMTParams<C::PE>>,
47 | }
48 | 
49 | pub type AMTProof<G> = Vec<AMTNode<G>>;
50 | 
51 | impl<C: AMTConfigTrait> AMTree<C> {
52 |     pub fn new(
53 |         name: C::Name,
54 |         db: DBColumn,
55 |         pp: Arc<AMTParams<C::PE>>,
56 |         shard_root: Option<NodeIndex<C::Height>>,
57 |     ) -> Self {
58 |         let ser_name = name.to_bytes_consensus();
59 |         let set_prefix = |prefix: u8| {
60 |             let mut prefix = vec![prefix];
61 |             prefix.extend_from_slice(&ser_name);
62 |             prefix
63 |         };
64 |         Self {
65 |             name,
66 | 
67 |             data: DBAccess::new(set_prefix(1), db.clone()),
68 |             inner_nodes: DBAccess::new(set_prefix(2), db.clone()),
69 |             subtree_roots: DBAccess::new(set_prefix(3), db.clone()),
70 | 
71 |             commitment: None,
72 |             dirty: false,
73 |             shard_root,
74 |             pp,
75 |         }
76 |     }
77 | 
78 |     pub fn set_commitment(&mut self, commitment: &G1<C::PE>) {
79 |         if self.commitment.is_none() {
80 |             self.commitment = Some(commitment.clone())
81 |         }
82 |     }
83 | 
84 |     // Because the underlying data has a cache, most read operations require a mutable ref.
85 |     pub fn get(&mut self, index: usize) -> &C::Data {
86 |         assert!(index < C::LENGTH);
87 |         self.data.get(&index)
88 |     }
89 | 
90 |     pub(super) fn get_mut(&mut self, index: usize) -> &mut C::Data {
91 |         assert!(index < C::LENGTH);
92 |         self.data.get_mut(&index)
93 |     }
94 | 
95 |     pub fn dirty(&self) -> bool {
96 |         self.dirty
97 |     }
98 | 
99 |     pub fn only_root(&self) -> bool {
100 |         self.shard_root.is_none()
101 |     }
102 | 
103 |     pub fn can_prove(&self) -> bool {
104 |         self.shard_root == Some(NodeIndex::root())
105 |     }
106 | 
107 |     pub fn write_versions(&mut self, index: usize) -> AMTNodeWriteGuard<C> {
108 |         let value = std::mem::take(self.data.get_mut(&index));
109 |         AMTNodeWriteGuard::new(index, value, self)
110 |     }
111 | 
112 |     pub fn subtree_root_mut(&mut self, index: usize) -> &mut G1<C::PE> {
113 |         assert!(index < C::LENGTH);
114 |         self.subtree_roots.get_mut(&index)
115 |     }
116 | 
117 |     pub fn subtree_root(&mut self, index: usize) -> &G1<C::PE> {
118 |         &*self.subtree_root_mut(index)
119 |     }
120 | 
121 |     pub fn commitment(&mut self) -> &G1<C::PE> {
122 |         self.commitment.as_ref().unwrap()
123 |     }
124 | 
125 |     pub fn flush(&mut self) -> G1<C::PE> {
126 |         *PUT_MODE.lock_mut().unwrap() = 0;
127 |         self.data.flush_cache();
128 | 
129 |         *PUT_MODE.lock_mut().unwrap() = 1;
130 |         self.inner_nodes.flush_cache();
131 | 
132 |         *PUT_MODE.lock_mut().unwrap() = 2;
133 |         self.subtree_roots.flush_cache();
134 | 
135 |         self.dirty = false;
136 |         self.commitment.unwrap().clone()
137 |     }
138 | 
139 |     pub fn update(&mut self, index: usize, update_fr_int: FrInt<C::PE>) {
140 |         assert!(index < C::LENGTH);
141 | 
142 |         if update_fr_int == FrInt::<C::PE>::from(0) {
143 |             return;
144 |         }
145 | 
146 |         self.dirty = true;
147 | 
148 |         let inc_comm = self.pp.get_idents_pow(index, &update_fr_int);
149 | 
150 |         // Update commitment
151 |         *self.commitment.as_mut().unwrap() += &inc_comm;
152 | 
153 |         let shard_root = if let Some(v) = self.shard_root {
154 |             v
155 |         } else {
156 |             return;
157 |         };
158 | 
159 |         let leaf_index = bitreverse(index, C::DEPTHS);
160 |         let node_index = NodeIndex::new(C::DEPTHS, leaf_index);
161 | 
162 |         for (height, depth) in (0..C::DEPTHS).map(|height| (height, C::DEPTHS - height)) {
163 |             let visit_node_index = node_index.to_ancestor(height);
164 | 
165 |             if !visit_node_index.needs_maintain(&shard_root) {
166 |                 continue;
167 |             }
168 | 
169 |             // let proof = self.pp.get_quotient(depth, index).mul(update_fr_int);
170 |             let proof = self.pp.get_quotient_pow(depth, index, &update_fr_int);
171 |             let node = self.inner_nodes.get_mut(&visit_node_index);
172 |             node.commitment += &inc_comm;
173 |             node.proof += proof;
174 |         }
175 |     }
176 | 
177 |     pub fn prove(&mut self, index: usize) -> Option<AMTProof<G1<C::PE>>> {
178 |         if !self.can_prove() {
179 |             return None;
180 |         }
181 |         let leaf_index = bitreverse(index, C::DEPTHS);
182 |         let node_index = NodeIndex::new(C::DEPTHS, leaf_index);
183 | 
184 |         let mut answers = vec![Default::default(); C::DEPTHS];
185 | 
186 |         for visit_depth in (1..=C::DEPTHS).rev() {
187 |             let visit_height = C::DEPTHS - visit_depth;
188 |             let sibling_node_index = node_index.to_ancestor(visit_height).to_sibling();
189 | 
190 |             answers[visit_depth - 1] = self.inner_nodes.get_mut(&sibling_node_index).clone();
191 |         }
192 |         Some(answers)
193 |     }
194 | 
195 |     pub fn verify(
196 |         index: usize,
197 |         value: Fr<C::PE>,
198 |         commitment: &G1<C::PE>,
199 |         proof: AMTProof<G1<C::PE>>,
200 |         pp: &AMTParams<C::PE>,
201 |     ) -> bool {
202 |         assert!(index < C::LENGTH);
203 |         let self_indent = pp.get_commitments(index).mul(value.into());
204 |         let others: G1<C::PE> = proof.iter().map(|node| node.commitment).sum();
205 | 
206 |         if *commitment != self_indent + &others {
207 |             println!(
208 |                 "Commitment check fail {},{},{}",
209 |                 self_indent.is_zero(),
210 |                 others.is_zero(),
211 |                 commitment.is_zero()
212 |             );
213 |             return false;
214 |         }
215 | 
216 |         for (idx, node) in proof.iter().copied().enumerate() {
217 |             let height = C::DEPTHS - idx - 1;
218 |             let depth = idx + 1;
219 |             let verification = *pp.get_sibling_verification(depth, index);
220 |             if C::PE::pairing(node.commitment, pp.g2()) != C::PE::pairing(node.proof, verification)
221 |             {
222 |                 println!("Pairing check fails at height {}", height);
223 |                 return false;
224 |             }
225 |         }
226 |         return true;
227 |     }
228 | }
229 | 
230 | #[inline]
231 | fn bitreverse(mut n: usize, l: usize) -> usize {
232 |     let mut r = 0;
233 |     for _ in 0..l {
234 |         r = (r << 1) | (n & 1);
235 |         n >>= 1;
236 |     }
237 |     r
238 | }
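
Both `update` and `prove` first map a logical index through `bitreverse`, so leaves sit in bit-reversed (radix-2 FFT) order and each subtree covers a contiguous block of evaluation points. A quick worked case of the helper — an editor-added sketch that would live inside this module, since `bitreverse` is private:

#[test]
fn bitreverse_demo() {
    // With l = 6 bits: 0b000011 (3) reverses to 0b110000 (48),
    // and the map is an involution: applying it twice is the identity.
    assert_eq!(bitreverse(0b000011, 6), 0b110000);
    assert_eq!(bitreverse(bitreverse(42, 6), 6), 42);
}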
-------------------------------------------------------------------------------- /asb-authdb/lvmt-db/src/amt/write_guard.rs: --------------------------------------------------------------------------------
1 | use super::tree::{AMTConfigTrait, AMTData, AMTree};
2 | use crate::crypto::export::{BigInteger, FpParameters, FrInt, FrParams};
3 | use std::ops::{Deref, DerefMut, Drop};
4 | 
5 | pub struct AMTNodeWriteGuard<'a, C: AMTConfigTrait> {
6 |     index: usize,
7 |     value: C::Data,
8 |     old_fr_int: FrInt<C::PE>,
9 |     tree: &'a mut AMTree<C>,
10 | }
11 | 
12 | impl<'a, C: AMTConfigTrait> AMTNodeWriteGuard<'a, C> {
13 |     pub(super) fn new(index: usize, value: C::Data, tree: &'a mut AMTree<C>) -> Self {
14 |         let old_fr_int = value.as_fr_int();
15 |         Self {
16 |             index,
17 |             value,
18 |             old_fr_int,
19 |             tree,
20 |         }
21 |     }
22 | }
23 | 
24 | impl<'a, C: AMTConfigTrait> Deref for AMTNodeWriteGuard<'a, C> {
25 |     type Target = C::Data;
26 | 
27 |     fn deref(&self) -> &Self::Target {
28 |         &self.value
29 |     }
30 | }
31 | 
32 | impl<'a, C: AMTConfigTrait> DerefMut for AMTNodeWriteGuard<'a, C> {
33 |     fn deref_mut(&mut self) -> &mut Self::Target {
34 |         &mut self.value
35 |     }
36 | }
37 | 
38 | impl<'a, C: AMTConfigTrait> Drop for AMTNodeWriteGuard<'a, C> {
39 |     fn drop(&mut self) {
40 |         let mut fr_int = self.value.as_fr_int();
41 |         let borrow_bit = fr_int.sub_noborrow(&self.old_fr_int);
42 |         if borrow_bit {
43 |             fr_int.add_nocarry(&FrParams::<C::PE>::R);
44 |         }
45 |         std::mem::swap(self.tree.get_mut(self.index), &mut self.value);
46 |         self.tree.update(self.index, fr_int);
47 |     }
48 | }
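
This guard is why callers never invoke `AMTree::update` by hand: mutating through `write_versions` records the old field encoding, and `drop` submits the delta. Usage as in `amt/test.rs` (editor's annotation):

// *amt.write_versions(index) += 1;
//   ^ DerefMut into C::Data; on drop the guard computes
//     delta = new_encoding - old_encoding (wrapped on borrow) and calls
//     tree.update(index, delta), maintaining the commitment and the
//     per-node proofs incrementally.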
-------------------------------------------------------------------------------- /asb-authdb/lvmt-db/src/bin/bn254pp_from_ppot.rs: --------------------------------------------------------------------------------
1 | use lvmt_db::crypto::{
2 |     export::{CanonicalSerialize, G1Aff, G2Aff},
3 |     pp_file_name, PowerTau,
4 | };
5 | use ppot2ark::{from_challenge, Adapter, Bn256, CeremonyParams};
6 | use std::fs::File;
7 | 
8 | use ark_bn254::Bn254;
9 | 
10 | fn fetch_pp_from_ppot(filename: &str, size: usize) -> PowerTau<Bn254> {
11 |     let params = CeremonyParams::<Bn256>::new(28, 20);
12 |     let accumulator = from_challenge(filename, size, &params);
13 |     let g1: Vec<G1Aff<Bn254>> = (0..(1 << size))
14 |         .map(|idx| accumulator.tau_powers_g1[idx].adapt())
15 |         .collect();
16 |     let g2: Vec<G2Aff<Bn254>> = (0..(1 << size))
17 |         .map(|idx| accumulator.tau_powers_g2[idx].adapt())
18 |         .collect();
19 |     return PowerTau(g1, g2);
20 | }
21 | 
22 | fn main() {
23 |     let args: Vec<String> = std::env::args().collect();
24 |     if args.len() != 4 {
25 |         println!("Usage: \n<exec> <challenge_file> <pow_size> <output_dir>");
26 |         std::process::exit(exitcode::USAGE);
27 |     }
28 | 
29 |     let challenge_filename = &args[1];
30 |     let pow_size = args[2].parse().expect("could not parse pow_size");
31 |     let dir: &String = &args[3].parse().expect("could not parse file");
32 | 
33 |     let file = format!("{}/{}", dir, pp_file_name::<Bn254>(pow_size));
34 | 
35 |     let pp = fetch_pp_from_ppot(challenge_filename, pow_size);
36 | 
37 |     let buffer = File::create(file).unwrap();
38 |     pp.serialize_uncompressed(&buffer).unwrap();
39 | }

-------------------------------------------------------------------------------- /asb-authdb/lvmt-db/src/crypto/error.rs: --------------------------------------------------------------------------------
1 | error_chain! {
2 |     links {
3 |     }
4 | 
5 |     foreign_links {
6 |         File(std::io::Error);
7 |         Serialize(crate::crypto::export::SerializationError);
8 |     }
9 | 
10 |     errors {
11 |         InconsistentLength {
12 |             description("Inconsistent length between expected params and real params")
13 |             display("Inconsistent length between expected params and real params")
14 |         }
15 |     }
16 | }

-------------------------------------------------------------------------------- /asb-authdb/lvmt-db/src/crypto/export.rs: --------------------------------------------------------------------------------
1 | // Re-export all the required components of Zexe's repo.
2 | 
3 | // Since Zexe's repo doesn't have a stable implementation and could be refactored in the future,
4 | // we import all the required objects in one place; all other code in this repo should import from here.
5 | 
6 | pub use ark_bls12_381::Bls12_381;
7 | pub use ark_bn254::Bn254;
8 | pub use ark_ec::{AffineCurve, PairingEngine, ProjectiveCurve};
9 | pub use ark_ff::{
10 |     utils::k_adicity, BigInteger, FftField, Field, FpParameters, FromBytes, One, PrimeField,
11 |     ToBytes, UniformRand, Zero,
12 | };
13 | pub use ark_poly::{EvaluationDomain, Radix2EvaluationDomain};
14 | pub use ark_serialize::{
15 |     CanonicalDeserialize, CanonicalSerialize, Read, SerializationError, Write,
16 | };
17 | 
18 | pub type G1<PE> = <PE as PairingEngine>::G1Projective;
19 | pub type G1Aff<PE> = <PE as PairingEngine>::G1Affine;
20 | pub type G2<PE> = <PE as PairingEngine>::G2Projective;
21 | pub type G2Aff<PE> = <PE as PairingEngine>::G2Affine;
22 | pub type Fr<PE> = <PE as PairingEngine>::Fr;
23 | pub type FrInt<PE> = <Fr<PE> as PrimeField>::BigInt;
24 | pub type FrParams<PE> = <Fr<PE> as PrimeField>::Params;
25 | 
26 | pub type Pairing = Bn254;
27 | pub type G1Projective = ark_bn254::G1Projective;
28 | pub type G1Affine = ark_bn254::G1Affine;
29 | 
30 | pub mod instances {
31 |     use super::Pairing;
32 |     pub type G1 = super::G1<Pairing>;
33 |     pub type G1Aff = super::G1Aff<Pairing>;
34 |     pub type G2 = super::G2<Pairing>;
35 |     pub type G2Aff = super::G2Aff<Pairing>;
36 |     pub type Fr = super::Fr<Pairing>;
37 |     pub type FrInt = super::FrInt<Pairing>;
38 |     pub type FrParams = super::FrParams<Pairing>;
39 | }

-------------------------------------------------------------------------------- /asb-authdb/lvmt-db/src/crypto/mod.rs: --------------------------------------------------------------------------------
1 | mod error;
2 | pub mod export;
3 | mod power_tau;
4 | mod prove_params;
5 | mod utils;
6 | 
7 | pub use export::Pairing;
8 | pub use power_tau::PowerTau;
9 | pub use prove_params::AMTParams;
10 | pub use utils::{pp_file_name, TypeDepths, TypeUInt};

-------------------------------------------------------------------------------- /asb-authdb/lvmt-db/src/crypto/power_tau.rs: --------------------------------------------------------------------------------
1 | use super::error;
2 | use super::export::{
3 |     AffineCurve, CanonicalDeserialize, CanonicalSerialize, Fr, G1Aff, G2Aff, PairingEngine,
4 |     ProjectiveCurve, SerializationError, UniformRand, G1, G2,
5 | };
6 | use super::pp_file_name;
7 | use ark_ff::utils::k_adicity;
8 | use ark_ff::Field;
9 | use rand;
10 | use rayon::prelude::*;
11 | use std::fs::{create_dir_all, File};
12 | use std::io::{Read, Write};
13 | use std::path::Path;
14 | 
15 | #[derive(CanonicalDeserialize, CanonicalSerialize)]
16 | pub struct PowerTau<PE: PairingEngine>(pub Vec<G1Aff<PE>>, pub Vec<G2Aff<PE>>);
17 | 
18 | fn power_tau<'a, G: AffineCurve>(gen: &'a G, tau: &'a G::ScalarField, length: usize) -> Vec<G> {
19 |     let gen: G::Projective = gen.into_projective();
20 |     (0usize..length)
21 |         .into_par_iter()
22 |         .chunks(1024)
23 |         .map(|x| {
24 |             ProjectiveCurve::batch_normalization_into_affine(
25 |                 &x.iter()
26 |                     .map(|idx| {
27 |                         let mut gen = gen.clone();
28 |                         gen *= tau.pow([*idx as u64]);
29 |                         gen
30 |                     })
31 |                     .collect::<Vec<_>>()[..],
32 |             )
33 |         })
34 |         .flatten()
35 |         .collect()
36 | }
37 | 
38 | impl<PE: PairingEngine> PowerTau<PE> {
39 |     #[cfg(test)]
40 |     fn setup_with_tau(tau: Fr<PE>, depth: usize) -> PowerTau<PE> {
41 |         Self::setup_inner(Some(tau), depth)
42 |     }
43 | 
44 |     pub fn setup(depth: usize) -> PowerTau<PE> {
45 |         Self::setup_inner(None, depth)
46 |     }
47 | 
48 |     fn setup_inner(tau: Option<Fr<PE>>, depth: usize) -> PowerTau<PE> {
49 |         let random_tau = Fr::<PE>::rand(&mut rand::thread_rng());
50 |         let tau = tau.unwrap_or(random_tau);
51 | 
52 |         let gen1 = G1Aff::<PE>::prime_subgroup_generator();
53 |         let gen2 = G2Aff::<PE>::prime_subgroup_generator();
54 | 
55 |         let g1pp: Vec<G1Aff<PE>> = power_tau(&gen1, &tau, 1 << depth);
56 |         let g2pp: Vec<G2Aff<PE>> = power_tau(&gen2, &tau, 1 << depth);
57 | 
58 |         return PowerTau(g1pp, g2pp);
59 |     }
60 | 
61 |     fn from_dir_inner(file: &str, expected_depth: usize) -> Result<PowerTau<PE>, error::Error> {
62 |         let buffer = File::open(file)?;
63 |         let pp: PowerTau<PE> = CanonicalDeserialize::deserialize_unchecked(buffer)?;
64 |         let (g1_len, g2_len) = (pp.0.len(), pp.1.len());
65 |         let depth = k_adicity(2, g1_len) as usize;
66 |         if g1_len != g2_len {
67 |             Err(error::ErrorKind::InconsistentLength.into())
68 |         } else if expected_depth > depth {
69 |             Err(error::ErrorKind::InconsistentLength.into())
70 |         } else if expected_depth < depth {
71 |             let g1_vec = pp.0[..1 << expected_depth].to_vec();
72 |             let g2_vec = pp.1[..1 << expected_depth].to_vec();
73 |             Ok(PowerTau(g1_vec, g2_vec))
74 |         } else {
75 |             Ok(pp)
76 |         }
77 |     }
78 | 
79 |     pub fn from_dir(dir: &str, expected_depth: usize) -> PowerTau<PE> {
80 |         let file = &format!("{}/{}", dir, pp_file_name::<PE>(expected_depth));
81 |         Self::from_dir_inner(file, expected_depth).expect(&format!(
82 |             "Fail to load public parameters for {} at depth {}, read TODO to generate",
83 |             std::any::type_name::<PE>(),
84 |             expected_depth
85 |         ))
86 |     }
87 | 
88 |     pub fn from_dir_or_new(dir: &str, expected_depth: usize) -> PowerTau<PE> {
89 |         let file = &format!("{}/{}", dir, pp_file_name::<PE>(expected_depth));
90 |         match Self::from_dir_inner(file, expected_depth) {
91 |             Ok(pp) => pp,
92 |             Err(_) => {
93 |                 let pp = Self::setup(expected_depth);
94 |                 create_dir_all(Path::new(file).parent().unwrap()).unwrap();
95 |                 let buffer = File::create(file).unwrap();
96 |                 pp.serialize_uncompressed(&buffer).unwrap();
97 |                 pp
98 |             }
99 |         }
100 |     }
101 | 
102 |     pub fn into_projective(self) -> (Vec<G1<PE>>, Vec<G2<PE>>) {
103 |         let g1pp = self.0.iter().copied().map(|x| G1::<PE>::from(x)).collect();
104 |         let g2pp = self.1.iter().copied().map(|x| G2::<PE>::from(x)).collect();
105 |         (g1pp, g2pp)
106 |     }
107 | }
108 | 
109 | #[test]
110 | fn test_partial_load() {
111 |     type Pairing = super::export::Pairing;
112 | 
113 |     let tau = Fr::<Pairing>::rand(&mut rand::thread_rng());
114 |     let large_pp = PowerTau::<Pairing>::setup_with_tau(tau, 8);
115 |     let small_pp = PowerTau::<Pairing>::setup_with_tau(tau, 4);
116 | 
117 |     assert_eq!(small_pp.0[..], large_pp.0[..(small_pp.0.len())]);
118 |     assert_eq!(small_pp.1[..], large_pp.1[..(small_pp.1.len())]);
119 | }
120 | 
121 | #[test]
122 | fn test_parallel_build() {
123 |     use crate::crypto::export::{Pairing, ProjectiveCurve};
124 | 
125 |     const DEPTH: usize = 13;
126 |     let tau = Fr::<Pairing>::rand(&mut rand::thread_rng());
127 |     let gen1 = G1Aff::<Pairing>::prime_subgroup_generator();
128 |     let g1pp_ans = power_tau(&gen1, &tau, 1 << DEPTH);
129 | 
130 |     let mut g1pp: Vec<G1Aff<Pairing>> = vec![];
131 |     g1pp.reserve(1 << DEPTH);
132 |     let mut gen1 = gen1.into_projective();
133 |     for _ in 0..1 << DEPTH {
134 |         g1pp.push(gen1.into_affine());
135 |         gen1 *= tau.clone();
136 |     }
137 |     assert_eq!(g1pp, g1pp_ans)
138 | }
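
Putting the loading APIs together — an editor-added sketch; the directory and depth are placeholders (the checked-in `pp/power-tau-ho1sTw-06.bin` is a depth-6 file):

// use lvmt_db::crypto::{export::Pairing, PowerTau};
//
// // Loads `./pp/power-tau-<hash>-08.bin` if present (a deeper file is
// // truncated by `from_dir_inner`); otherwise falls back to the local
// // `setup`, which samples tau in-process and is only suitable for tests.
// let pp = PowerTau::<Pairing>::from_dir_or_new("./pp", 8);
// let (g1pp, g2pp) = pp.into_projective();
//
// // For parameters derived from the Perpetual Powers of Tau ceremony,
// // use `bin/bn254pp_from_ppot.rs` instead of trusting a local tau.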
type_uint { 14 | ( $(#[$attr:meta])* $visibility:vis struct $name:ident ($num:tt); ) => { 15 | $(#[$attr])* 16 | #[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)] 17 | $visibility struct $name; 18 | 19 | impl TypeUInt for $name { 20 | const USIZE: usize = $num; 21 | } 22 | }; 23 | } 24 | 25 | type_uint! { 26 | pub struct TypeDepths(DEPTHS); 27 | } 28 | 29 | pub(crate) fn type_hash() -> String { 30 | let type_name = std::any::type_name::().to_string(); 31 | let mut s = DefaultHasher::new(); 32 | type_name.hash(&mut s); 33 | base64::encode(s.finish().to_be_bytes()) 34 | } 35 | 36 | fn file_name(prefix: &str, depth: usize) -> String { 37 | format!("{}-{}-{:02}.bin", prefix, &type_hash::()[..6], depth) 38 | } 39 | 40 | pub fn pp_file_name(depth: usize) -> String { 41 | file_name::("power-tau", depth) 42 | } 43 | 44 | pub fn amtp_file_name(depth: usize) -> String { 45 | file_name::("amt-params", depth) 46 | } 47 | 48 | // This is an ad-hoc fix due to the upstream crate provides insufficient APIs for projective curve. 49 | // when the const generic stabilized, this function could be a constant function. 50 | // pub fn serialize_length() -> usize { 51 | // let mem_point: usize = std::mem::size_of::(); 52 | // let mem_base: usize = std::mem::size_of::(); 53 | // 54 | // assert_eq!(mem_point % mem_base, 0); 55 | // let coords: usize = mem_point / mem_base; 56 | // (G::BaseField::default()).uncompressed_size() * coords 57 | // } 58 | // 59 | // #[test] 60 | // fn test_serialize_length() { 61 | // use crate::crypto::export::{Pairing, ToBytes, G1}; 62 | // 63 | // let sample = G1::::prime_subgroup_generator(); 64 | // let mut result: Vec = Vec::new(); 65 | // sample.write(&mut result).unwrap(); 66 | // 67 | // assert_eq!(serialize_length::>(), result.len()); 68 | // } 69 | -------------------------------------------------------------------------------- /asb-authdb/lvmt-db/src/enable_log.rs: -------------------------------------------------------------------------------- 1 | #![allow(dead_code)] 2 | 3 | use log::LevelFilter; 4 | use log4rs::{ 5 | append::console::ConsoleAppender, 6 | config::{Appender, Config as LogConfig, Logger, Root}, 7 | }; 8 | 9 | pub fn enable_log(level: LevelFilter) -> Result<(), String> { 10 | let mut conf_builder = LogConfig::builder().appender( 11 | Appender::builder().build("stdout", Box::new(ConsoleAppender::builder().build())), 12 | ); 13 | let root_builder = Root::builder().appender("stdout"); 14 | // Should add new crate names here 15 | for crate_name in ["lvmt-db", "amt-bench"].iter() { 16 | conf_builder = conf_builder.logger(Logger::builder().build(*crate_name, level)); 17 | } 18 | let log_config = conf_builder 19 | .build(root_builder.build(level)) 20 | .map_err(|e| format!("failed to build log config: {:?}", e))?; 21 | log4rs::init_config(log_config) 22 | .map_err(|e| format!("failed to initialize log with config: {:?}", e))?; 23 | Ok(()) 24 | } 25 | 26 | pub fn enable_debug_log() { 27 | enable_log(LevelFilter::Debug).unwrap(); 28 | } 29 | 30 | pub fn enable_info_log() { 31 | enable_log(LevelFilter::Info).unwrap(); 32 | } 33 | -------------------------------------------------------------------------------- /asb-authdb/lvmt-db/src/lib.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | extern crate error_chain; 3 | #[macro_use] 4 | extern crate static_assertions; 5 | 6 | extern crate base64; 7 | extern crate core; 8 | #[cfg(test)] 9 | extern crate kvdb_memorydb; 10 | 11 | pub mod amt; 12 | pub mod crypto; 13 | mod 
enable_log; 14 | pub mod lvmt_db; 15 | pub mod merkle; 16 | pub mod multi_layer_amt; 17 | pub mod serde; 18 | pub mod single_amt; 19 | pub mod storage; 20 | 21 | pub use crate::lvmt_db::{LvmtDB, LvmtRoot, Proof}; 22 | pub use multi_layer_amt::Key; 23 | 24 | #[allow(unused)] 25 | use enable_log::*; 26 | 27 | #[cfg(not(any(feature = "medium_lvmt", feature = "large_lvmt", feature = "huge_lvmt")))] 28 | const DEPTHS: usize = 8; 29 | #[cfg(feature = "media_lvmt")] 30 | const DEPTHS: usize = 12; 31 | #[cfg(feature = "large_lvmt")] 32 | const DEPTHS: usize = 16; 33 | #[cfg(feature = "huge_lvmt")] 34 | const DEPTHS: usize = 20; 35 | -------------------------------------------------------------------------------- /asb-authdb/lvmt-db/src/merkle/mod.rs: -------------------------------------------------------------------------------- 1 | use crate::storage::access::PUT_MODE; 2 | use crate::storage::{DBAccess, DBColumn, FlattenArray}; 3 | use ethereum_types::H256; 4 | use keccak_hash::{keccak, KECCAK_EMPTY}; 5 | 6 | pub struct StaticMerkleTree { 7 | data: DBAccess, 8 | root: H256, 9 | depth: u32, 10 | } 11 | 12 | pub type MerkleProof = (Vec, u64); 13 | 14 | fn combine_hash(a: &H256, b: &H256) -> H256 { 15 | let mut input = a.0.to_vec(); 16 | input.extend_from_slice(&b.0); 17 | let answer = keccak(&input); 18 | answer 19 | } 20 | 21 | impl StaticMerkleTree { 22 | pub fn new(db: DBColumn, epoch: u64) -> Self { 23 | let mut backend: DBAccess = 24 | DBAccess::new(epoch.to_be_bytes().into(), db); 25 | let depth = backend.get(&0).to_low_u64_be() as u32; 26 | let root = backend.get(&1).clone(); 27 | Self { 28 | data: backend, 29 | depth, 30 | root, 31 | } 32 | } 33 | 34 | pub fn root(&self) -> &H256 { 35 | &self.root 36 | } 37 | 38 | pub fn prove(&mut self, position: u64) -> MerkleProof { 39 | let mut proofs = Vec::with_capacity(self.depth as usize); 40 | for depth in (1..=self.depth).rev() { 41 | let height = self.depth - depth; 42 | let index = (1 << depth) | ((position >> height) ^ 1) as usize; 43 | let mut answer = self.data.get(&index).clone(); 44 | if answer == Default::default() { 45 | answer = KECCAK_EMPTY 46 | }; 47 | proofs.push(answer); 48 | } 49 | return (proofs, position); 50 | } 51 | 52 | pub fn verify(root: &H256, hash: &H256, proof: &MerkleProof) -> bool { 53 | let (merkle_path, pos) = proof; 54 | let mut current_hash = hash.clone(); 55 | for (index, proof) in merkle_path.iter().enumerate() { 56 | let right_append = (*pos >> index) % 2 == 0; 57 | current_hash = if right_append { 58 | combine_hash(¤t_hash, proof) 59 | } else { 60 | combine_hash(proof, ¤t_hash) 61 | }; 62 | } 63 | current_hash == *root 64 | } 65 | 66 | pub fn dump<'a>(db: DBColumn, epoch: u64, data: Vec, only_root: bool) -> H256 { 67 | let length = data.len(); 68 | let depth = length.next_power_of_two().trailing_zeros(); 69 | 70 | let mut backend: DBAccess = 71 | DBAccess::new(epoch.to_be_bytes().into(), db); 72 | 73 | let mut this_level = data; 74 | let mut root: H256 = Default::default(); 75 | 76 | for level in (0..=depth).rev() { 77 | for (i, hash) in this_level.iter().enumerate() { 78 | if !only_root { 79 | backend.set(&((1 << level) + i), hash.clone()); 80 | } 81 | if level == 0 { 82 | root = hash.clone() 83 | } 84 | } 85 | if this_level.len() % 2 != 0 { 86 | this_level.push(KECCAK_EMPTY); 87 | } 88 | this_level = this_level 89 | .chunks(2) 90 | .map(|x| combine_hash(&x[0], &x[1])) 91 | .collect(); 92 | } 93 | 94 | backend.set(&0, H256::from_low_u64_be(depth as u64)); 95 | 96 | *PUT_MODE.lock_mut().unwrap() = 3; 97 | 
backend.flush_cache(); 98 | 99 | return root; 100 | } 101 | } 102 | 103 | #[test] 104 | fn test_static_merkle_tree() { 105 | let db = crate::storage::test_db_col(); 106 | for epoch in 1u64..=32 { 107 | let data: Vec = (0..epoch) 108 | .map(|x| H256::from_low_u64_be(x + 65536)) 109 | .collect(); 110 | let root = StaticMerkleTree::dump(db.clone(), epoch, data, false); 111 | 112 | let mut tree = StaticMerkleTree::new(db.clone(), epoch); 113 | assert_eq!(root, tree.root().clone()); 114 | for i in 0..epoch { 115 | let proof = tree.prove(i); 116 | assert!( 117 | StaticMerkleTree::verify(&root, &H256::from_low_u64_be(i + 65536), &proof), 118 | "fail proof at tree {} pos {}", 119 | epoch, 120 | i 121 | ); 122 | } 123 | } 124 | } 125 | -------------------------------------------------------------------------------- /asb-authdb/lvmt-db/src/multi_layer_amt/key.rs: -------------------------------------------------------------------------------- 1 | use std::cmp::min; 2 | use std::convert::TryFrom; 3 | 4 | use lvmt_serde_derive::{MyFromBytes, MyToBytes}; 5 | 6 | use super::{TreeName, DEPTHS}; 7 | 8 | #[derive(Default, Debug, Hash, PartialEq, Eq, Clone, PartialOrd, Ord, MyToBytes, MyFromBytes)] 9 | pub struct Key(pub Vec); 10 | 11 | impl AsRef<[u8]> for Key { 12 | fn as_ref(&self) -> &[u8] { 13 | &self.0 14 | } 15 | } 16 | 17 | impl Key { 18 | #[inline] 19 | fn mid(&self, start: usize, length: usize) -> u128 { 20 | if length == 0 { 21 | return 0; 22 | } 23 | 24 | let start_byte = start / 8; 25 | let start_bit = start - start_byte * 8; 26 | 27 | let mut entry = self.0[start_byte..min(start_byte + 16, self.0.len())].to_vec(); 28 | 29 | if entry.len() != 16 { 30 | entry.resize(16, 0); 31 | } 32 | 33 | let entry = u128::from_be_bytes(<[u8; 16]>::try_from(entry).unwrap()); 34 | 35 | return entry >> (start_bit + (128 - length)); 36 | } 37 | 38 | pub fn tree_at_level(&self, level: u8) -> TreeName { 39 | TreeName( 40 | (0..level) 41 | .map(|level| self.index_at_level(level) as u32) 42 | .collect(), 43 | ) 44 | } 45 | 46 | pub fn index_at_level(&self, level: u8) -> usize { 47 | // if level > 3 { 48 | // dbg!(level); 49 | // } 50 | let length = (level as usize) * DEPTHS; 51 | self.mid(length, DEPTHS) as usize 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /asb-authdb/lvmt-db/src/multi_layer_amt/mod.rs: -------------------------------------------------------------------------------- 1 | mod key; 2 | mod name; 3 | mod node; 4 | mod tree; 5 | 6 | pub use self::{ 7 | key::Key, 8 | name::TreeName, 9 | node::{EpochPosition, Node, MAX_VERSION_NUMBER}, 10 | tree::{AMTNodeIndex, VerInfo, VersionTree}, 11 | }; 12 | use crate::{ 13 | amt::{AMTConfigTrait, AMTree}, 14 | crypto::export::{Pairing, G1}, 15 | storage::{FlattenArray, FlattenTree}, 16 | }; 17 | 18 | #[derive(Copy, Clone)] 19 | pub struct AMTConfig; 20 | 21 | impl AMTConfigTrait for AMTConfig { 22 | type PE = Pairing; 23 | type Name = TreeName; 24 | type Data = Node; 25 | type Commitment = G1; 26 | type DataLayout = FlattenArray; 27 | type TreeLayout = FlattenTree; 28 | type Height = crate::crypto::TypeDepths; 29 | } 30 | 31 | type Tree = AMTree; 32 | pub type Commitment = G1<::PE>; 33 | 34 | const DEPTHS: usize = ::DEPTHS; 35 | 36 | //TODO: Store Key for non-existent proof 37 | -------------------------------------------------------------------------------- /asb-authdb/lvmt-db/src/multi_layer_amt/name.rs: -------------------------------------------------------------------------------- 1 | use 
crate::serde::{MyFromBytes, MyToBytes, SerdeType}; 2 | use std::io::{Read, Result, Write}; 3 | 4 | #[derive(Default, Debug, Clone, Hash, PartialEq, Eq, Ord, PartialOrd)] 5 | pub struct TreeName(pub(super) Vec); 6 | 7 | impl MyFromBytes for TreeName { 8 | fn read(mut reader: R, ty: SerdeType) -> Result { 9 | let length: u8 = MyFromBytes::read(&mut reader, ty)?; 10 | let mut answer = Vec::::with_capacity(length as usize); 11 | for _ in 0..length { 12 | answer.push(MyFromBytes::read(&mut reader, ty)?); 13 | } 14 | Ok(Self(answer)) 15 | } 16 | } 17 | 18 | impl MyToBytes for TreeName { 19 | fn write(&self, mut writer: W, ty: SerdeType) -> Result<()> { 20 | MyToBytes::write(&(self.0.len() as u8), &mut writer, ty)?; 21 | for item in self.0.iter() { 22 | MyToBytes::write(item, &mut writer, ty)?; 23 | } 24 | Ok(()) 25 | } 26 | } 27 | 28 | impl TreeName { 29 | pub const fn root() -> Self { 30 | TreeName(Vec::new()) 31 | } 32 | 33 | pub fn level_index(&self) -> Option { 34 | self.0.last().cloned() 35 | } 36 | 37 | pub fn child(&self, index: u32) -> Self { 38 | let mut answer = self.clone(); 39 | answer.0.push(index); 40 | answer 41 | } 42 | 43 | pub fn parent(&self) -> Option { 44 | let mut answer = self.clone(); 45 | let top_element = answer.0.pop(); 46 | if top_element.is_none() { 47 | None 48 | } else { 49 | Some(answer) 50 | } 51 | } 52 | } 53 | 54 | #[test] 55 | fn test_tree_name_string() { 56 | assert_eq!(TreeName(vec![]).to_bytes_consensus(), [0u8]); 57 | 58 | assert_eq!( 59 | TreeName(vec![1]).to_bytes_consensus(), 60 | [1u8, 1u8, 0u8, 0u8, 0u8] 61 | ); 62 | 63 | assert_eq!( 64 | TreeName::from_bytes_consensus(&TreeName(vec![1, 2, 3]).to_bytes_consensus()).unwrap(), 65 | TreeName(vec![1, 2, 3]) 66 | ); 67 | } 68 | -------------------------------------------------------------------------------- /asb-authdb/lvmt-db/src/multi_layer_amt/node.rs: -------------------------------------------------------------------------------- 1 | use crate::amt::AMTData; 2 | use crate::crypto::export::{ 3 | FpParameters, Fr as FrGeneric, FrInt as FrIntGeneric, Pairing, PrimeField, 4 | }; 5 | use lvmt_serde_derive::{MyFromBytes, MyToBytes}; 6 | use std::ops::{Deref, DerefMut}; 7 | 8 | pub(super) type Fr = FrGeneric; 9 | pub(super) type FrInt = FrIntGeneric; 10 | 11 | pub const VERSION_BITS: usize = 40; 12 | pub const MAX_VERSION_NUMBER: u64 = (1 << VERSION_BITS) - 1; 13 | 14 | #[allow(dead_code)] 15 | fn const_assert() { 16 | const CAPACITY: u32 = ::Params::CAPACITY; 17 | const_assert!(CAPACITY > 40 * 6); 18 | } 19 | 20 | #[derive(Default, Clone, Debug, MyFromBytes, MyToBytes)] 21 | pub struct KeyVersions(pub Vec); 22 | impl Deref for KeyVersions { 23 | type Target = Vec; 24 | 25 | fn deref(&self) -> &Self::Target { 26 | &self.0 27 | } 28 | } 29 | 30 | impl DerefMut for KeyVersions { 31 | fn deref_mut(&mut self) -> &mut Self::Target { 32 | &mut self.0 33 | } 34 | } 35 | 36 | #[derive(Default, Clone, Copy, Debug, MyFromBytes, MyToBytes)] 37 | pub struct EpochPosition { 38 | pub(crate) epoch: u64, 39 | pub(crate) position: u64, 40 | } 41 | 42 | #[derive(Default, Clone, Debug, MyFromBytes, MyToBytes)] 43 | pub struct Node { 44 | pub(crate) key_versions: KeyVersions, 45 | pub(crate) tree_version: u64, 46 | pub(crate) tree_position: EpochPosition, 47 | } 48 | 49 | impl AMTData for Node { 50 | #[cfg(target_endian = "little")] 51 | fn as_fr_int(&self) -> FrInt { 52 | assert!(self.key_versions.len() <= 5); 53 | let mut result = [0u8; 32]; 54 | 55 | let mut start: usize = 5; 56 | for ver in self.key_versions.iter() { 57 | 
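            // Each version below is VERSION_BITS = 40 bits wide, i.e. 5 little-endian
            // bytes: key versions are packed at byte offsets 5, 10, ..., 25 and the
            // tree version at bytes 0..5, so all six versions fit in a single field
            // element (the `const_assert` above checks CAPACITY > 40 * 6).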
result[start..(start + 5)].copy_from_slice(&ver.to_le_bytes()[0..5]); 58 | start += 5; 59 | } 60 | result[0..5].copy_from_slice(&self.tree_version.to_le_bytes()[0..5]); 61 | 62 | let result = unsafe { std::mem::transmute::<[u8; 32], [u64; 4]>(result) }; 63 | FrInt::new(result) 64 | } 65 | } 66 | 67 | impl Node { 68 | pub fn versions_from_fr_int(fr_int: &FrInt, index: usize) -> u64 { 69 | assert!(index < 6); 70 | let byte_array = unsafe { std::mem::transmute::<&[u64; 4], &[u8; 32]>(&fr_int.0) }; 71 | let mut answer = [0u8; 8]; 72 | answer[..5].copy_from_slice(&byte_array[index * 5..(index + 1) * 5]); 73 | u64::from_le_bytes(answer) 74 | } 75 | } 76 | 77 | #[cfg(test)] 78 | mod test { 79 | use super::*; 80 | use rand::{prelude::ThreadRng, Rng}; 81 | 82 | #[test] 83 | fn test_array_transmute() { 84 | let mut node = Node { 85 | key_versions: KeyVersions(Vec::new()), 86 | tree_version: 0, 87 | tree_position: Default::default(), 88 | }; 89 | node.tree_version = 1; 90 | (2..=6).for_each(|x: u64| node.key_versions.push(x)); 91 | 92 | let mut answer = [0u64; 4]; 93 | answer[0] = 1; 94 | answer[0] += 2 * (1 << VERSION_BITS); 95 | answer[1] += 3 * (1 << VERSION_BITS * 2 - 64); 96 | answer[1] += 4 * (1 << VERSION_BITS * 3 - 64); 97 | answer[2] += 5 * (1 << VERSION_BITS * 4 - 128); 98 | answer[3] += 6 * (1 << VERSION_BITS * 5 - 192); 99 | let answer = FrInt::new(answer); 100 | 101 | assert_eq!(node.as_fr_int(), answer); 102 | } 103 | 104 | #[cfg(test)] 105 | fn test_random_node_as_fr_int(rng: &mut ThreadRng) { 106 | use crate::crypto::export::BigInteger; 107 | 108 | let mut node = Node { 109 | key_versions: KeyVersions(vec![Default::default(); 5]), 110 | tree_version: 0, 111 | tree_position: Default::default(), 112 | }; 113 | 114 | const MASK: u64 = (1 << VERSION_BITS) - 1; 115 | 116 | node.tree_version = rng.gen::() & MASK; 117 | let mut answer = FrInt::from(node.tree_version); 118 | for i in 0..5 { 119 | node.key_versions[i] = rng.gen::() & MASK; 120 | let mut fr_int = FrInt::from(node.key_versions[i]); 121 | fr_int.muln((VERSION_BITS * (i + 1)) as u32); 122 | answer.add_nocarry(&fr_int); 123 | } 124 | 125 | assert_eq!(node.as_fr_int(), answer); 126 | 127 | assert_eq!(node.tree_version, Node::versions_from_fr_int(&answer, 0)); 128 | for i in 0..5 { 129 | assert_eq!( 130 | node.key_versions[i], 131 | Node::versions_from_fr_int(&answer, i + 1) 132 | ); 133 | } 134 | } 135 | 136 | #[test] 137 | fn test_as_fr_int() { 138 | let mut rng = rand::thread_rng(); 139 | for _ in 0..1000 { 140 | test_random_node_as_fr_int(&mut rng); 141 | } 142 | } 143 | } 144 | -------------------------------------------------------------------------------- /asb-authdb/lvmt-db/src/serde/basic.rs: -------------------------------------------------------------------------------- 1 | use std::io::{Read, Result, Write}; 2 | 3 | use super::{MyFromBytes, MyToBytes, SerdeType}; 4 | 5 | macro_rules! 
impl_for_basic { 6 | ($uint: ty) => { 7 | impl MyFromBytes for $uint { 8 | #[inline] 9 | fn read(mut reader: R, _ty: SerdeType) -> Result { 10 | let mut bytes = (0 as $uint).to_le_bytes(); 11 | reader.read_exact(&mut bytes)?; 12 | Ok(<$uint>::from_le_bytes(bytes)) 13 | } 14 | } 15 | 16 | impl MyToBytes for $uint { 17 | #[inline] 18 | fn write(&self, mut writer: W, _ty: SerdeType) -> Result<()> { 19 | writer.write_all(&self.to_le_bytes()) 20 | } 21 | } 22 | }; 23 | } 24 | impl_for_basic!(u8); 25 | impl_for_basic!(u16); 26 | impl_for_basic!(u32); 27 | impl_for_basic!(u64); 28 | impl_for_basic!(usize); 29 | 30 | impl MyFromBytes for Vec { 31 | fn read(mut reader: R, ty: SerdeType) -> Result { 32 | let length: usize = MyFromBytes::read(&mut reader, ty)?; 33 | let mut answer = vec![0u8; length]; 34 | reader.read_exact(&mut answer)?; 35 | Ok(answer) 36 | } 37 | } 38 | 39 | impl MyToBytes for Vec { 40 | fn write(&self, mut writer: W, ty: SerdeType) -> Result<()> { 41 | MyToBytes::write(&self.len(), &mut writer, ty)?; 42 | writer.write_all(&self)?; 43 | Ok(()) 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /asb-authdb/lvmt-db/src/serde/curves.rs: -------------------------------------------------------------------------------- 1 | use std::io::{Read, Result, Write}; 2 | 3 | use crate::crypto::export::{ 4 | AffineCurve, FromBytes, G1Affine, G1Projective, ProjectiveCurve, ToBytes, 5 | }; 6 | 7 | use super::{MyFromBytes, MyToBytes, SerdeType}; 8 | 9 | impl MyFromBytes for G1Projective { 10 | #[inline] 11 | fn read(reader: R, ty: SerdeType) -> Result { 12 | if ty.consistent { 13 | let g1_aff: ::Affine = FromBytes::read(reader)?; 14 | Ok(g1_aff.into_projective()) 15 | } else { 16 | FromBytes::read(reader) 17 | } 18 | } 19 | } 20 | 21 | impl MyToBytes for G1Projective { 22 | #[inline] 23 | fn write(&self, writer: W, ty: SerdeType) -> Result<()> { 24 | if ty.consistent { 25 | let g1_aff = self.into_affine(); 26 | ToBytes::write(&g1_aff, writer) 27 | } else { 28 | ToBytes::write(self, writer) 29 | } 30 | } 31 | } 32 | 33 | impl MyFromBytes for G1Affine { 34 | #[inline] 35 | fn read(reader: R, _ty: SerdeType) -> Result { 36 | FromBytes::read(reader) 37 | } 38 | } 39 | 40 | impl MyToBytes for G1Affine { 41 | #[inline] 42 | fn write(&self, writer: W, _ty: SerdeType) -> Result<()> { 43 | ToBytes::write(self, writer) 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /asb-authdb/lvmt-db/src/serde/h256.rs: -------------------------------------------------------------------------------- 1 | use super::{MyFromBytes, MyToBytes, SerdeType}; 2 | use keccak_hash::H256; 3 | use std::io::{Read, Result, Write}; 4 | 5 | impl MyFromBytes for H256 { 6 | fn read(mut reader: R, _ty: SerdeType) -> Result { 7 | let mut answer = H256::default(); 8 | reader.read_exact(answer.as_mut())?; 9 | Ok(answer) 10 | } 11 | } 12 | 13 | impl MyToBytes for H256 { 14 | fn write(&self, mut writer: W, _ty: SerdeType) -> Result<()> { 15 | writer.write_all(self.as_ref()) 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /asb-authdb/lvmt-db/src/serde/mod.rs: -------------------------------------------------------------------------------- 1 | mod basic; 2 | mod curves; 3 | mod h256; 4 | 5 | use std::io::{Read, Result, Write}; 6 | 7 | #[derive(Copy, Clone)] 8 | pub struct SerdeType { 9 | pub consistent: bool, 10 | } 11 | 12 | pub trait MyFromBytes: Sized { 13 | fn read(reader: R, ty: SerdeType) -> 
Result; 14 | fn read_vec(mut reader: R, ty: SerdeType) -> Result> { 15 | let length: usize = MyFromBytes::read(&mut reader, ty)?; 16 | let mut answer = Vec::::with_capacity(length); 17 | for _ in 0..length { 18 | answer.push(::read(&mut reader, ty)?); 19 | } 20 | Ok(answer) 21 | } 22 | 23 | fn from_bytes(mut data: &[u8], ty: SerdeType) -> Result { 24 | MyFromBytes::read(&mut data, ty) 25 | } 26 | fn from_bytes_local(data: &[u8]) -> Result { 27 | MyFromBytes::from_bytes(data, SerdeType { consistent: false }) 28 | } 29 | fn from_bytes_consensus(data: &[u8]) -> Result { 30 | MyFromBytes::from_bytes(data, SerdeType { consistent: true }) 31 | } 32 | } 33 | 34 | pub trait MyToBytes: Sized { 35 | fn write(&self, writer: W, ty: SerdeType) -> Result<()>; 36 | fn write_vec(vec_self: &Vec, mut writer: W, ty: SerdeType) -> Result<()> { 37 | MyToBytes::write(&vec_self.len(), &mut writer, ty)?; 38 | for item in vec_self.iter() { 39 | MyToBytes::write(item, &mut writer, ty)?; 40 | } 41 | Ok(()) 42 | } 43 | 44 | fn to_bytes(&self, ty: SerdeType) -> Vec { 45 | let mut serialized = Vec::with_capacity(1024); 46 | // Write to Vec should always return Ok(..) 47 | MyToBytes::write(self, &mut serialized, ty).unwrap(); 48 | serialized.shrink_to_fit(); 49 | serialized 50 | } 51 | fn to_bytes_local(&self) -> Vec { 52 | MyToBytes::to_bytes(self, SerdeType { consistent: false }) 53 | } 54 | fn to_bytes_consensus(&self) -> Vec { 55 | MyToBytes::to_bytes(self, SerdeType { consistent: true }) 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /asb-authdb/lvmt-db/src/single_amt/mod.rs: -------------------------------------------------------------------------------- 1 | use crate::amt::{AMTConfigTrait, AMTData, AMTree, NodeIndex}; 2 | use crate::crypto::export::{ 3 | instances::{Fr, FrInt, G1Aff, G1}, 4 | Pairing, ProjectiveCurve, Zero, 5 | }; 6 | use crate::crypto::{AMTParams, TypeUInt}; 7 | use crate::serde::{MyFromBytes, MyToBytes, SerdeType}; 8 | use crate::storage::{DBColumn, FlattenArray, FlattenTree}; 9 | use keccak_hash::{keccak, H256}; 10 | use kvdb::{DBKey, DBOp, DBTransaction, KeyValueDB}; 11 | use lvmt_serde_derive::{MyFromBytes, MyToBytes}; 12 | use std::io::Write; 13 | use std::sync::{Arc, RwLock}; 14 | 15 | const ROOT_KEY: [u8; 2] = [0, 0]; 16 | 17 | #[derive(Copy, Clone)] 18 | struct TreeName; 19 | 20 | impl MyToBytes for TreeName { 21 | fn write(&self, mut writer: W, _ty: SerdeType) -> std::io::Result<()> { 22 | writer.write(&[])?; 23 | Ok(()) 24 | } 25 | } 26 | 27 | #[derive(Default, Clone, Debug, MyFromBytes, MyToBytes)] 28 | struct Node { 29 | data: Vec, 30 | hash: H256, 31 | } 32 | 33 | impl AMTData for Node { 34 | #[cfg(target_endian = "little")] 35 | fn as_fr_int(&self) -> FrInt { 36 | let mut result = unsafe { std::mem::transmute::<[u8; 32], [u64; 4]>(self.hash.0.clone()) }; 37 | result[3] &= 0x3fffffff; 38 | FrInt::new(result) 39 | } 40 | } 41 | 42 | #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] 43 | pub struct Height; 44 | 45 | impl TypeUInt for Height { 46 | const USIZE: usize = N; 47 | } 48 | 49 | #[derive(Copy, Clone)] 50 | struct AMTConfig; 51 | 52 | impl AMTConfigTrait for AMTConfig { 53 | type PE = Pairing; 54 | type Name = TreeName; 55 | type Data = Node; 56 | type Commitment = G1; 57 | type DataLayout = FlattenArray; 58 | type TreeLayout = FlattenTree; 59 | type Height = Height; 60 | } 61 | 62 | #[derive(Clone)] 63 | pub struct AmtDB { 64 | root: G1, 65 | amt: Arc>>>, 66 | pub db: Arc, 67 | } 68 | 69 | impl AmtDB { 70 | pub fn new( 71 
| db: Arc, 72 | pp: Arc>, 73 | shard_node: Option<(usize, usize)>, 74 | ) -> Self { 75 | let db_col = DBColumn::from_kvdb(db.clone(), 0); 76 | let root = db_col 77 | .get(ROOT_KEY.as_ref()) 78 | .unwrap() 79 | .map_or(G1::zero(), |x| G1::from_bytes_local(&x).unwrap()); 80 | 81 | let shard_root = shard_node.map(|(depth, index)| NodeIndex::>::new(depth, index)); 82 | 83 | let mut amt = AMTree::>::new(TreeName, db_col, pp, shard_root); 84 | amt.set_commitment(&root); 85 | 86 | Self { 87 | root, 88 | amt: Arc::new(RwLock::new(amt)), 89 | db, 90 | } 91 | } 92 | 93 | fn index(key: &[u8]) -> usize { 94 | let bytes = (N + 7) / 8; 95 | assert!(key.len() >= bytes); 96 | let mut index: [u8; 8] = [0u8; 8]; 97 | index.copy_from_slice(&key[..bytes]); 98 | let mut index = u64::from_le_bytes(index); 99 | index &= (1 << N) - 1; 100 | return index as usize; 101 | } 102 | 103 | pub fn get(&self, key: &[u8]) -> Option> { 104 | let mut amt = self.amt.write().unwrap(); 105 | let node = amt.get(Self::index(key)); 106 | return if node.hash == H256::zero() { 107 | None 108 | } else { 109 | Some(node.data.clone()) 110 | }; 111 | } 112 | 113 | pub fn set(&mut self, key: &[u8], value: Vec) { 114 | let new_node = Node { 115 | hash: keccak(value.as_slice()), 116 | data: value, 117 | }; 118 | let mut amt = self.amt.write().unwrap(); 119 | *amt.write_versions(Self::index(key)) = new_node; 120 | } 121 | 122 | pub fn commit(&mut self) -> G1Aff { 123 | self.root = self.amt.write().unwrap().flush(); 124 | self.db.write_buffered(DBTransaction { 125 | ops: vec![DBOp::Insert { 126 | col: 0, 127 | key: DBKey::from(ROOT_KEY.as_ref()), 128 | value: self.root.clone().to_bytes_local(), 129 | }], 130 | }); 131 | self.db.flush().unwrap(); 132 | return self.root.into_affine(); 133 | } 134 | } 135 | -------------------------------------------------------------------------------- /asb-authdb/lvmt-db/src/storage/access.rs: -------------------------------------------------------------------------------- 1 | use std::fmt::Debug; 2 | use std::hash::Hash; 3 | use std::marker::PhantomData; 4 | 5 | use global::Global; 6 | use hashbrown::HashMap; 7 | use kvdb::{DBOp, DBTransaction}; 8 | 9 | use crate::serde::{MyFromBytes, MyToBytes}; 10 | 11 | use super::layout::LayoutTrait; 12 | use super::DBColumn; 13 | 14 | pub static PUT_COUNT: Global<[u64; 4]> = Global::INIT; 15 | pub static PUT_MODE: Global = Global::INIT; 16 | 17 | #[derive(Clone)] 18 | pub struct DBAccess< 19 | K: Copy + Clone + Debug + Eq + Hash, 20 | V: Default + Clone + MyFromBytes + MyToBytes, 21 | L: LayoutTrait, 22 | > { 23 | prefix: Vec, 24 | db: DBColumn, 25 | cache: HashMap, 26 | _phantom: PhantomData<(K, V, L)>, 27 | } 28 | 29 | impl< 30 | K: Copy + Clone + Debug + Eq + Hash, 31 | V: Default + Clone + MyFromBytes + MyToBytes, 32 | L: LayoutTrait, 33 | > DBAccess 34 | { 35 | pub fn new(prefix: Vec, db: DBColumn) -> Self { 36 | Self { 37 | prefix, 38 | db, 39 | cache: Default::default(), 40 | _phantom: PhantomData, 41 | } 42 | } 43 | 44 | pub fn get(&mut self, node_index: &K) -> &V { 45 | let (value, _dirty) = self.ensure_cached(node_index); 46 | value 47 | } 48 | 49 | pub fn get_mut(&mut self, node_index: &K) -> &mut V { 50 | let (value, dirty) = self.ensure_cached(node_index); 51 | *dirty = true; 52 | value 53 | } 54 | 55 | fn ensure_cached(&mut self, node_index: &K) -> &mut (V, bool) { 56 | let (prefix, db) = (&self.prefix, &self.db); 57 | 58 | self.cache.entry(*node_index).or_insert_with(|| { 59 | let db_key = Self::compute_key(&prefix, node_index); 60 | 61 | let value = 
match db.get(&db_key).unwrap() { 62 | Some(x) => V::from_bytes_local(&*x).unwrap(), 63 | None => V::default(), 64 | }; 65 | (value, false) 66 | }) 67 | } 68 | 69 | pub fn set(&mut self, node_index: &K, value: V) { 70 | self.cache.insert(*node_index, (value, true)); 71 | } 72 | 73 | pub fn flush_cache(&mut self) { 74 | let prefix = &self.prefix; 75 | let ops: Vec = self 76 | .cache 77 | .iter_mut() 78 | .filter(|(_k, (_v, dirty))| *dirty) 79 | .map(|(key, (value, dirty))| { 80 | *dirty = false; 81 | let db_key = Self::compute_key(&prefix, key); 82 | DBOp::Insert { 83 | col: 0, 84 | key: db_key.into(), 85 | value: value.to_bytes_local(), 86 | } 87 | }) 88 | .collect(); 89 | 90 | (*PUT_COUNT.lock_mut().unwrap())[*PUT_MODE.lock().unwrap()] += ops.len() as u64; 91 | 92 | self.db.write_buffered(DBTransaction { ops }); 93 | self.cache.clear(); 94 | } 95 | 96 | fn compute_key(name: &[u8], node_index: &K) -> Vec { 97 | let layout_index = >::position(node_index) as u32; 98 | 99 | let mut key = name.to_vec(); 100 | key.extend_from_slice(&layout_index.to_be_bytes()); 101 | key 102 | } 103 | } 104 | 105 | #[cfg(test)] 106 | mod test { 107 | use crate::crypto::TypeUInt; 108 | use crate::type_uint; 109 | 110 | use super::*; 111 | 112 | type_uint! { 113 | struct TestDepths(6); 114 | } 115 | 116 | #[test] 117 | fn test_backend() { 118 | type NodeIndex = crate::amt::NodeIndex; 119 | type FlattenTree = super::super::FlattenTree; 120 | 121 | const DEPTHS: usize = TestDepths::USIZE; 122 | const TMP_RATIO: usize = 719323; 123 | 124 | let db = crate::storage::test_db_col(); 125 | let mut tree = 126 | DBAccess::::new("test".to_string().into_bytes(), db); 127 | 128 | for depth in 0..DEPTHS { 129 | for index in 0..(1 << depth) { 130 | let node_index = &NodeIndex::new(depth, index); 131 | *tree.get_mut(node_index) = (TMP_RATIO * depth) as u64; 132 | *tree.get_mut(node_index) += index as u64; 133 | } 134 | } 135 | 136 | tree.flush_cache(); 137 | 138 | for depth in 0..DEPTHS { 139 | for index in 0..(1 << depth) { 140 | let node_index = &NodeIndex::new(depth, index); 141 | assert_eq!( 142 | (TMP_RATIO * depth + index) as u64, 143 | *tree.get_mut(node_index) 144 | ) 145 | } 146 | } 147 | 148 | drop(tree); 149 | } 150 | } 151 | -------------------------------------------------------------------------------- /asb-authdb/lvmt-db/src/storage/kvdb.rs: -------------------------------------------------------------------------------- 1 | use kvdb::{DBOp, DBTransaction, DBValue, KeyValueDB}; 2 | use std::io::Result; 3 | use std::sync::Arc; 4 | 5 | #[derive(Clone)] 6 | pub struct DBColumn { 7 | db: Arc, 8 | col: u32, 9 | } 10 | 11 | impl DBColumn { 12 | pub fn from_kvdb(db: Arc, col: u32) -> Self { 13 | Self { db, col } 14 | } 15 | 16 | pub fn get(&self, key: &[u8]) -> Result> { 17 | self.db.get(self.col, key) 18 | } 19 | 20 | pub fn write_buffered(&self, mut transaction: DBTransaction) { 21 | let ops = &mut transaction.ops; 22 | ops.iter_mut().for_each(|x| match x { 23 | DBOp::Insert { col, .. } => *col = self.col, 24 | DBOp::Delete { col, .. 
} => *col = self.col,
25 |         });
26 |         self.db.write_buffered(transaction)
27 |     }
28 | }
29 | 
-------------------------------------------------------------------------------- /asb-authdb/lvmt-db/src/storage/layout.rs: --------------------------------------------------------------------------------
1 | use crate::amt::NodeIndex;
2 | use crate::crypto::TypeUInt;
3 | use std::fmt::Debug;
4 | use std::hash::Hash;
5 | 
6 | pub trait LayoutTrait<I: Copy + Clone + Debug + Eq + Hash> {
7 |     fn position(index: &I) -> usize;
8 | }
9 | 
10 | #[derive(Clone)]
11 | pub struct FlattenArray;
12 | 
13 | impl LayoutTrait<usize> for FlattenArray {
14 |     #[inline]
15 |     fn position(index: &usize) -> usize {
16 |         *index
17 |     }
18 | }
19 | 
20 | #[derive(Clone)]
21 | pub struct FlattenTree;
22 | 
23 | impl<N: TypeUInt> LayoutTrait<NodeIndex<N>> for FlattenTree {
24 |     #[inline]
25 |     fn position(tree_index: &NodeIndex<N>) -> usize {
26 |         (1 << tree_index.depth()) + tree_index.index()
27 |     }
28 | }
-------------------------------------------------------------------------------- /asb-authdb/lvmt-db/src/storage/mod.rs: --------------------------------------------------------------------------------
1 | pub mod access;
2 | pub mod kvdb;
3 | pub mod layout;
4 | 
5 | pub use self::access::DBAccess;
6 | pub use self::kvdb::DBColumn;
7 | pub use self::layout::{FlattenArray, FlattenTree, LayoutTrait};
8 | pub use self::test_tools::{test_db_col, test_kvdb};
9 | 
10 | mod test_tools {
11 |     use super::DBColumn;
12 |     use kvdb::KeyValueDB;
13 |     use std::sync::Arc;
14 | 
15 |     pub fn test_db_col() -> DBColumn {
16 |         DBColumn::from_kvdb(Arc::new(kvdb_memorydb::create(1)), 0)
17 |     }
18 | 
19 |     pub fn test_kvdb(num_cols: u32) -> Arc<dyn KeyValueDB> {
20 |         Arc::new(kvdb_memorydb::create(num_cols))
21 |     }
22 | }
-------------------------------------------------------------------------------- /asb-authdb/parity-journaldb/Cargo.toml: --------------------------------------------------------------------------------
1 | [package]
2 | name = "parity-journaldb"
3 | version = "0.2.0"
4 | authors = ["Parity Technologies <admin@parity.io>"]
5 | description = "A `HashDB` which can manage a short-term journal potentially containing many forks of mutually exclusive actions"
6 | license = "GPL3"
7 | 
8 | [dependencies]
9 | parity-bytes = "0.1"
10 | ethereum-types = "0.9.2"
11 | hash-db = { workspace = true }
12 | hash-db15 = { package="hash-db", version="0.15.2", optional = true}
13 | trie-db = "0.11.0"
14 | keccak-hasher = { workspace = true }
15 | keccak-hasher15 = {package="keccak-hasher", version = "0.15.3", optional = true}
16 | blake2-hasher = { workspace = true, optional = true }
17 | 
18 | kvdb = "0.4"
19 | log = "0.4"
20 | memory-db = { path = "./memory-db" }
21 | parity-util-mem = "0.7.0"
22 | parking_lot = "0.11.1"
23 | fastmap = { path = "./fastmap" }
24 | rlp = "0.4.6"
25 | stats = { workspace = true }
26 | 
27 | [dev-dependencies]
28 | env_logger = "0.5"
29 | keccak-hash = "0.5.0"
30 | kvdb-memorydb = "=0.4.0"
31 | parity-util-mem05 = { package ="parity-util-mem", version = "0.5.2"}
32 | 
33 | [features]
34 | # Compatibility with newer crate versions
35 | hash15 = ["hash-db15", "keccak-hasher15"]
36 | light-hash = ["blake2-hasher"]
-------------------------------------------------------------------------------- /asb-authdb/parity-journaldb/README.md: --------------------------------------------------------------------------------
1 | # Journal DB
2 | 
3 | This crate is extracted from the
4 | [OpenEthereum](https://github.com/openethereum/openethereum/tree/main/crates/db/journaldb) project. We have made this crate
5 | independent of any backend database crate.
-------------------------------------------------------------------------------- /asb-authdb/parity-journaldb/fastmap/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "fastmap" 3 | version = "0.1.0" 4 | authors = ["Parity Technologies "] 5 | description = "Specialized version of `HashMap` with H256 keys and fast hashing function." 6 | license = "GPL-3.0" 7 | 8 | [dependencies] 9 | ethereum-types = "0.9.2" 10 | plain_hasher = "0.2" 11 | -------------------------------------------------------------------------------- /asb-authdb/parity-journaldb/fastmap/src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2015-2020 Parity Technologies (UK) Ltd. 2 | // This file is part of OpenEthereum. 3 | 4 | // OpenEthereum is free software: you can redistribute it and/or modify 5 | // it under the terms of the GNU General Public License as published by 6 | // the Free Software Foundation, either version 3 of the License, or 7 | // (at your option) any later version. 8 | 9 | // OpenEthereum is distributed in the hope that it will be useful, 10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | // GNU General Public License for more details. 13 | 14 | // You should have received a copy of the GNU General Public License 15 | // along with OpenEthereum. If not, see . 16 | 17 | //! Provides a `H256FastMap` type with H256 keys and fast hashing function. 18 | 19 | extern crate ethereum_types; 20 | extern crate plain_hasher; 21 | 22 | use ethereum_types::H256; 23 | use plain_hasher::PlainHasher; 24 | use std::{ 25 | collections::{HashMap, HashSet}, 26 | hash, 27 | }; 28 | 29 | /// Specialized version of `HashMap` with H256 keys and fast hashing function. 30 | pub type H256FastMap = HashMap>; 31 | /// Specialized version of HashSet with H256 values and fast hashing function. 32 | pub type H256FastSet = HashSet>; 33 | 34 | #[cfg(test)] 35 | mod tests { 36 | use super::*; 37 | 38 | #[test] 39 | fn test_works() { 40 | let mut h = H256FastMap::default(); 41 | h.insert(H256::from_low_u64_be(123), "abc"); 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /asb-authdb/parity-journaldb/memory-db/Cargo.toml: -------------------------------------------------------------------------------- 1 | # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO 2 | # 3 | # When uploading crates to the registry Cargo will automatically 4 | # "normalize" Cargo.toml files for maximal compatibility 5 | # with all versions of Cargo and also rewrite `path` dependencies 6 | # to registry (e.g. crates.io) dependencies 7 | # 8 | # If you believe there's an error in this file please file an 9 | # issue against the rust-lang/cargo repository. 
If you're 10 | # editing this file be aware that the upstream Cargo.toml 11 | # will likely look very different (and much more reasonable) 12 | 13 | [package] 14 | name = "memory-db" 15 | version = "0.11.0" 16 | authors = ["Parity Technologies "] 17 | description = "In-memory implementation of hash-db, useful for tests" 18 | license = "Apache-2.0" 19 | repository = "https://github.com/paritytech/parity-common" 20 | 21 | [dependencies] 22 | hash-db = "0.11.0" 23 | 24 | [dev-dependencies.criterion] 25 | version = "0.2.8" 26 | 27 | [dev-dependencies.keccak-hasher] 28 | version = "0.11.0" 29 | 30 | [dependencies.parity-util-mem] 31 | version = "0.7" 32 | -------------------------------------------------------------------------------- /asb-authdb/parity-journaldb/memory-db/README.md: -------------------------------------------------------------------------------- 1 | MemoryDB is a reference counted memory-based [`HashDB`](https://github.com/paritytech/parity-common/tree/master/hash-db) implementation backed by a `HashMap`. -------------------------------------------------------------------------------- /asb-authdb/parity-journaldb/src/as_hash_db_impls.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2015-2020 Parity Technologies (UK) Ltd. 2 | // This file is part of OpenEthereum. 3 | 4 | // OpenEthereum is free software: you can redistribute it and/or modify 5 | // it under the terms of the GNU General Public License as published by 6 | // the Free Software Foundation, either version 3 of the License, or 7 | // (at your option) any later version. 8 | 9 | // OpenEthereum is distributed in the hope that it will be useful, 10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | // GNU General Public License for more details. 13 | 14 | // You should have received a copy of the GNU General Public License 15 | // along with OpenEthereum. If not, see . 16 | 17 | //! Impls of the `AsHashDB` upcast trait for all different variants of DB 18 | use crate::hasher::DBHasher; 19 | use crate::{AsKeyedHashDB, KeyedHashDB}; 20 | use archivedb::ArchiveDB; 21 | use earlymergedb::EarlyMergeDB; 22 | use ethereum_types::H256; 23 | use hash_db::{AsHashDB, HashDB}; 24 | use kvdb::DBValue as KVDBValue; 25 | use overlaydb::OverlayDB; 26 | use overlayrecentdb::OverlayRecentDB; 27 | use refcounteddb::RefCountedDB; 28 | use trie_db::DBValue; 29 | 30 | #[cfg(feature = "hash15")] 31 | use hash_db15::{AsHashDB as AsHashDB15, HashDB as HashDB15, Prefix}; 32 | #[cfg(feature = "hash15")] 33 | use keccak_hasher15::DBHasher as DBHasher15; 34 | 35 | macro_rules! 
wrap_hash_db { 36 | ($name: ty) => { 37 | impl HashDB for $name { 38 | fn get(&self, key: &H256) -> Option { 39 | HashDB::::get(self, key).map(|x| DBValue::from_vec(x)) 40 | } 41 | 42 | fn contains(&self, key: &H256) -> bool { 43 | HashDB::::contains(self, key) 44 | } 45 | 46 | fn insert(&mut self, value: &[u8]) -> H256 { 47 | HashDB::::insert(self, value) 48 | } 49 | 50 | fn emplace(&mut self, key: H256, value: DBValue) { 51 | HashDB::::emplace(self, key, value.into_vec()) 52 | } 53 | 54 | fn remove(&mut self, key: &H256) { 55 | HashDB::::remove(self, key) 56 | } 57 | } 58 | 59 | impl AsHashDB for $name { 60 | fn as_hash_db(&self) -> &dyn HashDB { 61 | self 62 | } 63 | fn as_hash_db_mut(&mut self) -> &mut dyn HashDB { 64 | self 65 | } 66 | } 67 | 68 | impl AsHashDB for $name { 69 | fn as_hash_db(&self) -> &dyn HashDB { 70 | self 71 | } 72 | fn as_hash_db_mut(&mut self) -> &mut dyn HashDB { 73 | self 74 | } 75 | } 76 | 77 | impl AsKeyedHashDB for $name { 78 | fn as_keyed_hash_db(&self) -> &dyn KeyedHashDB { 79 | self 80 | } 81 | } 82 | 83 | #[cfg(feature = "hash15")] 84 | impl AsHashDB15 for $name { 85 | fn as_hash_db(&self) -> &dyn HashDB15 { 86 | self 87 | } 88 | 89 | fn as_hash_db_mut(&mut self) -> &mut dyn HashDB15 { 90 | self 91 | } 92 | } 93 | 94 | #[cfg(feature = "hash15")] 95 | impl HashDB15 for $name { 96 | // The key function `HashKey` in `memory-db` (v0.28.0) omits `prefix`. 97 | // The example code in `TrieDB` uses `HashKey` as key function. 98 | // So here we also omit `prefix`. 99 | fn get(&self, key: &[u8; 32], _prefix: Prefix) -> Option { 100 | HashDB::::get(self, to_h256_ref(key)) 101 | } 102 | 103 | fn contains(&self, key: &[u8; 32], _prefix: Prefix) -> bool { 104 | HashDB::::contains(self, to_h256_ref(key)) 105 | } 106 | 107 | fn insert(&mut self, _prefix: Prefix, value: &[u8]) -> [u8; 32] { 108 | HashDB::::insert(self, value).into() 109 | } 110 | 111 | fn emplace(&mut self, key: [u8; 32], _prefix: Prefix, value: DBValue) { 112 | HashDB::::emplace(self, key.into(), value) 113 | } 114 | 115 | fn remove(&mut self, key: &[u8; 32], _prefix: Prefix) { 116 | HashDB::::remove(self, to_h256_ref(key)) 117 | } 118 | } 119 | }; 120 | } 121 | 122 | wrap_hash_db!(ArchiveDB); 123 | wrap_hash_db!(EarlyMergeDB); 124 | wrap_hash_db!(OverlayRecentDB); 125 | wrap_hash_db!(RefCountedDB); 126 | wrap_hash_db!(OverlayDB); 127 | 128 | #[cfg(feature = "hash15")] 129 | fn to_h256_ref(input: &[u8; 32]) -> &H256 { 130 | unsafe { std::mem::transmute(input) } 131 | } 132 | -------------------------------------------------------------------------------- /asb-authdb/parity-journaldb/src/hasher.rs: -------------------------------------------------------------------------------- 1 | #[cfg(feature = "light-hash")] 2 | pub use blake2_hasher::Blake2bHasher as DBHasher; 3 | #[cfg(not(feature = "light-hash"))] 4 | pub use keccak_hasher::KeccakHasher as DBHasher; 5 | -------------------------------------------------------------------------------- /asb-authdb/parity-journaldb/src/mertics.rs: -------------------------------------------------------------------------------- 1 | // The original journaldb relies on some metric tools in crate `ethcore-db`. But it doesn't rely on the other dependencies. 2 | 3 | /// Ethcore definition of a KeyValueDB with embeeded metrics 4 | // pub trait KeyValueDB: kvdb::KeyValueDB + stats::PrometheusMetrics {} 5 | 6 | #[cfg(test)] 7 | pub use self::memory_db::InMemoryWithMetrics; 8 | 9 | #[cfg(test)] 10 | mod memory_db { 11 | // Copyright 2015-2020 Parity Technologies (UK) Ltd. 
12 | // The following code is part of OpenEthereum.
13 | 
14 |     use parity_util_mem05::{MallocSizeOf, MallocSizeOfOps};
15 | 
16 |     // OpenEthereum is free software: you can redistribute it and/or modify
17 |     // it under the terms of the GNU General Public License as published by
18 |     // the Free Software Foundation, either version 3 of the License, or
19 |     // (at your option) any later version.
20 | 
21 |     // OpenEthereum is distributed in the hope that it will be useful,
22 |     // but WITHOUT ANY WARRANTY; without even the implied warranty of
23 |     // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
24 |     // GNU General Public License for more details.
25 | 
26 |     // You should have received a copy of the GNU General Public License
27 |     // along with OpenEthereum. If not, see <http://www.gnu.org/licenses/>.
28 | 
29 |     /// InMemory with disabled statistics
30 |     pub struct InMemoryWithMetrics {
31 |         db: kvdb_memorydb::InMemory,
32 |     }
33 | 
34 |     impl MallocSizeOf for InMemoryWithMetrics {
35 |         fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
36 |             parity_util_mem05::MallocSizeOf::size_of(&self.db, ops)
37 |         }
38 |     }
39 | 
40 |     impl kvdb::KeyValueDB for InMemoryWithMetrics {
41 |         fn get(&self, col: u32, key: &[u8]) -> std::io::Result<Option<kvdb::DBValue>> {
42 |             self.db.get(col, key)
43 |         }
44 |         fn get_by_prefix(&self, col: u32, prefix: &[u8]) -> Option<Box<[u8]>> {
45 |             self.db.get_by_prefix(col, prefix)
46 |         }
47 |         fn write_buffered(&self, transaction: kvdb::DBTransaction) {
48 |             self.db.write_buffered(transaction)
49 |         }
50 |         fn write(&self, transaction: kvdb::DBTransaction) -> std::io::Result<()> {
51 |             self.db.write(transaction)
52 |         }
53 |         fn flush(&self) -> std::io::Result<()> {
54 |             self.db.flush()
55 |         }
56 | 
57 |         fn iter<'a>(&'a self, col: u32) -> Box<dyn Iterator<Item = (Box<[u8]>, Box<[u8]>)> + 'a> {
58 |             kvdb::KeyValueDB::iter(&self.db, col)
59 |         }
60 | 
61 |         fn iter_from_prefix<'a>(
62 |             &'a self,
63 |             col: u32,
64 |             prefix: &'a [u8],
65 |         ) -> Box<dyn Iterator<Item = (Box<[u8]>, Box<[u8]>)> + 'a> {
66 |             self.db.iter_from_prefix(col, prefix)
67 |         }
68 | 
69 |         fn restore(&self, new_db: &str) -> std::io::Result<()> {
70 |             self.db.restore(new_db)
71 |         }
72 |     }
73 | 
74 |     impl stats::PrometheusMetrics for InMemoryWithMetrics {
75 |         fn prometheus_metrics(&self, _: &mut stats::PrometheusRegistry) {}
76 |     }
77 | 
78 |     impl InMemoryWithMetrics {
79 |         /// Create new instance
80 |         pub fn create(num_cols: u32) -> Self {
81 |             Self {
82 |                 db: kvdb_memorydb::create(num_cols),
83 |             }
84 |         }
85 |     }
86 | }
87 | 
-------------------------------------------------------------------------------- /asb-authdb/parity-journaldb/src/traits.rs: --------------------------------------------------------------------------------
1 | // Copyright 2015-2020 Parity Technologies (UK) Ltd.
2 | // This file is part of OpenEthereum.
3 | 
4 | // OpenEthereum is free software: you can redistribute it and/or modify
5 | // it under the terms of the GNU General Public License as published by
6 | // the Free Software Foundation, either version 3 of the License, or
7 | // (at your option) any later version.
8 | 
9 | // OpenEthereum is distributed in the hope that it will be useful,
10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 | // GNU General Public License for more details.
13 | 
14 | // You should have received a copy of the GNU General Public License
15 | // along with OpenEthereum. If not, see <http://www.gnu.org/licenses/>.
16 | 
17 | //! Disk-backed `HashDB` implementation.
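//!
//! A hedged sketch of the intended commit flow (mirroring `commit_batch` below);
//! the `jdb`, `era`, `id`, `HISTORY` and `ancient_id` bindings are assumed for
//! illustration and are not defined by this crate:
//!
//! ```ignore
//! // assume jdb: Box<dyn JournalDB>
//! let mut batch = jdb.backing().transaction();
//! // Journal this block's insertions and deletions under (era, id).
//! let mut ops = jdb.journal_under(&mut batch, era, &id)?;
//! // Once an ancient era is final, mark it canonical so that the states of
//! // competing sibling blocks can be pruned.
//! ops += jdb.mark_canonical(&mut batch, era - HISTORY, &ancient_id)?;
//! jdb.backing().write(batch)?;
//! // Clear in-memory overlays now that the changes hit the backing store.
//! jdb.flush();
//! ```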
18 | 
19 | use std::{io, sync::Arc};
20 | 
21 | use crate::hasher::DBHasher;
22 | use crate::KeyValueDB;
23 | use bytes::Bytes;
24 | use ethereum_types::H256;
25 | use hash_db::{AsHashDB, HashDB};
26 | use kvdb::{DBTransaction, DBValue};
27 | use std::collections::{BTreeMap, HashMap};
28 | 
29 | /// Expose keys of a HashDB for debugging or tests (slow).
30 | pub trait KeyedHashDB: HashDB<DBHasher, DBValue> {
31 |     /// Primarily used for tests; highly inefficient.
32 |     fn keys(&self) -> HashMap<H256, i32>;
33 | }
34 | 
35 | /// Upcast to `KeyedHashDB`
36 | pub trait AsKeyedHashDB: AsHashDB<DBHasher, DBValue> {
37 |     /// Perform upcast to KeyedHashDB.
38 |     fn as_keyed_hash_db(&self) -> &dyn KeyedHashDB;
39 | }
40 | 
41 | /// A `HashDB` which can manage a short-term journal potentially containing many forks of mutually
42 | /// exclusive actions.
43 | pub trait JournalDB: KeyedHashDB {
44 |     /// Return a copy of ourself, in a box.
45 |     fn boxed_clone(&self) -> Box<dyn JournalDB>;
46 | 
47 |     /// Returns heap memory size used
48 |     fn get_sizes(&self, sizes: &mut BTreeMap<String, usize>);
49 | 
50 |     /// Returns the size of journalled state in memory.
51 |     /// This function has a considerable speed requirement --
52 |     /// it must be fast enough to call several times per block imported.
53 |     fn journal_size(&self) -> usize {
54 |         0
55 |     }
56 | 
57 |     /// Check if this database has any commits
58 |     fn is_empty(&self) -> bool;
59 | 
60 |     /// Get the earliest era in the DB. None if there isn't yet any data in there.
61 |     fn earliest_era(&self) -> Option<u64> {
62 |         None
63 |     }
64 | 
65 |     /// Get the latest era in the DB. None if there isn't yet any data in there.
66 |     fn latest_era(&self) -> Option<u64>;
67 | 
68 |     /// Journal recent database operations as being associated with a given era and id.
69 |     // TODO: give the overlay to this function so journaldbs don't manage the overlays themselves.
70 |     fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> io::Result<u32>;
71 | 
72 |     /// Mark a given block as canonical, indicating that competing blocks' states may be pruned out.
73 |     fn mark_canonical(&mut self, batch: &mut DBTransaction, era: u64, id: &H256)
74 |         -> io::Result<u32>;
75 | 
76 |     /// Commit all queued insert and delete operations without affecting any journalling -- this requires that all insertions
77 |     /// and deletions are indeed canonical and will likely lead to an invalid database if that assumption is violated.
78 |     ///
79 |     /// Any keys or values inserted or deleted must be completely independent of those affected
80 |     /// by any previous `commit` operations. Essentially, this means that `inject` can be used
81 |     /// either to restore a state to a fresh database, or to insert data which may only be journalled
82 |     /// from this point onwards.
83 |     fn inject(&mut self, batch: &mut DBTransaction) -> io::Result<u32>;
84 | 
85 |     /// Whether this database is pruned.
86 |     fn is_pruned(&self) -> bool {
87 |         true
88 |     }
89 | 
90 |     /// Get backing database.
91 |     fn backing(&self) -> &Arc<dyn KeyValueDB>;
92 | 
93 |     /// Clear internal structures. This should be called after changes have been written
94 |     /// to the backing storage.
95 |     fn flush(&self) {}
96 | 
97 |     /// Consolidate all the insertions and deletions in the given memory overlay.
98 |     fn consolidate(&mut self, overlay: ::memory_db::MemoryDB<DBHasher, DBValue>);
99 | 
100 |     /// State data query
101 |     fn state(&self, id: &H256) -> Option<Bytes>;
102 | 
103 |     /// Commit all changes in a single batch
104 |     #[cfg(test)]
105 |     fn commit_batch(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> io::Result<u32> {
106 |         let mut batch = self.backing().transaction();
107 |         let mut ops = self.journal_under(&mut batch, now, id)?;
108 | 
109 |         if let Some((end_era, canon_id)) = end {
110 |             ops += self.mark_canonical(&mut batch, end_era, &canon_id)?;
111 |         }
112 | 
113 |         let result = self.backing().write(batch).map(|_| ops).map_err(Into::into);
114 |         self.flush();
115 |         result
116 |     }
117 | 
118 |     /// Inject all changes in a single batch.
119 |     #[cfg(test)]
120 |     fn inject_batch(&mut self) -> io::Result<u32> {
121 |         let mut batch = self.backing().transaction();
122 |         let res = self.inject(&mut batch)?;
123 |         self.backing().write(batch).map(|_| res).map_err(Into::into)
124 |     }
125 | }
126 | 
-------------------------------------------------------------------------------- /asb-authdb/parity-journaldb/src/util.rs: --------------------------------------------------------------------------------
1 | // Copyright 2015-2020 Parity Technologies (UK) Ltd.
2 | // This file is part of OpenEthereum.
3 | 
4 | // OpenEthereum is free software: you can redistribute it and/or modify
5 | // it under the terms of the GNU General Public License as published by
6 | // the Free Software Foundation, either version 3 of the License, or
7 | // (at your option) any later version.
8 | 
9 | // OpenEthereum is distributed in the hope that it will be useful,
10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of
11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 | // GNU General Public License for more details.
13 | 
14 | // You should have received a copy of the GNU General Public License
15 | // along with OpenEthereum. If not, see <http://www.gnu.org/licenses/>.
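// A journal record is keyed by rlp(era, index, 10-byte padding) -- built by
// `DatabaseKey` below -- and its value is rlp(id, inserts, deletes):
// `DatabaseValueRef` encodes it on write, `DatabaseValueView` decodes it on read.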
16 | 17 | use ethereum_types::H256; 18 | use rlp::{DecoderError, Encodable, Rlp, RlpStream}; 19 | 20 | const PADDING: [u8; 10] = [0u8; 10]; 21 | 22 | pub struct DatabaseKey { 23 | pub era: u64, 24 | pub index: usize, 25 | } 26 | 27 | impl Encodable for DatabaseKey { 28 | fn rlp_append(&self, s: &mut RlpStream) { 29 | s.begin_list(3); 30 | s.append(&self.era); 31 | s.append(&self.index); 32 | s.append(&&PADDING[..]); 33 | } 34 | } 35 | 36 | pub struct DatabaseValueView<'a> { 37 | rlp: Rlp<'a>, 38 | } 39 | 40 | impl<'a> DatabaseValueView<'a> { 41 | pub fn from_rlp(data: &'a [u8]) -> Self { 42 | DatabaseValueView { 43 | rlp: Rlp::new(data), 44 | } 45 | } 46 | 47 | #[inline] 48 | pub fn id(&self) -> Result { 49 | self.rlp.val_at(0) 50 | } 51 | 52 | #[inline] 53 | pub fn inserts(&self) -> Result, DecoderError> { 54 | self.rlp.list_at(1) 55 | } 56 | 57 | #[inline] 58 | pub fn deletes(&self) -> Result, DecoderError> { 59 | self.rlp.list_at(2) 60 | } 61 | } 62 | 63 | pub struct DatabaseValueRef<'a> { 64 | pub id: &'a H256, 65 | pub inserts: &'a [H256], 66 | pub deletes: &'a [H256], 67 | } 68 | 69 | impl<'a> Encodable for DatabaseValueRef<'a> { 70 | fn rlp_append(&self, s: &mut RlpStream) { 71 | s.begin_list(3); 72 | s.append(self.id); 73 | s.append_list(self.inserts); 74 | s.append_list(self.deletes); 75 | } 76 | } 77 | -------------------------------------------------------------------------------- /asb-authdb/patricia-trie-ethereum/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "patricia-trie-ethereum" 3 | version = "0.1.0" 4 | authors = ["Parity Technologies "] 5 | description = "Merkle-Patricia Trie (Ethereum Style)" 6 | license = "GPL-3.0" 7 | 8 | [dependencies] 9 | trie-db = "0.11.0" 10 | keccak-hasher = {git="https://github.com/openethereum/openethereum.git", rev="2ae2949"} 11 | blake2-hasher = { workspace = true } 12 | hash-db = "0.11.0" 13 | rlp = { version = "0.4.6" } 14 | parity-bytes = "0.1" 15 | ethereum-types = "0.9.2" 16 | elastic-array = "0.10" 17 | parity-journaldb = { workspace = true } 18 | 19 | 20 | 21 | [dev-dependencies] 22 | memory-db = "0.11.0" 23 | keccak-hash = "0.5.0" 24 | -------------------------------------------------------------------------------- /asb-authdb/patricia-trie-ethereum/src/lib.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2015-2020 Parity Technologies (UK) Ltd. 2 | // This file is part of OpenEthereum. 3 | 4 | // OpenEthereum is free software: you can redistribute it and/or modify 5 | // it under the terms of the GNU General Public License as published by 6 | // the Free Software Foundation, either version 3 of the License, or 7 | // (at your option) any later version. 8 | 9 | // OpenEthereum is distributed in the hope that it will be useful, 10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | // GNU General Public License for more details. 13 | 14 | // You should have received a copy of the GNU General Public License 15 | // along with OpenEthereum. If not, see . 16 | 17 | //! 
Façade crate for `patricia_trie` for Ethereum specific impls 18 | 19 | extern crate blake2_hasher; 20 | extern crate elastic_array; 21 | extern crate ethereum_types; 22 | extern crate hash_db; 23 | extern crate keccak_hasher; 24 | extern crate parity_bytes; 25 | extern crate parity_journaldb as journaldb; 26 | extern crate rlp; 27 | pub extern crate trie_db as trie; // `pub` because we need to import this crate for the tests in `patricia_trie` and there were issues: https://gist.github.com/dvdplm/869251ee557a1b4bd53adc7c971979aa 28 | 29 | mod rlp_node_codec; 30 | 31 | pub use rlp_node_codec::RlpNodeCodec; 32 | 33 | use ethereum_types::H256; 34 | use keccak_hasher::KeccakHasher; 35 | use rlp::DecoderError; 36 | 37 | /// Convenience type alias to instantiate a Keccak-flavoured `RlpNodeCodec` 38 | pub type RlpCodec = RlpNodeCodec; 39 | 40 | /// Convenience type alias to instantiate a Keccak/Rlp-flavoured `TrieDB` 41 | /// 42 | /// Use it as a `Trie` trait object. You can use `db()` to get the backing database object. 43 | /// Use `get` and `contains` to query values associated with keys in the trie. 44 | /// 45 | /// # Example 46 | /// ``` 47 | /// extern crate trie_db as trie; 48 | /// extern crate patricia_trie_ethereum as ethtrie; 49 | /// extern crate hash_db; 50 | /// extern crate keccak_hasher; 51 | /// extern crate memory_db; 52 | /// extern crate ethereum_types; 53 | /// extern crate elastic_array; 54 | /// extern crate journaldb; 55 | /// 56 | /// use trie::*; 57 | /// use hash_db::*; 58 | /// use keccak_hasher::KeccakHasher; 59 | /// use memory_db::*; 60 | /// use ethereum_types::H256; 61 | /// use ethtrie::{TrieDB, TrieDBMut}; 62 | /// use elastic_array::ElasticArray128; 63 | /// 64 | /// type DBValue = ElasticArray128; 65 | /// 66 | /// fn main() { 67 | /// let mut memdb = journaldb::new_memory_db(); 68 | /// let mut root = H256::default(); 69 | /// TrieDBMut::new(&mut memdb, &mut root).insert(b"foo", b"bar").unwrap(); 70 | /// let t = TrieDB::new(&memdb, &root).unwrap(); 71 | /// assert!(t.contains(b"foo").unwrap()); 72 | /// assert_eq!(t.get(b"foo").unwrap().unwrap(), DBValue::from_slice(b"bar")); 73 | /// } 74 | /// ``` 75 | pub type TrieDB<'db> = trie::TrieDB<'db, KeccakHasher, RlpCodec>; 76 | 77 | /// Convenience type alias to instantiate a Keccak/Rlp-flavoured `SecTrieDB` 78 | pub type SecTrieDB<'db> = trie::SecTrieDB<'db, KeccakHasher, RlpCodec>; 79 | 80 | /// Convenience type alias to instantiate a Keccak/Rlp-flavoured `FatDB` 81 | pub type FatDB<'db> = trie::FatDB<'db, KeccakHasher, RlpCodec>; 82 | 83 | /// Convenience type alias to instantiate a Keccak/Rlp-flavoured `TrieDBMut` 84 | /// 85 | /// Use it as a `TrieMut` trait object. You can use `db()` to get the backing database object. 86 | /// Note that changes are not committed to the database until `commit` is called. 87 | /// Querying the root or dropping the trie will commit automatically. 
88 | 89 | /// # Example 90 | /// ``` 91 | /// extern crate trie_db as trie; 92 | /// extern crate patricia_trie_ethereum as ethtrie; 93 | /// extern crate hash_db; 94 | /// extern crate keccak_hash; 95 | /// extern crate keccak_hasher; 96 | /// extern crate memory_db; 97 | /// extern crate ethereum_types; 98 | /// extern crate elastic_array; 99 | /// extern crate journaldb; 100 | /// 101 | /// use keccak_hash::KECCAK_NULL_RLP; 102 | /// use ethtrie::{TrieDBMut, trie::TrieMut}; 103 | /// use keccak_hasher::KeccakHasher; 104 | /// use memory_db::*; 105 | /// use ethereum_types::H256; 106 | /// use elastic_array::ElasticArray128; 107 | /// 108 | /// type DBValue = ElasticArray128; 109 | /// 110 | /// fn main() { 111 | /// let mut memdb = journaldb::new_memory_db(); 112 | /// let mut root = H256::default(); 113 | /// let mut t = TrieDBMut::new(&mut memdb, &mut root); 114 | /// assert!(t.is_empty()); 115 | /// assert_eq!(*t.root(), KECCAK_NULL_RLP); 116 | /// t.insert(b"foo", b"bar").unwrap(); 117 | /// assert!(t.contains(b"foo").unwrap()); 118 | /// assert_eq!(t.get(b"foo").unwrap().unwrap(), DBValue::from_slice(b"bar")); 119 | /// t.remove(b"foo").unwrap(); 120 | /// assert!(!t.contains(b"foo").unwrap()); 121 | /// } 122 | /// ``` 123 | pub type TrieDBMut<'db> = trie::TrieDBMut<'db, KeccakHasher, RlpCodec>; 124 | 125 | /// Convenience type alias to instantiate a Keccak/Rlp-flavoured `SecTrieDBMut` 126 | pub type SecTrieDBMut<'db> = trie::SecTrieDBMut<'db, KeccakHasher, RlpCodec>; 127 | 128 | /// Convenience type alias to instantiate a Keccak/Rlp-flavoured `FatDBMut` 129 | pub type FatDBMut<'db> = trie::FatDBMut<'db, KeccakHasher, RlpCodec>; 130 | 131 | /// Convenience type alias to instantiate a Keccak/Rlp-flavoured `TrieFactory` 132 | pub type TrieFactory = trie::TrieFactory; 133 | 134 | /// Convenience type alias for Keccak/Rlp flavoured trie errors 135 | pub type TrieError = trie::TrieError; 136 | /// Convenience type alias for Keccak/Rlp flavoured trie results 137 | pub type Result = trie::Result; 138 | -------------------------------------------------------------------------------- /asb-authdb/patricia-trie-ethereum/src/rlp_node_codec.rs: -------------------------------------------------------------------------------- 1 | // Copyright 2015-2020 Parity Technologies (UK) Ltd. 2 | // This file is part of OpenEthereum. 3 | 4 | // OpenEthereum is free software: you can redistribute it and/or modify 5 | // it under the terms of the GNU General Public License as published by 6 | // the Free Software Foundation, either version 3 of the License, or 7 | // (at your option) any later version. 8 | 9 | // OpenEthereum is distributed in the hope that it will be useful, 10 | // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | // GNU General Public License for more details. 13 | 14 | // You should have received a copy of the GNU General Public License 15 | // along with OpenEthereum. If not, see . 16 | 17 | //! 
`NodeCodec` implementation for Rlp 18 | 19 | use blake2_hasher::{Blake2bHasher, Blake2sHasher}; 20 | use elastic_array::ElasticArray128; 21 | use ethereum_types::H256; 22 | use hash_db::Hasher; 23 | use keccak_hasher::KeccakHasher; 24 | use rlp::{DecoderError, Prototype, Rlp, RlpStream}; 25 | use std::marker::PhantomData; 26 | use trie::{node::Node, ChildReference, NibbleSlice, NodeCodec}; 27 | 28 | /// Concrete implementation of a `NodeCodec` with Rlp encoding, generic over the `Hasher` 29 | #[derive(Default, Clone)] 30 | pub struct RlpNodeCodec { 31 | mark: PhantomData, 32 | } 33 | 34 | pub trait HashedNullNode: Hasher { 35 | const HASHED_NULL_NODE: H256; 36 | } 37 | 38 | impl HashedNullNode for KeccakHasher { 39 | const HASHED_NULL_NODE: H256 = H256([ 40 | 0x56, 0xe8, 0x1f, 0x17, 0x1b, 0xcc, 0x55, 0xa6, 0xff, 0x83, 0x45, 0xe6, 0x92, 0xc0, 0xf8, 41 | 0x6e, 0x5b, 0x48, 0xe0, 0x1b, 0x99, 0x6c, 0xad, 0xc0, 0x01, 0x62, 0x2f, 0xb5, 0xe3, 0x63, 42 | 0xb4, 0x21, 43 | ]); 44 | } 45 | 46 | impl HashedNullNode for Blake2sHasher { 47 | const HASHED_NULL_NODE: H256 = H256([ 48 | 0xfc, 0x84, 0x47, 0xd5, 0x56, 0x41, 0xbe, 0xee, 0x0c, 0x65, 0xd3, 0x85, 0xe8, 0xd8, 0x53, 49 | 0x9a, 0xbe, 0x95, 0x13, 0xc2, 0x3b, 0x2a, 0x0a, 0xea, 0xef, 0x1f, 0x29, 0x91, 0xd6, 0x91, 50 | 0xc6, 0x27, 51 | ]); 52 | } 53 | 54 | impl HashedNullNode for Blake2bHasher { 55 | const HASHED_NULL_NODE: H256 = H256([ 56 | 0x24, 0xfc, 0x3b, 0x2d, 0x20, 0x85, 0x2c, 0xa3, 0xf2, 0x66, 0x06, 0xcb, 0x64, 0x9f, 0xa0, 57 | 0x1c, 0x76, 0x8e, 0xf1, 0xe7, 0xb7, 0x3c, 0x1f, 0xb1, 0x01, 0x1c, 0xba, 0xb1, 0xb9, 0xd4, 58 | 0xd3, 0x40, 59 | ]); 60 | } 61 | 62 | // NOTE: what we'd really like here is: 63 | // `impl NodeCodec for RlpNodeCodec where H::Out: Decodable` 64 | // but due to the current limitations of Rust const evaluation we can't 65 | // do `const HASHED_NULL_NODE: H::Out = H::Out( … … )`. Perhaps one day soon? 66 | impl + HashedNullNode> NodeCodec for RlpNodeCodec { 67 | type Error = DecoderError; 68 | fn hashed_null_node() -> H::Out { 69 | H::HASHED_NULL_NODE 70 | } 71 | fn decode(data: &[u8]) -> ::std::result::Result { 72 | let r = Rlp::new(data); 73 | match r.prototype()? { 74 | // either leaf or extension - decode first item with NibbleSlice::??? 75 | // and use is_leaf return to figure out which. 76 | // if leaf, second item is a value (is_data()) 77 | // if extension, second item is a node (either SHA3 to be looked up and 78 | // fed back into this function or inline RLP which can be fed back into this function). 79 | Prototype::List(2) => match NibbleSlice::from_encoded(r.at(0)?.data()?) { 80 | (slice, true) => Ok(Node::Leaf(slice, r.at(1)?.data()?)), 81 | (slice, false) => Ok(Node::Extension(slice, r.at(1)?.as_raw())), 82 | }, 83 | // branch - first 16 are nodes, 17th is a value (or empty). 84 | Prototype::List(17) => { 85 | let mut nodes = [None as Option<&[u8]>; 16]; 86 | for i in 0..16 { 87 | let v = r.at(i)?; 88 | if v.is_empty() { 89 | nodes[i] = None; 90 | } else { 91 | nodes[i] = Some(v.as_raw()); 92 | } 93 | } 94 | Ok(Node::Branch( 95 | nodes, 96 | if r.at(16)?.is_empty() { 97 | None 98 | } else { 99 | Some(r.at(16)?.data()?) 100 | }, 101 | )) 102 | } 103 | // an empty branch index. 104 | Prototype::Data(0) => Ok(Node::Empty), 105 | // something went wrong. 
 62 | // NOTE: what we'd really like here is:
 63 | // `impl<H: Hasher> NodeCodec<H> for RlpNodeCodec<H> where H::Out: Decodable`
 64 | // but due to the current limitations of Rust const evaluation we can't
 65 | // do `const HASHED_NULL_NODE: H::Out = H::Out( … … )`. Perhaps one day soon?
 66 | impl<H: Hasher<Out = H256> + HashedNullNode> NodeCodec<H> for RlpNodeCodec<H> {
 67 |     type Error = DecoderError;
 68 |     fn hashed_null_node() -> H::Out {
 69 |         H::HASHED_NULL_NODE
 70 |     }
 71 |     fn decode(data: &[u8]) -> ::std::result::Result<Node, Self::Error> {
 72 |         let r = Rlp::new(data);
 73 |         match r.prototype()? {
 74 |             // either leaf or extension - decode first item with NibbleSlice::???
 75 |             // and use is_leaf return to figure out which.
 76 |             // if leaf, second item is a value (is_data())
 77 |             // if extension, second item is a node (either SHA3 to be looked up and
 78 |             // fed back into this function or inline RLP which can be fed back into this function).
 79 |             Prototype::List(2) => match NibbleSlice::from_encoded(r.at(0)?.data()?) {
 80 |                 (slice, true) => Ok(Node::Leaf(slice, r.at(1)?.data()?)),
 81 |                 (slice, false) => Ok(Node::Extension(slice, r.at(1)?.as_raw())),
 82 |             },
 83 |             // branch - first 16 are nodes, 17th is a value (or empty).
 84 |             Prototype::List(17) => {
 85 |                 let mut nodes = [None as Option<&[u8]>; 16];
 86 |                 for i in 0..16 {
 87 |                     let v = r.at(i)?;
 88 |                     if v.is_empty() {
 89 |                         nodes[i] = None;
 90 |                     } else {
 91 |                         nodes[i] = Some(v.as_raw());
 92 |                     }
 93 |                 }
 94 |                 Ok(Node::Branch(
 95 |                     nodes,
 96 |                     if r.at(16)?.is_empty() {
 97 |                         None
 98 |                     } else {
 99 |                         Some(r.at(16)?.data()?)
100 |                     },
101 |                 ))
102 |             }
103 |             // an empty branch index.
104 |             Prototype::Data(0) => Ok(Node::Empty),
105 |             // something went wrong.
106 |             _ => Err(DecoderError::Custom("Rlp is not valid.")),
107 |         }
108 |     }
109 |     fn try_decode_hash(data: &[u8]) -> Option<H::Out> {
110 |         let r = Rlp::new(data);
111 |         if r.is_data() && r.size() == KeccakHasher::LENGTH {
112 |             Some(r.as_val().expect("Hash is the correct size; qed"))
113 |         } else {
114 |             None
115 |         }
116 |     }
117 |     fn is_empty_node(data: &[u8]) -> bool {
118 |         Rlp::new(data).is_empty()
119 |     }
120 |     fn empty_node() -> Vec<u8> {
121 |         let mut stream = RlpStream::new();
122 |         stream.append_empty_data();
123 |         stream.drain()
124 |     }
125 | 
126 |     fn leaf_node(partial: &[u8], value: &[u8]) -> Vec<u8> {
127 |         let mut stream = RlpStream::new_list(2);
128 |         stream.append(&partial);
129 |         stream.append(&value);
130 |         stream.drain()
131 |     }
132 | 
133 |     fn ext_node(
134 |         partial: &[u8],
135 |         child_ref: ChildReference<<H as Hasher>::Out>,
136 |     ) -> Vec<u8> {
137 |         let mut stream = RlpStream::new_list(2);
138 |         stream.append(&partial);
139 |         match child_ref {
140 |             ChildReference::Hash(h) => stream.append(&h),
141 |             ChildReference::Inline(inline_data, len) => {
142 |                 let bytes = &AsRef::<[u8]>::as_ref(&inline_data)[..len];
143 |                 stream.append_raw(bytes, 1)
144 |             }
145 |         };
146 |         stream.drain()
147 |     }
148 | 
149 |     // fn branch_node<I>(children: I, value: Option<ElasticArray128<u8>>) -> Vec<u8>
150 |     fn branch_node<I>(children: I, value: Option<ElasticArray128<u8>>) -> Vec<u8>
151 |     where
152 |         I: IntoIterator<Item = Option<ChildReference<<H as Hasher>::Out>>>,
153 |     {
154 |         let mut stream = RlpStream::new_list(17);
155 |         for child_ref in children {
156 |             match child_ref {
157 |                 Some(c) => match c {
158 |                     ChildReference::Hash(h) => stream.append(&h),
159 |                     ChildReference::Inline(inline_data, len) => {
160 |                         let bytes = &AsRef::<[u8]>::as_ref(&inline_data)[..len];
161 |                         stream.append_raw(bytes, 1)
162 |                     }
163 |                 },
164 |                 None => stream.append_empty_data(),
165 |             };
166 |         }
167 |         if let Some(value) = value {
168 |             stream.append(&&*value);
169 |         } else {
170 |             stream.append_empty_data();
171 |         }
172 |         stream.drain()
173 |     }
174 | }
175 | 
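The decode path above dispatches purely on RLP shape: a two-item list is a leaf or extension (disambiguated by the hex-prefix flag in the first item), a 17-item list is a branch, and a zero-length data item is the empty node. A round-trip sketch (illustrative; assumes the `trie-db` version this crate pins, with the codec methods called as associated functions):

    use keccak_hasher::KeccakHasher;
    use trie::{node::Node, NodeCodec};
    use patricia_trie_ethereum::RlpNodeCodec;

    type Codec = RlpNodeCodec<KeccakHasher>;
    // 0x20 is the hex-prefix byte for an even-length leaf with an empty partial path.
    let encoded = Codec::leaf_node(&[0x20], b"bar");
    match Codec::decode(&encoded).unwrap() {
        Node::Leaf(_partial, value) => assert_eq!(value, &b"bar"[..]),
        _ => unreachable!("a two-item list with the leaf flag set decodes as a leaf"),
    }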
--------------------------------------------------------------------------------
/asb-authdb/rainblock-trie/Cargo.toml:
--------------------------------------------------------------------------------
 1 | [package]
 2 | name = "rainblock-trie"
 3 | version = "0.1.0"
 4 | edition = "2021"
 5 | authors = ["Chenxing Li "]
 6 | 
 7 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 8 | 
 9 | [dependencies]
10 | rlp = "0.4.6"
11 | ethereum-types = "0.9"
12 | kvdb = { workspace = true }
13 | smallvec = "*"
14 | blake2-hasher = { workspace = true, optional = true }
15 | keccak-hasher = { workspace = true }
16 | hash-db = { workspace = true }
17 | 
18 | [dev-dependencies]
19 | kvdb-memorydb = { workspace = true }
20 | rand = "0.7"
21 | 
22 | [features]
23 | light-hash = ["blake2-hasher"]
24 | thread-safe = []
--------------------------------------------------------------------------------
/asb-authdb/rainblock-trie/src/child_ref.rs:
--------------------------------------------------------------------------------
  1 | use std::{
  2 |     cell::RefCell,
  3 |     ops::{Deref, DerefMut, Index, IndexMut},
  4 |     sync::Arc,
  5 | };
  6 | 
  7 | use crate::{
  8 |     nibble::Nibble, trie_node::TrieNode, trie_node_ext::TrieNodeExt, NodePtr, NodePtrWeak,
  9 | };
 10 | use ethereum_types::H256;
 11 | use kvdb::KeyValueDB;
 12 | use rlp::{Decodable, Encodable};
 13 | 
 14 | type Bytes = Vec<u8>;
 15 | 
 16 | #[derive(Clone, Default)]
 17 | pub enum ChildRef {
 18 |     #[default]
 19 |     Null,
 20 |     Ref(H256),
 21 |     Owned(NodePtr),
 22 | }
 23 | 
 24 | impl PartialEq for ChildRef {
 25 |     fn eq(&self, other: &Self) -> bool {
 26 |         match (self, other) {
 27 |             (Self::Null, Self::Null) => true,
 28 |             (Self::Ref(l0), Self::Ref(r0)) => l0 == r0,
 29 |             (Self::Owned(l0), Self::Owned(r0)) => NodePtr::ptr_eq(l0, r0),
 30 |             _ => false,
 31 |         }
 32 |     }
 33 | }
 34 | 
 35 | impl Eq for ChildRef {}
 36 | 
 37 | pub type ChildRefCell = RefCell<ChildRef>;
 38 | 
 39 | impl ChildRef {
 40 |     pub fn is_null(&self) -> bool {
 41 |         *self == ChildRef::Null
 42 |     }
 43 | 
 44 |     pub fn owned_mut(&mut self) -> Option<&mut NodePtr> {
 45 |         if let ChildRef::Owned(node) = self {
 46 |             Some(node)
 47 |         } else {
 48 |             None
 49 |         }
 50 |     }
 51 | 
 52 |     pub fn loaded_mut(&mut self, db: &Arc<dyn KeyValueDB>) -> Option<&mut NodePtr> {
 53 |         if let ChildRef::Ref(digest) = self {
 54 |             let node = TrieNodeExt::load(db, digest.clone()).seal();
 55 |             *self = ChildRef::Owned(node);
 56 |         }
 57 |         match self {
 58 |             ChildRef::Null => None,
 59 |             ChildRef::Ref(_) => unreachable!(),
 60 |             ChildRef::Owned(node) => Some(node),
 61 |         }
 62 |     }
 63 | 
 64 |     pub fn owned_or_load(
 65 |         me: &RefCell<Self>,
 66 |         db: &Arc<dyn KeyValueDB>,
 67 |     ) -> Option<(NodePtr, bool)> {
 68 |         let borrowed_ref = me.borrow();
 69 |         match &*borrowed_ref {
 70 |             ChildRef::Null => None,
 71 |             ChildRef::Ref(digest) => {
 72 |                 let node = TrieNodeExt::load(db, digest.clone()).seal();
 73 |                 std::mem::drop(borrowed_ref);
 74 | 
 75 |                 me.replace(ChildRef::Owned(node.clone()));
 76 |                 Some((node, true))
 77 |             }
 78 |             ChildRef::Owned(node) => Some((node.clone(), false)),
 79 |         }
 80 |     }
 81 | 
 82 |     #[inline]
 83 |     pub fn truncate(me: &RefCell<Self>) {
 84 |         let mut replaced_hash = None;
 85 |         if let Self::Owned(node) = &*me.borrow() {
 86 |             if node.as_ref().is_small_node() {
 87 |                 return;
 88 |             }
 89 |             replaced_hash = Some(node.as_ref().hash());
 90 |         }
 91 |         if let Some(hash) = replaced_hash {
 92 |             *me.borrow_mut() = Self::Ref(hash)
 93 |         }
 94 |     }
 95 | 
 96 |     #[inline]
 97 |     pub fn exile<const N: usize>(&self, depth: usize, exile_nodes: &mut Vec<NodePtrWeak>) {
 98 |         if let Self::Owned(node) = self {
 99 |             TrieNodeExt::exile::<N>(node, depth, exile_nodes);
100 |         }
101 |     }
102 | 
103 |     #[inline]
104 |     pub fn commit<const N: usize>(
105 |         me: &RefCell<Self>,
106 |         depth: usize,
107 |         put_ops: &mut Vec<(H256, Vec<u8>)>,
108 |         top_layer: bool,
109 |     ) {
110 |         if let Self::Owned(node) = &*me.borrow() {
111 |             node.as_ref().commit::<N>(depth, put_ops, top_layer)
112 |         }
113 |     }
114 | 
115 |     #[inline]
116 |     #[cfg(test)]
117 |     pub fn loaded_nodes_count(me: &RefCell<Self>) -> usize {
118 |         if let Self::Owned(node) = &*me.borrow() {
119 |             if node.as_ref().get_rlp_encode().len() >= 32 {
120 |                 node.as_ref().loaded_nodes_count()
121 |             } else {
122 |                 0
123 |             }
124 |         } else {
125 |             0
126 |         }
127 |     }
128 | 
129 |     #[inline]
130 |     #[cfg(test)]
131 |     pub fn print_node(me: &RefCell<Self>, ident: usize) {
132 |         match &*me.borrow() {
133 |             ChildRef::Null => {
134 |                 println!("null")
135 |             }
136 |             ChildRef::Ref(digest) => {
137 |                 println!("digest {digest:x?}")
138 |             }
139 |             ChildRef::Owned(node) => node.as_ref().print_node(ident),
140 |         }
141 |     }
142 | }
143 | 
144 | impl Encodable for ChildRef {
145 |     fn rlp_append(&self, s: &mut rlp::RlpStream) {
146 |         match self {
147 |             ChildRef::Null => Bytes::new().rlp_append(s),
148 |             ChildRef::Ref(digest) => digest.rlp_append(s),
149 |             ChildRef::Owned(node) => {
150 |                 if node.as_ref().is_small_node() {
151 |                     let rlp_encoded = node.as_ref().get_rlp_encode();
152 |                     s.append_raw(&rlp_encoded, 0);
153 |                 } else {
154 |                     node.as_ref().hash().rlp_append(s)
155 |                 }
156 |             }
157 |         }
158 |     }
159 | }
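// (Illustrative aside, not part of the original file.) `rlp_append` above
// implements the standard Merkle Patricia Trie child-reference rule: a child
// whose RLP encoding is shorter than 32 bytes (`is_small_node`) is embedded
// verbatim in its parent, while anything larger is referenced by its 32-byte
// hash. `Decodable` below undoes exactly that split -- an RLP list is an
// embedded node, a non-empty RLP string is a digest, and empty data is Null:
//
//     let encoded: Vec<u8> = node.as_ref().get_rlp_encode();
//     if encoded.len() < 32 {
//         // cheaper to embed the child than to reference it by hash
//     }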
160 | impl Decodable for ChildRef {
161 |     fn decode(rlp: &rlp::Rlp) -> Result<Self, rlp::DecoderError> {
162 |         Ok(if rlp.is_empty() {
163 |             ChildRef::Null
164 |         } else if rlp.is_list() {
165 |             let rlp_encode = rlp.as_raw().to_vec();
166 |             let trie_node = TrieNode::decode(rlp)?;
167 |             ChildRef::Owned(TrieNodeExt::from_child_ref(trie_node, rlp_encode).seal())
168 |         } else {
169 |             ChildRef::Ref(H256::decode(rlp)?)
170 |         })
171 |     }
172 | }
173 | 
174 | #[derive(Default, Clone, Eq, PartialEq)]
175 | pub struct ChildRefGroup([ChildRefCell; 16]);
176 | 
177 | impl ChildRefGroup {
178 |     pub fn enumerate_mut(&mut self) -> impl Iterator<Item = (Nibble, &mut ChildRef)> {
179 |         self.0
180 |             .iter_mut()
181 |             .enumerate()
182 |             .map(|(idx, child_ref)| (Nibble::from_lo(idx as u8), child_ref.get_mut()))
183 |     }
184 | 
185 |     pub fn no_child(&mut self) -> bool {
186 |         self.enumerate_mut().all(|(_, child)| child.is_null())
187 |     }
188 | 
189 |     pub fn child_cnt(&self) -> usize {
190 |         self.0.iter().filter(|cell| !cell.borrow().is_null()).count()
191 |     }
192 | 
193 |     pub fn only_child_mut(&mut self) -> Option<(Nibble, &mut ChildRef)> {
194 |         let mut non_null_child = None;
195 |         for (idx, _) in self.enumerate_mut().filter(|(_, child)| !child.is_null()) {
196 |             if non_null_child.is_some() {
197 |                 non_null_child = None;
198 |                 break;
199 |             } else {
200 |                 non_null_child = Some(idx)
201 |             }
202 |         }
203 |         non_null_child.map(|idx| (idx, self[idx].get_mut()))
204 |     }
205 | }
206 | 
207 | impl Deref for ChildRefGroup {
208 |     type Target = [ChildRefCell; 16];
209 | 
210 |     fn deref(&self) -> &Self::Target {
211 |         &self.0
212 |     }
213 | }
214 | 
215 | impl DerefMut for ChildRefGroup {
216 |     fn deref_mut(&mut self) -> &mut Self::Target {
217 |         &mut self.0
218 |     }
219 | }
220 | 
221 | impl Index<Nibble> for ChildRefGroup {
222 |     type Output = ChildRefCell;
223 | 
224 |     fn index(&self, index: Nibble) -> &Self::Output {
225 |         // SAFETY: a nibble must belong to [0, 16)
226 |         unsafe { self.0.get_unchecked(index.inner() as usize) }
227 |     }
228 | }
229 | 
230 | impl IndexMut<Nibble> for ChildRefGroup {
231 |     fn index_mut(&mut self, index: Nibble) -> &mut Self::Output {
232 |         // SAFETY: a nibble must belong to [0, 16)
233 |         unsafe { self.0.get_unchecked_mut(index.inner() as usize) }
234 |     }
235 | }
--------------------------------------------------------------------------------
/asb-authdb/rainblock-trie/src/lib.rs:
--------------------------------------------------------------------------------
 1 | mod child_ref;
 2 | mod nibble;
 3 | mod rain_mpt;
 4 | #[cfg(test)]
 5 | mod tests;
 6 | mod trie_node;
 7 | mod trie_node_ext;
 8 | 
 9 | #[cfg(not(feature = "thread-safe"))]
10 | mod thread_non_safe;
11 | #[cfg(not(feature = "thread-safe"))]
12 | pub use thread_non_safe::{Node, NodePtr, NodePtrWeak};
13 | #[cfg(not(feature = "thread-safe"))]
14 | unsafe impl<const N: usize> Send for MerklePatriciaTree<N> {} // As the internal Rc is not exposed to the outside, it is safe to declare it as Send.
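// (Illustrative aside, not part of the original file.) The manual `Send` impl
// above can be pinned down with a zero-cost compile-time assertion; a sketch,
// assuming the default (non-`thread-safe`) feature set:
//
//     fn _assert_send<T: Send>() {}
//     fn _merkle_patricia_tree_is_send() {
//         _assert_send::<MerklePatriciaTree<3>>();
//     }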
15 | #[cfg(feature = "thread-safe")]
16 | mod thread_safe;
17 | #[cfg(feature = "thread-safe")]
18 | pub use thread_safe::{Node, NodePtr, NodePtrWeak};
19 | 
20 | pub use rain_mpt::MerklePatriciaTree;
21 | 
22 | fn common_prefix_iter<'a, T: Eq>(a: &'a [T], b: &'a [T]) -> impl Iterator<Item = &'a T> {
23 |     a.iter()
24 |         .zip(b.iter())
25 |         .take_while(|(x, y)| x == y)
26 |         .map(|(x, _)| x.clone())
27 | }
28 | 
29 | fn add_prefix<T: Clone>(base: &mut Vec<T>, prefix: &[T]) {
30 |     *base = [prefix, &base[..]].concat()
31 | }
32 | 
33 | #[cfg(feature = "light-hash")]
34 | pub use blake2_hasher::Blake2bHasher as RlpHasher;
35 | #[cfg(not(feature = "light-hash"))]
36 | pub use keccak_hasher::KeccakHasher as RlpHasher;
--------------------------------------------------------------------------------
/asb-authdb/rainblock-trie/src/nibble.rs:
--------------------------------------------------------------------------------
 1 | use std::fmt::Debug;
 2 | 
 3 | use smallvec::{smallvec, SmallVec};
 4 | 
 5 | #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
 6 | pub struct Nibble(u8);
 7 | 
 8 | impl Debug for Nibble {
 9 |     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
10 |         self.inner().fmt(f)
11 |     }
12 | }
13 | 
14 | impl Nibble {
15 |     #[inline(always)]
16 |     pub const fn inner(self) -> u8 {
17 |         self.0
18 |     }
19 | 
20 |     #[inline(always)]
21 |     pub const fn from_hi(x: u8) -> Self {
22 |         Self(x >> 4)
23 |     }
24 | 
25 |     #[inline(always)]
26 |     pub const fn from_lo(x: u8) -> Self {
27 |         Self(x & 0xf)
28 |     }
29 | 
30 |     #[inline(always)]
31 |     pub const fn from_hi_and_lo(x: u8) -> [Self; 2] {
32 |         [Self::from_hi(x), Self::from_lo(x)]
33 |     }
34 | 
35 |     #[inline(always)]
36 |     pub const fn combine_pair(hi: Nibble, lo: Nibble) -> u8 {
37 |         (hi.0 << 4) | lo.0
38 |     }
39 | 
40 |     #[inline(always)]
41 |     pub const fn zero() -> Self {
42 |         Nibble(0)
43 |     }
44 | 
45 |     #[inline(always)]
46 |     pub const fn is_zero(self) -> bool {
47 |         self.inner() == 0u8
48 |     }
49 | 
50 |     pub fn all() -> impl Iterator<Item = Nibble> {
51 |         (0..16).map(Nibble::from_lo)
52 |     }
53 | }
54 | 
55 | pub fn bytes_to_nibble_list(data: Vec<u8>) -> Vec<Nibble> {
56 |     nibble_iter(&data).collect()
57 | }
58 | 
59 | fn nibble_iter(data: &[u8]) -> impl Iterator<Item = Nibble> + '_ {
60 |     data.iter().cloned().map(Nibble::from_hi_and_lo).flatten()
61 | }
62 | 
63 | pub fn from_mpt_key(key: Vec<u8>) -> (Vec<Nibble>, bool) {
64 |     let mut iterator = nibble_iter(&key);
65 |     let ty = iterator.next().unwrap();
66 |     if (ty.inner() & 0x1) == 0 {
67 |         iterator.next().unwrap();
68 |     }
69 |     let leaf = (ty.inner() & 0x2) != 0;
70 | 
71 |     return (iterator.collect(), leaf);
72 | }
73 | 
74 | pub fn to_mpt_key(key: &[Nibble], leaf: bool) -> Vec<u8> {
75 |     let odd = (key.len() % 2) == 1;
76 |     let prefix: SmallVec<[Nibble; 4]> = match (odd, leaf) {
77 |         (false, false) => smallvec![Nibble(0), Nibble(0)],
78 |         (true, false) => smallvec![Nibble(1)],
79 |         (false, true) => smallvec![Nibble(2), Nibble(0)],
80 |         (true, true) => smallvec![Nibble(3)],
81 |     };
82 | 
83 |     let mut iterator = prefix.iter().cloned().chain(key.iter().cloned());
84 |     let mut answer = Vec::with_capacity(key.len() / 2 + 2);
85 |     while let Some(hi) = iterator.next() {
86 |         let lo = iterator.next().unwrap();
87 |         answer.push(Nibble::combine_pair(hi, lo));
88 |     }
89 |     answer
90 | }
--------------------------------------------------------------------------------
/asb-authdb/rainblock-trie/src/tests.rs:
--------------------------------------------------------------------------------
1 | use std::sync::Arc;
2 | 
3 | use crate::rain_mpt::EMPTY_ROOT;
4 | 
5 | use super::*;
6 | use kvdb::KeyValueDB;
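// (Illustrative aside, not part of the original suite.) `to_mpt_key` above
// implements Ethereum's hex-prefix encoding: the first nibble carries an
// odd-length bit (0x1) and a leaf bit (0x2), padded with a zero nibble for
// even lengths, which is exactly what `from_mpt_key` strips again. A sketch
// of the round trip:
//
//     use crate::nibble::{bytes_to_nibble_list, from_mpt_key, to_mpt_key};
//
//     let nibbles = bytes_to_nibble_list(vec![0xab, 0xcd]);
//     let encoded = to_mpt_key(&nibbles, true); // [0x20, 0xab, 0xcd]
//     assert_eq!(from_mpt_key(encoded), (nibbles, true));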
  7 | use kvdb_memorydb;
  8 | use rand::prelude::*;
  9 | use rand::rngs::StdRng;
 10 | 
 11 | type Bytes = Vec<u8>;
 12 | 
 13 | fn new_db() -> Arc<dyn KeyValueDB> {
 14 |     Arc::new(kvdb_memorydb::create(1))
 15 | }
 16 | 
 17 | #[test]
 18 | fn test_put_random() {
 19 |     let mut rng = StdRng::seed_from_u64(123);
 20 | 
 21 |     let mut trie = MerklePatriciaTree::<3>::new(new_db());
 22 |     let mut tasks: Vec<(Bytes, Bytes)> = (0..=255u8).map(|x| (vec![x], vec![x])).collect();
 23 |     tasks.shuffle(&mut rng);
 24 | 
 25 |     for (key, value) in tasks.drain(..) {
 26 |         trie.put(key, value);
 27 |     }
 28 | 
 29 |     for i in 0..=255u8 {
 30 |         assert_eq!(
 31 |             trie.get(vec![i]),
 32 |             Some(vec![i]),
 33 |             "Fail on position {} before commit",
 34 |             i
 35 |         );
 36 |     }
 37 | 
 38 |     let first_hash = trie.commit().unwrap();
 39 | 
 40 |     for i in 0..=255u8 {
 41 |         assert_eq!(
 42 |             trie.get(vec![i]),
 43 |             Some(vec![i]),
 44 |             "Fail on position {} after commit",
 45 |             i
 46 |         );
 47 |     }
 48 | 
 49 |     let mut another_trie = MerklePatriciaTree::<3>::new(new_db());
 50 | 
 51 |     for i in 0..=255 {
 52 |         another_trie.put(vec![i], vec![i]);
 53 |     }
 54 |     let second_hash = another_trie.commit().unwrap();
 55 | 
 56 |     assert_eq!(first_hash, second_hash);
 57 | }
 58 | 
 59 | #[test]
 60 | fn comprehensive_test_with_similar_prefix() {
 61 |     let mut rng = StdRng::seed_from_u64(124);
 62 |     let last_byte = vec![0x00, 0xf0, 0xff];
 63 |     let make_key =
 64 |         |x: usize| -> Vec<u8> { [&vec![0xff; x / 3][..], &last_byte[x % 3..=x % 3]].concat() };
 65 |     let make_value = |x: usize| -> Vec<u8> { vec![x as u8] };
 66 |     const SAMPLES: usize = 256;
 67 | 
 68 |     let mut trie = MerklePatriciaTree::<1000>::new(new_db());
 69 |     let mut tasks: Vec<usize> = (0..SAMPLES).collect();
 70 |     tasks.shuffle(&mut rng);
 71 | 
 72 |     // Check put consistency
 73 |     for i in tasks.drain(..) {
 74 |         trie.put(make_key(i), make_value(i));
 75 |     }
 76 | 
 77 |     for i in 0..SAMPLES {
 78 |         assert_eq!(
 79 |             trie.get(make_key(i)),
 80 |             Some(make_value(i)),
 81 |             "Fail on position {} before commit",
 82 |             i
 83 |         );
 84 |     }
 85 | 
 86 |     let first_hash = trie.commit().unwrap();
 87 | 
 88 |     for i in 0..SAMPLES {
 89 |         assert_eq!(
 90 |             trie.get(make_key(i)),
 91 |             Some(make_value(i)),
 92 |             "Fail on position {} after commit",
 93 |             i
 94 |         );
 95 |     }
 96 | 
 97 |     // Check put consistency
 98 |     let db2 = new_db();
 99 |     let mut trie2 = MerklePatriciaTree::<3>::new(db2.clone());
100 |     for i in 0..SAMPLES {
101 |         trie2.put(make_key(i), make_value(i));
102 |     }
103 | 
104 |     let second_hash = trie2.commit().unwrap();
105 |     assert_eq!(first_hash, second_hash);
106 | 
107 |     // Check the number of cached nodes.
108 |     assert!(trie2.loaded_nodes_count() <= 5);
109 | 
110 |     // Read all the data and check the number of cached nodes.
111 |     (0..SAMPLES).for_each(|x| {
112 |         trie2.get(make_key(x));
113 |     });
114 |     assert!(trie2.loaded_nodes_count() > SAMPLES / 3);
115 |     trie2.commit().unwrap();
116 |     assert!(trie2.loaded_nodes_count() <= 5);
117 | 
118 |     // Check flush top layers
119 |     let cached_nodes = trie2.loaded_nodes_count();
120 |     let dumped_nodes = db2.iter_from_prefix(0, &[]).count();
121 |     trie2.flush_all().unwrap();
122 |     let new_dumped_nodes = db2.iter_from_prefix(0, &[]).count();
123 |     assert_eq!(cached_nodes + dumped_nodes, new_dumped_nodes);
124 | 
125 |     // Check reload trie from db
126 |     let mut trie2 = MerklePatriciaTree::<3>::new(db2.clone());
127 |     trie2.commit().unwrap();
128 |     assert!(trie2.loaded_nodes_count() <= 5);
129 | 
130 |     // Check deletion
131 |     let mut tasks: Vec<usize> = (0..SAMPLES).collect();
132 |     tasks.shuffle(&mut rng);
133 |     for i in tasks.drain(..) {
134 |         assert_eq!(
135 |             trie2.get(make_key(i)),
136 |             Some(make_value(i)),
137 |             "Fail in deletion",
138 |         );
139 |         trie2.put(make_key(i), vec![]);
140 |     }
141 | 
142 |     let empty_hash = trie2.commit().unwrap();
143 | 
144 |     // Check no memory leak
145 |     assert!(trie2.loaded_nodes_count() <= 5);
146 |     assert_eq!(empty_hash, EMPTY_ROOT);
147 | 
148 |     // Check no leak on db
149 |     assert!(db2.iter_from_prefix(0, &vec![]).next().is_none());
150 | }
--------------------------------------------------------------------------------
/asb-authdb/rainblock-trie/src/thread_non_safe.rs:
--------------------------------------------------------------------------------
 1 | use std::{
 2 |     ops::{Deref, DerefMut},
 3 |     rc::{Rc, Weak},
 4 | };
 5 | 
 6 | use crate::{trie_node::TrieNode, trie_node_ext::TrieNodeExt};
 7 | 
 8 | #[derive(Clone)]
 9 | pub struct Node(pub TrieNodeExt);
10 | 
11 | impl Node {
12 |     pub fn as_ref(&self) -> impl Deref<Target = TrieNodeExt> + '_ {
13 |         &self.0
14 |     }
15 | 
16 |     pub fn as_mut(&mut self) -> impl DerefMut<Target = TrieNodeExt> + '_ {
17 |         &mut self.0
18 |     }
19 | 
20 |     pub fn as_mut_inner(&mut self) -> impl DerefMut<Target = TrieNode> + '_ {
21 |         &mut *self.0
22 |     }
23 | }
24 | 
25 | #[derive(Clone)]
26 | pub struct NodePtr(pub Rc<Node>);
27 | 
28 | impl NodePtr {
29 |     pub fn ptr_eq(me: &Self, other: &Self) -> bool {
30 |         Rc::ptr_eq(&me.0, &other.0)
31 |     }
32 | 
33 |     pub fn downgrade(me: &Self) -> NodePtrWeak {
34 |         NodePtrWeak(Rc::downgrade(&me.0))
35 |     }
36 | 
37 |     pub fn make_mut(me: &mut Self) -> &mut Node {
38 |         Rc::make_mut(&mut me.0)
39 |     }
40 | 
41 |     pub fn as_ref(&self) -> impl Deref<Target = TrieNodeExt> + '_ {
42 |         &self.0.deref().0
43 |     }
44 | }
45 | 
46 | pub struct NodePtrWeak(pub Weak<Node>);
47 | 
48 | impl NodePtrWeak {
49 |     pub fn upgrade(&self) -> Option<NodePtr> {
50 |         self.0.upgrade().map(|x| NodePtr(x))
51 |     }
52 | }
--------------------------------------------------------------------------------
/asb-authdb/rainblock-trie/src/thread_safe.rs:
--------------------------------------------------------------------------------
 1 | use std::{
 2 |     ops::{Deref, DerefMut},
 3 |     sync::{Arc, Mutex, MutexGuard, Weak},
 4 | };
 5 | 
 6 | use crate::{trie_node::TrieNode, trie_node_ext::TrieNodeExt};
 7 | 
 8 | pub struct Node(pub Mutex<TrieNodeExt>);
 9 | 
10 | impl Clone for Node {
11 |     fn clone(&self) -> Self {
12 |         Self(Mutex::new(self.0.try_lock().unwrap().clone()))
13 |     }
14 | }
15 | 
16 | impl Node {
17 |     pub fn as_ref(&self) -> impl Deref<Target = TrieNodeExt> + '_ {
18 |         self.0.try_lock().unwrap()
19 |     }
20 | 
21 |     pub fn as_mut(&mut self) -> impl DerefMut<Target = TrieNodeExt> + '_ {
22 |         self.0.try_lock().unwrap()
23 |     }
24 | 
25 |     pub fn as_mut_inner(&mut self) -> impl DerefMut<Target = TrieNode> + '_ {
26 |         NodeGuard(self.0.try_lock().unwrap())
27 |     }
28 | }
29 | 
30 | #[derive(Clone)]
31 | pub struct NodePtr(pub Arc<Node>);
32 | 
33 | impl NodePtr {
34 |     pub fn ptr_eq(me: &Self, other: &Self) -> bool {
35 |         Arc::ptr_eq(&me.0, &other.0)
36 |     }
37 | 
38 |     pub fn downgrade(me: &Self) -> NodePtrWeak {
39 |         NodePtrWeak(Arc::downgrade(&me.0))
40 |     }
41 | 
42 |     pub fn make_mut(me: &mut Self) -> &mut Node {
43 |         Arc::make_mut(&mut me.0)
44 |     }
45 | 
46 |     pub fn as_ref(&self) -> impl Deref<Target = TrieNodeExt> + '_ {
47 |         self.0.deref().0.try_lock().unwrap()
48 |     }
49 | }
50 | 
51 | pub struct NodePtrWeak(pub Weak<Node>);
52 | 
53 | impl NodePtrWeak {
54 |     pub fn upgrade(&self) -> Option<NodePtr> {
55 |         self.0.upgrade().map(|x| NodePtr(x))
56 |     }
57 | }
58 | 
59 | pub struct NodeGuard<'a>(MutexGuard<'a, TrieNodeExt>);
60 | 
61 | impl Deref for NodeGuard<'_> {
62 |     type Target = TrieNode;
63 | 
64 |     fn deref(&self) -> &Self::Target {
65 |         self.0.deref().deref()
66 |     }
67 | }
68 | 
69 | impl DerefMut for
NodeGuard<'_> { 70 | fn deref_mut(&mut self) -> &mut Self::Target { 71 | self.0.deref_mut().deref_mut() 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /asb-authdb/src/amt.rs: -------------------------------------------------------------------------------- 1 | use std::sync::Arc; 2 | 3 | use kvdb::KeyValueDB; 4 | 5 | use lvmt_db::{lvmt_db::cached_pp_with_depth, single_amt::AmtDB}; 6 | 7 | use asb_options::Options; 8 | use authdb::AuthDB; 9 | 10 | pub struct Amt { 11 | amt: AmtDB, 12 | print_root_period: Option, 13 | } 14 | 15 | pub fn new(backend: Arc, opts: &Options) -> Amt { 16 | let pp = cached_pp_with_depth("./pp", N); 17 | pp.warm_quotient(); 18 | let shard_info = opts.shards.map(|size| (size.trailing_zeros() as usize, 0)); 19 | Amt { 20 | amt: AmtDB::new(backend, pp, shard_info), 21 | print_root_period: if opts.print_root { 22 | Some(opts.report_epoch) 23 | } else { 24 | None 25 | }, 26 | } 27 | } 28 | 29 | impl AuthDB for Amt { 30 | fn get(&self, key: Vec) -> Option> { 31 | // println!("read"); 32 | self.amt.get(&key).map(Into::into) 33 | } 34 | 35 | fn set(&mut self, key: Vec, value: Vec) { 36 | // println!("write"); 37 | self.amt.set(&key, value) 38 | } 39 | 40 | fn commit(&mut self, index: usize) { 41 | // println!("commit"); 42 | let root = self.amt.commit(); 43 | if let Some(period) = self.print_root_period { 44 | if index % period == 0 { 45 | println!("Commitment {:?}", root); 46 | } 47 | } 48 | } 49 | 50 | fn backend(&self) -> Option<&dyn KeyValueDB> { 51 | Some(&*self.amt.db) 52 | } 53 | 54 | fn flush_all(&mut self) { 55 | let _ = self.amt.commit(); 56 | } 57 | } 58 | 59 | #[allow(unused)] 60 | #[derive(Clone)] 61 | pub struct AMTCounter { 62 | put_count: [u64; 4], 63 | inc_key_count: u64, 64 | inc_tree_count: u64, 65 | inc_key_level_count: u64, 66 | } 67 | 68 | impl Default for AMTCounter { 69 | fn default() -> Self { 70 | Self { 71 | put_count: [0; 4], 72 | inc_key_count: 0, 73 | inc_tree_count: 0, 74 | inc_key_level_count: 0, 75 | } 76 | } 77 | } 78 | -------------------------------------------------------------------------------- /asb-authdb/src/lib.rs: -------------------------------------------------------------------------------- 1 | mod amt; 2 | #[cfg(feature = "lmpts")] 3 | mod lmpts; 4 | mod lvmt; 5 | mod mpt; 6 | mod rain_mpt; 7 | mod raw; 8 | 9 | use lvmt::LvmtCounter; 10 | use mpt::MptCounter; 11 | 12 | use asb_options::{AuthAlgo, Options}; 13 | use asb_profile::CounterTrait; 14 | use asb_profile::{Counter, Reporter}; 15 | use authdb::AuthDB; 16 | use kvdb::KeyValueDB; 17 | use std::sync::Arc; 18 | 19 | fn open_lmpts(dir: &str) -> Box { 20 | #[cfg(feature = "lmpts")] 21 | { 22 | Box::new(lmpts::new(dir)) 23 | } 24 | #[cfg(not(feature = "lmpts"))] 25 | { 26 | let _ = dir; 27 | panic!("LMPTs can only work with feature asb-backend!") 28 | } 29 | } 30 | 31 | pub fn new<'a>(backend: Arc, opts: &'a Options) -> (Box, Reporter<'a>) { 32 | let (db, counter): (Box, Box) = match opts.algorithm { 33 | AuthAlgo::RAW => (Box::new(raw::new(backend)), Box::new(Counter::default())), 34 | AuthAlgo::LVMT => ( 35 | Box::new(lvmt::new(backend, opts)), 36 | Box::new(LvmtCounter::default()), 37 | ), 38 | AuthAlgo::MPT => { 39 | let mpt_db = mpt::new(backend, opts); 40 | let counter = MptCounter::from_mpt_db(&mpt_db); 41 | (Box::new(mpt_db), Box::new(counter)) 42 | } 43 | AuthAlgo::LMPTS => (open_lmpts(&opts.db_dir), Box::new(Counter::default())), 44 | AuthAlgo::AMT(x) => { 45 | let authdb = exaust_construct!(x, backend, opts, 20, 21, 
22, 23, 24, 25, 26, 27, 28); 46 | (authdb, Box::new(Counter::default())) 47 | } 48 | AuthAlgo::RAIN => ( 49 | Box::new(rain_mpt::new(backend)), 50 | Box::new(Counter::default()), 51 | ), 52 | }; 53 | 54 | let mut reporter = Reporter::new(opts); 55 | reporter.set_counter(counter); 56 | 57 | return (db, reporter); 58 | } 59 | 60 | macro_rules! exaust_construct { 61 | ($input: ident, $backend: ident, $opts: ident, $idx:tt $(, $rest:tt)*) => { 62 | if $input == $idx { 63 | Box::new(amt::new::<$idx>($backend, $opts)) as Box 64 | } else { 65 | exaust_construct!($input, $backend, $opts, $($rest),*) 66 | } 67 | }; 68 | ($input: ident, $backend: ident, $opts: ident, )=>{ 69 | unreachable!("Unsupport index") 70 | } 71 | } 72 | use exaust_construct; 73 | -------------------------------------------------------------------------------- /asb-authdb/src/lmpts.rs: -------------------------------------------------------------------------------- 1 | use asb_backend::cfx_storage::{ 2 | state::StateTrait, state_manager::StateManagerTrait, StateIndex, StorageConfiguration, 3 | StorageManager, StorageState, 4 | }; 5 | use authdb::AuthDB; 6 | use cfx_primitives::StorageKey; 7 | use kvdb::KeyValueDB; 8 | use primitive_types::H256; 9 | 10 | use std::sync::Arc; 11 | 12 | pub struct Lmpts { 13 | manager: Arc, 14 | state: StorageState, 15 | } 16 | 17 | pub fn new(dir: &str) -> Lmpts { 18 | let config = StorageConfiguration::new_default(dir, 200); 19 | let manager = Arc::new(StorageManager::new(config).unwrap()); 20 | let state = manager.get_state_for_genesis_write(); 21 | Lmpts { manager, state } 22 | } 23 | 24 | impl AuthDB for Lmpts { 25 | fn get(&self, key: Vec) -> Option> { 26 | let key = StorageKey::AccountKey(key.as_slice()); 27 | self.state.get(key).unwrap() 28 | } 29 | 30 | fn set(&mut self, key: Vec, value: Vec) { 31 | let key = StorageKey::AccountKey(key.as_slice()); 32 | self.state.set(key, value.into_boxed_slice()).unwrap() 33 | } 34 | 35 | fn commit(&mut self, index: usize) { 36 | let mut epoch_id = H256::default(); 37 | epoch_id.0[0..8].copy_from_slice(index.to_le_bytes().as_ref()); 38 | 39 | let state_root = self.state.compute_state_root().unwrap(); 40 | self.state.commit(epoch_id).unwrap(); 41 | let state_index = StateIndex::new_for_next_epoch( 42 | &epoch_id, 43 | &state_root, 44 | index as u64 + 1, 45 | self.manager 46 | .get_storage_manager() 47 | .get_snapshot_epoch_count(), 48 | ); 49 | self.state = self 50 | .manager 51 | .get_state_for_next_epoch(state_index) 52 | .expect("unwrap result") 53 | .expect("unwrap option") 54 | } 55 | 56 | fn backend(&self) -> Option<&dyn KeyValueDB> { 57 | None 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /asb-authdb/src/lvmt.rs: -------------------------------------------------------------------------------- 1 | use asb_options::Options; 2 | use asb_profile::CounterTrait; 3 | use authdb::AuthDB; 4 | use kvdb::KeyValueDB; 5 | use lvmt_db::crypto::export::ProjectiveCurve; 6 | use lvmt_db::{ 7 | lvmt_db::{cached_pp, LvmtDB, INC_KEY_COUNT, INC_KEY_LEVEL_SUM, INC_TREE_COUNT}, 8 | multi_layer_amt::Key, 9 | storage::access::PUT_COUNT, 10 | }; 11 | use std::sync::Arc; 12 | 13 | pub struct Lvmt { 14 | amt: LvmtDB, 15 | print_root_period: Option, 16 | } 17 | 18 | pub fn new(backend: Arc, opts: &Options) -> Lvmt { 19 | let pp = cached_pp("./pp"); 20 | pp.warm_quotient(); 21 | let shard_info = opts.shards.map(|size| (size.trailing_zeros() as usize, 0)); 22 | Lvmt { 23 | amt: LvmtDB::new(backend, pp, true, shard_info), 24 
| print_root_period: if opts.print_root { 25 | Some(opts.report_epoch) 26 | } else { 27 | None 28 | }, 29 | } 30 | } 31 | 32 | impl AuthDB for Lvmt { 33 | fn get(&self, key: Vec) -> Option> { 34 | // println!("read"); 35 | self.amt.get(&Key(key)).unwrap() 36 | } 37 | 38 | fn set(&mut self, key: Vec, value: Vec) { 39 | // println!("write"); 40 | self.amt.set(&Key(key), value.into_boxed_slice()) 41 | } 42 | 43 | fn commit(&mut self, index: usize) { 44 | // println!("commit"); 45 | let (commit, root) = self.amt.commit(index as u64).unwrap(); 46 | if let Some(period) = self.print_root_period { 47 | if index % period == 0 { 48 | let aff_comm = commit.into_affine(); 49 | println!("Commitment {:?}, Merkle {:?}", aff_comm, root); 50 | } 51 | } 52 | } 53 | 54 | fn backend(&self) -> Option<&dyn KeyValueDB> { 55 | Some(&*self.amt.kvdb) 56 | } 57 | 58 | fn flush_all(&mut self) { 59 | self.amt.flush_root(); 60 | } 61 | } 62 | 63 | #[derive(Clone)] 64 | pub struct LvmtCounter { 65 | put_count: [u64; 4], 66 | inc_key_count: u64, 67 | inc_tree_count: u64, 68 | inc_key_level_count: u64, 69 | } 70 | 71 | impl Default for LvmtCounter { 72 | fn default() -> Self { 73 | Self { 74 | put_count: [0; 4], 75 | inc_key_count: 0, 76 | inc_tree_count: 0, 77 | inc_key_level_count: 0, 78 | } 79 | } 80 | } 81 | 82 | impl CounterTrait for LvmtCounter { 83 | fn report(&mut self) -> String { 84 | let put_count = *PUT_COUNT.lock().unwrap(); 85 | let inc_key_count = *INC_KEY_COUNT.lock().unwrap(); 86 | let inc_tree_count = *INC_TREE_COUNT.lock().unwrap(); 87 | let inc_key_level_count = *INC_KEY_LEVEL_SUM.lock().unwrap(); 88 | 89 | let key_diff = inc_key_count - self.inc_key_count; 90 | let tree_diff = inc_tree_count - self.inc_tree_count; 91 | let level_diff = inc_key_level_count - self.inc_key_level_count; 92 | let avg_level = (level_diff as f64) / (key_diff as f64); 93 | 94 | let answer = format!( 95 | "avg levels: {:.3}, access writes {:?}, data writes {} {}", 96 | avg_level, 97 | self.put_count 98 | .iter() 99 | .zip(put_count.iter()) 100 | .map(|(x, y)| y - x) 101 | .collect::>(), 102 | key_diff * 2, 103 | tree_diff * 2, 104 | ); 105 | 106 | self.put_count = *PUT_COUNT.lock().unwrap(); 107 | self.inc_key_count = *INC_KEY_COUNT.lock().unwrap(); 108 | self.inc_tree_count = *INC_TREE_COUNT.lock().unwrap(); 109 | self.inc_key_level_count = *INC_KEY_LEVEL_SUM.lock().unwrap(); 110 | 111 | answer 112 | } 113 | } 114 | -------------------------------------------------------------------------------- /asb-authdb/src/mpt.rs: -------------------------------------------------------------------------------- 1 | use std::cell::RefCell; 2 | use std::collections::BTreeMap; 3 | use std::sync::Arc; 4 | 5 | use hash_db::Hasher; 6 | use kvdb::{DBKey, DBOp, DBTransaction, KeyValueDB}; 7 | use patricia_trie_ethereum::RlpNodeCodec; 8 | use primitive_types::H256; 9 | use trie_db::{NodeCodec, Trie, TrieMut}; 10 | 11 | use parity_journaldb::{Algorithm, DBHasher, JournalDB}; 12 | use parity_scale_codec::KeyedVec; 13 | 14 | use asb_options::Options; 15 | use asb_profile::CounterTrait; 16 | use authdb::AuthDB; 17 | 18 | pub type TrieDBMut<'db> = trie_db::TrieDBMut<'db, DBHasher, RlpNodeCodec>; 19 | pub type TrieDB<'db> = trie_db::TrieDB<'db, DBHasher, RlpNodeCodec>; 20 | 21 | pub struct MptDB { 22 | backing: Arc, 23 | db: Arc>>, 24 | root: H256, 25 | epoch: usize, 26 | print_root_period: Option, 27 | journal_epoch: usize, 28 | } 29 | 30 | fn epoch_hash(epoch: usize) -> H256 { 31 | DBHasher::hash(&epoch.to_le_bytes()) 32 | } 33 | 34 | pub(crate) fn 
new(backend: Arc, opts: &Options) -> MptDB { 35 | let db = parity_journaldb::new(backend.clone(), Algorithm::OverlayRecent, 0); 36 | let db = Arc::new(RefCell::new(db)); 37 | let print_root_period = if opts.print_root { 38 | Some(opts.report_epoch) 39 | } else { 40 | None 41 | }; 42 | let root = if let Some(value) = backend.get([0u8; 256].to_vec()) { 43 | H256::from_slice(&value) 44 | } else { 45 | RlpNodeCodec::::hashed_null_node() 46 | }; 47 | 48 | let journal_epoch = 0; 49 | 50 | MptDB { 51 | db, 52 | backing: backend, 53 | root, 54 | epoch: 0, 55 | print_root_period, 56 | journal_epoch, 57 | } 58 | } 59 | 60 | impl AuthDB for MptDB { 61 | // This logic is in function `require_or_from` of OpenEthereum 62 | fn get(&self, key: Vec) -> Option> { 63 | let db = self.db.borrow(); 64 | let hash_db = &db.as_hash_db(); 65 | 66 | let trie = TrieDB::new(hash_db, &self.root).unwrap(); 67 | trie.get(key.as_slice()) 68 | .unwrap() 69 | .map(|x| x.into_vec().into_boxed_slice()) 70 | } 71 | 72 | // This logic is in function `commit` in `ethcore/src/state/run` of OpenEthereum 73 | fn set(&mut self, key: Vec, value: Vec) { 74 | let mut db = self.db.borrow_mut(); 75 | let hash_db = db.as_hash_db_mut(); 76 | 77 | let mut trie = TrieDBMut::from_existing(hash_db, &mut self.root).unwrap(); 78 | trie.insert(key.as_slice(), value.as_slice()).unwrap(); 79 | } 80 | 81 | // This logic is in function `commit` in `ethcore/src/state/run` of OpenEthereum 82 | fn commit(&mut self, index: usize) { 83 | self.epoch = index; 84 | 85 | let mut batch = DBTransaction::new(); 86 | let mut db = self.db.borrow_mut(); 87 | 88 | // The third parameter is not used in archive journal db. We feed an arbitrary data. 89 | db.journal_under(&mut batch, index as u64, &epoch_hash(index)) 90 | .unwrap(); 91 | if let Some(old_index) = index.checked_sub(self.journal_epoch) { 92 | db.mark_canonical(&mut batch, old_index as u64, &epoch_hash(old_index)) 93 | .unwrap(); 94 | } 95 | db.backing().write(batch).unwrap(); 96 | db.flush(); 97 | 98 | if let Some(period) = self.print_root_period { 99 | if index % period == 0 { 100 | println!("Root {:?}", self.root); 101 | } 102 | } 103 | } 104 | 105 | fn flush_all(&mut self) { 106 | let mut batch = DBTransaction::new(); 107 | let mut db = self.db.borrow_mut(); 108 | for i in (0..self.journal_epoch).into_iter().rev() { 109 | let index = self.epoch - i; 110 | db.mark_canonical(&mut batch, index as u64, &epoch_hash(index)) 111 | .unwrap(); 112 | } 113 | batch.ops.push(DBOp::Insert { 114 | col: 0, 115 | key: DBKey::from_slice(&[0u8; 256]), 116 | value: self.root.to_keyed_vec(&[]), 117 | }); 118 | db.backing().write(batch).unwrap(); 119 | db.flush(); 120 | } 121 | 122 | fn backend(&self) -> Option<&dyn KeyValueDB> { 123 | Some(&*self.backing) 124 | } 125 | } 126 | 127 | pub struct MptCounter { 128 | journal_db: Arc>>, 129 | } 130 | 131 | impl MptCounter { 132 | pub fn from_mpt_db(mpt_db: &MptDB) -> Self { 133 | Self { 134 | journal_db: mpt_db.db.clone(), 135 | } 136 | } 137 | } 138 | 139 | impl CounterTrait for MptCounter { 140 | fn report(&mut self) -> String { 141 | let mut sizes = BTreeMap::new(); 142 | self.journal_db.borrow().get_sizes(&mut sizes); 143 | format!( 144 | "Recent backing size: {}", 145 | sizes.get("db_overlay_recent_backing_size").unwrap() 146 | ) 147 | } 148 | } 149 | -------------------------------------------------------------------------------- /asb-authdb/src/rain_mpt.rs: -------------------------------------------------------------------------------- 1 | use std::sync::{Arc, RwLock}; 
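// (Added note.) The trie is wrapped in a `RwLock` because even read paths can
// mutate it: a `get` may pull nodes from the backend into the in-memory cache,
// so `AuthDB::get`, which only receives `&self`, takes the write lock below.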
2 | 3 | use authdb::AuthDB; 4 | use kvdb::KeyValueDB; 5 | use rainblock_trie::MerklePatriciaTree; 6 | 7 | const CACHED_LEVEL: usize = 6; 8 | pub struct RainMpt( 9 | RwLock>, 10 | Arc, 11 | ); 12 | 13 | pub fn new(backend: Arc) -> RainMpt { 14 | RainMpt( 15 | RwLock::new(MerklePatriciaTree::::new(backend.clone())), 16 | backend, 17 | ) 18 | } 19 | 20 | impl AuthDB for RainMpt { 21 | fn get(&self, key: Vec) -> Option> { 22 | self.0.write().unwrap().get(key).map(Vec::into_boxed_slice) 23 | } 24 | 25 | fn set(&mut self, key: Vec, value: Vec) { 26 | self.0.write().unwrap().put(key, value); 27 | } 28 | 29 | fn commit(&mut self, _index: usize) { 30 | self.0.write().unwrap().commit().unwrap(); 31 | } 32 | 33 | fn backend(&self) -> Option<&dyn KeyValueDB> { 34 | Some(&*self.1) 35 | } 36 | 37 | fn flush_all(&mut self) { 38 | self.0.write().unwrap().flush_all().unwrap() 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /asb-authdb/src/raw.rs: -------------------------------------------------------------------------------- 1 | use kvdb::KeyValueDB; 2 | use std::sync::Arc; 3 | 4 | pub fn new(backend: Arc) -> Arc { 5 | backend 6 | } 7 | -------------------------------------------------------------------------------- /asb-backend/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "asb-backend" 3 | version = "0.1.0" 4 | edition = "2021" 5 | authors = ["Chenxing Li "] 6 | 7 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 8 | 9 | [dependencies] 10 | asb-options = { workspace = true } 11 | kvdb = { workspace = true } 12 | kvdb-memorydb = { workspace = true } 13 | libmdbx = { workspace = true } 14 | ouroboros = "0.15.6" 15 | stats = { workspace = true } 16 | parity-util-mem = { workspace = true } 17 | 18 | # Note: we met a version conflict of rocksdb, so you need manually change the dependency of rocksdb. 
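# (Added clarification.) Exactly one of the three dependency blocks below should
# be active at a time; the [features] flags at the bottom only gate the Rust
# code, while the matching rocksdb dependency must be toggled here by hand.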
19 | 20 | 21 | ## Uncomment the following lines in default backend (no features) 22 | cfx-kvdb-rocksdb = { path = "./cfx-kvdb-rocksdb" } 23 | 24 | ## Uncomment the following lines in feature parity-backend 25 | # kvdb-rocksdb = "0.9.1" 26 | 27 | # Uncomment the following lines in feature lmpts-backend 28 | # cfx-storage = { git = "https://github.com/Conflux-Chain/conflux-rust.git", rev = "9de2cc9"} 29 | # cfx-kvdb-rocksdb = { package="kvdb-rocksdb", git = "https://github.com/Conflux-Chain/conflux-rust.git", rev = "2ee2765"} 30 | 31 | 32 | [features] 33 | default = [] 34 | parity-backend=[] 35 | lmpts-backend=[] -------------------------------------------------------------------------------- /asb-backend/cfx-kvdb-rocksdb/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "cfx-kvdb-rocksdb" 3 | version = "0.1.6-var" 4 | authors = ["Parity Technologies "] 5 | repository = "https://github.com/paritytech/parity-common" 6 | description = "kvdb implementation backed by rocksDB" 7 | license = "GPL-3.0" 8 | edition = "2018" 9 | 10 | [dependencies] 11 | fs-swap = "0.2.4" 12 | kvdb = "0.4" 13 | log = "0.4.8" 14 | num_cpus = "1.10.1" 15 | parking_lot = "0.11" 16 | regex = "1.3.1" 17 | malloc_size_of = { workspace = true } 18 | malloc_size_of_derive = { workspace = true } 19 | 20 | [dependencies.parity-util-mem] 21 | version = "0.5" 22 | default-features = false 23 | 24 | [dev-dependencies] 25 | cfx-types = { workspace = true } 26 | tempdir = "0.3.7" 27 | 28 | [dependencies.rocksdb] 29 | git = "https://github.com/Conflux-Chain/rust-rocksdb.git" 30 | rev = "cac983ab886a0701c8458bce76a76f63de16e1bd" 31 | -------------------------------------------------------------------------------- /asb-backend/parity-stats/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "parity-stats" 3 | version = "0.1.0" 4 | authors = ["Parity Technologies "] 5 | 6 | [dependencies] 7 | log = "0.4" 8 | prometheus = "0.9.0" 9 | -------------------------------------------------------------------------------- /asb-backend/src/cfx_kvdb_rocksdb.rs: -------------------------------------------------------------------------------- 1 | use std::path::Path; 2 | use std::sync::Arc; 3 | 4 | use cfx_kvdb_rocksdb::{CompactionProfile, Database, DatabaseConfig}; 5 | 6 | use asb_options::Options; 7 | 8 | pub fn open(db_dir: &str, opts: &Options) -> Arc { 9 | let mut db_config = DatabaseConfig::with_columns(opts.num_cols()); 10 | 11 | db_config.memory_budget = Some(opts.cache_size as usize); 12 | db_config.compaction = CompactionProfile::auto(Path::new(db_dir)); 13 | db_config.disable_wal = false; 14 | #[cfg(not(any(feature = "parity-backend", feature = "lmpts-backend")))] 15 | { 16 | db_config.enable_statistics = !opts.no_stat; 17 | } 18 | 19 | let db = Database::open(&db_config, db_dir).unwrap(); 20 | 21 | Arc::new(db) 22 | } 23 | -------------------------------------------------------------------------------- /asb-backend/src/db_with_mertics.rs: -------------------------------------------------------------------------------- 1 | #![allow(unused)] 2 | 3 | use std::io::Read; 4 | use std::sync::Arc; 5 | 6 | use cfx_kvdb_rocksdb::Database; 7 | use kvdb::{DBOp, DBTransaction, DBValue, KeyValueDB}; 8 | use parity_util_mem::{MallocSizeOf, MallocSizeOfOps}; 9 | 10 | // Database with enabled statistics 11 | pub struct DatabaseWithMetrics { 12 | db: Arc, 13 | pub reads: std::sync::atomic::AtomicI64, 14 | pub writes: 
std::sync::atomic::AtomicI64, 15 | bytes_read: std::sync::atomic::AtomicI64, 16 | bytes_written: std::sync::atomic::AtomicI64, 17 | } 18 | 19 | impl DatabaseWithMetrics { 20 | /// Create a new instance 21 | pub fn new(db: Arc) -> Self { 22 | Self { 23 | db, 24 | reads: std::sync::atomic::AtomicI64::new(0), 25 | writes: std::sync::atomic::AtomicI64::new(0), 26 | bytes_read: std::sync::atomic::AtomicI64::new(0), 27 | bytes_written: std::sync::atomic::AtomicI64::new(0), 28 | } 29 | } 30 | } 31 | 32 | impl MallocSizeOf for DatabaseWithMetrics { 33 | fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize { 34 | MallocSizeOf::size_of(&*self.db, ops) 35 | } 36 | } 37 | 38 | impl KeyValueDB for DatabaseWithMetrics { 39 | fn get(&self, col: u32, key: &[u8]) -> std::io::Result> { 40 | let res = self.db.get(col, key); 41 | let count = res 42 | .as_ref() 43 | .map_or(0, |y| y.as_ref().map_or(0, |x| x.bytes().count())); 44 | 45 | self.reads 46 | .fetch_add(1, std::sync::atomic::Ordering::Relaxed); 47 | self.bytes_read 48 | .fetch_add(count as i64, std::sync::atomic::Ordering::Relaxed); 49 | 50 | res 51 | } 52 | 53 | fn get_by_prefix(&self, col: u32, prefix: &[u8]) -> Option> { 54 | let res = self.db.get_by_prefix(col, prefix); 55 | let count = res.as_ref().map_or(0, |x| x.bytes().count()); 56 | 57 | self.reads 58 | .fetch_add(1, std::sync::atomic::Ordering::Relaxed); 59 | self.bytes_read 60 | .fetch_add(count as i64, std::sync::atomic::Ordering::Relaxed); 61 | 62 | res 63 | } 64 | fn write_buffered(&self, transaction: DBTransaction) { 65 | let mut count = 0; 66 | for op in &transaction.ops { 67 | count += match op { 68 | DBOp::Insert { value, .. } => value.bytes().count(), 69 | _ => 0, 70 | }; 71 | } 72 | 73 | self.writes.fetch_add( 74 | transaction.ops.len() as i64, 75 | std::sync::atomic::Ordering::Relaxed, 76 | ); 77 | self.bytes_written 78 | .fetch_add(count as i64, std::sync::atomic::Ordering::Relaxed); 79 | 80 | self.db.write_buffered(transaction) 81 | } 82 | fn write(&self, transaction: DBTransaction) -> std::io::Result<()> { 83 | let mut count = 0; 84 | for op in &transaction.ops { 85 | count += match op { 86 | DBOp::Insert { value, .. 
} => value.bytes().count(), 87 | _ => 0, 88 | }; 89 | } 90 | 91 | self.bytes_written 92 | .fetch_add(count as i64, std::sync::atomic::Ordering::Relaxed); 93 | self.writes.fetch_add( 94 | transaction.ops.len() as i64, 95 | std::sync::atomic::Ordering::Relaxed, 96 | ); 97 | self.db.write(transaction) 98 | } 99 | fn flush(&self) -> std::io::Result<()> { 100 | self.db.flush() 101 | } 102 | 103 | fn iter<'a>(&'a self, col: u32) -> Box<(dyn Iterator, Box<[u8]>)> + 'a)> { 104 | KeyValueDB::iter(&*self.db, col) 105 | } 106 | 107 | fn iter_from_prefix<'a>( 108 | &'a self, 109 | col: u32, 110 | prefix: &'a [u8], 111 | ) -> Box, Box<[u8]>)> + 'a> { 112 | self.db.iter_from_prefix(col, prefix) 113 | } 114 | 115 | fn restore(&self, new_db: &str) -> std::io::Result<()> { 116 | self.db.restore(new_db) 117 | } 118 | } 119 | 120 | impl stats::PrometheusMetrics for DatabaseWithMetrics { 121 | fn prometheus_metrics(&self, p: &mut stats::PrometheusRegistry) { 122 | p.register_counter( 123 | "kvdb_reads", 124 | "db reads", 125 | self.reads.load(std::sync::atomic::Ordering::Relaxed) as i64, 126 | ); 127 | p.register_counter( 128 | "kvdb_writes", 129 | "db writes", 130 | self.writes.load(std::sync::atomic::Ordering::Relaxed) as i64, 131 | ); 132 | p.register_counter( 133 | "kvdb_bytes_read", 134 | "db bytes_reads", 135 | self.bytes_read.load(std::sync::atomic::Ordering::Relaxed) as i64, 136 | ); 137 | p.register_counter( 138 | "kvdb_bytes_written", 139 | "db bytes_written", 140 | self.bytes_written 141 | .load(std::sync::atomic::Ordering::Relaxed) as i64, 142 | ); 143 | } 144 | } 145 | -------------------------------------------------------------------------------- /asb-backend/src/in_mem_with_metrics.rs: -------------------------------------------------------------------------------- 1 | #![allow(unused)] 2 | 3 | // The original journaldb relies on some metric run in crate `ethcore-db`. But it doesn't rely on the other dependencies. 4 | 5 | // Copyright 2015-2020 Parity Technologies (UK) Ltd. 6 | // The following code is part of OpenEthereum. 7 | 8 | // OpenEthereum is free software: you can redistribute it and/or modify 9 | // it under the terms of the GNU General Public License as published by 10 | // the Free Software Foundation, either version 3 of the License, or 11 | // (at your option) any later version. 12 | 13 | // OpenEthereum is distributed in the hope that it will be useful, 14 | // but WITHOUT ANY WARRANTY; without even the implied warranty of 15 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 | // GNU General Public License for more details. 17 | 18 | // You should have received a copy of the GNU General Public License 19 | // along with OpenEthereum. If not, see . 
20 | 21 | use kvdb::KeyValueDB; 22 | use std::ops::Deref; 23 | 24 | /// InMemory with disabled statistics 25 | pub struct InMemoryWithMetrics { 26 | db: kvdb_memorydb::InMemory, 27 | } 28 | 29 | impl Deref for InMemoryWithMetrics { 30 | type Target = kvdb_memorydb::InMemory; 31 | 32 | fn deref(&self) -> &Self::Target { 33 | &self.db 34 | } 35 | } 36 | 37 | impl stats::PrometheusMetrics for InMemoryWithMetrics { 38 | fn prometheus_metrics(&self, _: &mut stats::PrometheusRegistry) {} 39 | } 40 | 41 | impl InMemoryWithMetrics { 42 | /// Create new instance 43 | pub fn create(num_cols: u32) -> Self { 44 | Self { 45 | db: kvdb_memorydb::create(num_cols), 46 | } 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /asb-backend/src/lib.rs: -------------------------------------------------------------------------------- 1 | use asb_options::{Backend, Options}; 2 | use kvdb::KeyValueDB; 3 | use std::sync::Arc; 4 | 5 | #[cfg(not(feature = "parity-backend"))] 6 | mod cfx_kvdb_rocksdb; 7 | 8 | #[cfg(not(feature = "parity-backend"))] 9 | mod db_with_mertics; 10 | 11 | #[cfg(feature = "lmpts-backend")] 12 | pub extern crate cfx_storage; 13 | 14 | #[cfg(all(feature = "parity-backend", feature = "lmpts-backend"))] 15 | compile_error!("Multiple backends are chosen!"); 16 | 17 | mod in_mem_with_metrics; 18 | mod mdbx; 19 | 20 | #[cfg(feature = "parity-backend")] 21 | mod parity_kvdb_rocksdb; 22 | 23 | pub fn backend(opts: &Options) -> Arc { 24 | match opts.backend { 25 | Backend::RocksDB => { 26 | let db_dir = opts.db_dir.as_str(); 27 | #[cfg(not(feature = "parity-backend"))] 28 | { 29 | cfx_kvdb_rocksdb::open(db_dir, opts) 30 | } 31 | #[cfg(feature = "parity-backend")] 32 | { 33 | parity_kvdb_rocksdb::open(db_dir, opts.num_cols()) 34 | } 35 | } 36 | Backend::InMemoryDB => Arc::new(kvdb_memorydb::create(opts.num_cols())), 37 | Backend::MDBX => Arc::new(mdbx::open_database(opts)), 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /asb-backend/src/mdbx.rs: -------------------------------------------------------------------------------- 1 | #![allow(unused)] 2 | 3 | use libmdbx::Cursor; 4 | use libmdbx::Database; 5 | use libmdbx::DatabaseFlags; 6 | use libmdbx::Environment; 7 | use libmdbx::EnvironmentBuilder; 8 | use libmdbx::Geometry; 9 | use libmdbx::Transaction; 10 | use libmdbx::WriteFlags; 11 | use libmdbx::WriteMap; 12 | use libmdbx::RW; 13 | 14 | use asb_options::Options; 15 | 16 | use kvdb::DBValue; 17 | use kvdb::KeyValueDB; 18 | use std::io; 19 | use std::io::ErrorKind::Other; 20 | use std::sync::Arc; 21 | use std::sync::RwLock; 22 | use std::sync::RwLockWriteGuard; 23 | 24 | use ouroboros::self_referencing; 25 | use std::path::Path; 26 | 27 | pub fn open_database(opts: &Options) -> MdbxDatabase { 28 | const TB: usize = 1 << 40; 29 | const GB: usize = 1 << 30; 30 | let mut builder: EnvironmentBuilder = Environment::new(); 31 | builder.set_max_dbs(10); 32 | builder.set_geometry(Geometry { 33 | size: Some(0..4 * TB), 34 | growth_step: Some(4 * GB as isize), 35 | shrink_threshold: None, 36 | page_size: None, 37 | }); 38 | builder.set_rp_augment_limit(16 * 256 * 1024); 39 | let env = builder.open(Path::new(&opts.db_dir)).unwrap(); 40 | let txn = MdbxTransaction::build(Arc::new(env)); 41 | make_backend(opts, txn) 42 | } 43 | 44 | #[self_referencing] 45 | pub struct MdbxTransaction { 46 | env: Arc>, 47 | #[borrows(env)] 48 | #[covariant] 49 | txn: Option>, 50 | } 51 | 52 | impl MdbxTransaction { 53 | pub fn 
build(env: Arc>) -> Self { 54 | MdbxTransactionBuilder { 55 | env, 56 | txn_builder: |env| Some(env.begin_rw_txn().unwrap()), 57 | } 58 | .build() 59 | } 60 | 61 | fn commit(&mut self) { 62 | self.with_txn_mut(|txn| std::mem::take(txn).unwrap().commit()); 63 | let mut next_txn = MdbxTransaction::build(self.borrow_env().clone()); 64 | std::mem::swap(self, &mut next_txn); 65 | } 66 | } 67 | 68 | pub struct MdbxDatabase { 69 | txn: RwLock, 70 | buffer: RwLock>, 71 | num_cols: u32, 72 | } 73 | 74 | impl MdbxDatabase { 75 | fn cursor(&self, col: u32) -> Cursor<'_, RW> { 76 | let table_name = format!("table{}", col); 77 | self.txn.write().unwrap().with_txn_mut(|txn| { 78 | let txn = txn.as_mut().unwrap(); 79 | let db = txn 80 | .create_db(Some(&table_name), DatabaseFlags::empty()) 81 | .unwrap(); 82 | txn.cursor(&db).unwrap() 83 | }) 84 | } 85 | } 86 | 87 | pub fn make_backend(opts: &Options, db: MdbxTransaction) -> MdbxDatabase { 88 | let num_cols = opts.num_cols(); 89 | MdbxDatabase { 90 | txn: RwLock::new(db), 91 | buffer: RwLock::new(Vec::new()), 92 | num_cols, 93 | } 94 | } 95 | 96 | fn into_io_error(err: libmdbx::Error) -> io::Error { 97 | io::Error::new(Other, err) 98 | } 99 | 100 | fn map_output( 101 | output: libmdbx::Result, Vec)>>, 102 | f: F, 103 | ) -> io::Result> 104 | where 105 | F: FnOnce((Vec, Vec)) -> T, 106 | { 107 | match output { 108 | Ok(out) => Ok(out.map(f)), 109 | Err(err) => Err(into_io_error(err)), 110 | } 111 | } 112 | 113 | fn is_prefix(prefix: &[u8], full: &[u8]) -> bool { 114 | if prefix.len() > full.len() { 115 | return false; 116 | } 117 | full.starts_with(prefix) 118 | } 119 | 120 | impl KeyValueDB for MdbxDatabase { 121 | fn get(&self, col: u32, key: &[u8]) -> io::Result> { 122 | map_output(self.cursor(col).set_key(key), |(_, v)| v) 123 | } 124 | 125 | fn get_by_prefix(&self, col: u32, prefix: &[u8]) -> Option> { 126 | let value = map_output(self.cursor(col).set_range(prefix), |(k, v)| { 127 | is_prefix(prefix, &k).then(|| v.into_boxed_slice()) 128 | }) 129 | .unwrap(); 130 | value.and_then(|x| x) 131 | } 132 | 133 | fn write_buffered(&self, transaction: kvdb::DBTransaction) { 134 | self.buffer.write().unwrap().push(transaction); 135 | } 136 | 137 | fn flush(&self) -> std::io::Result<()> { 138 | let mut cursors: Vec<_> = (0..self.num_cols).map(|db| self.cursor(db)).collect(); 139 | for kvdb_txn in self.buffer.write().unwrap().drain(..) 
{ 140 | for kvdb_op in kvdb_txn.ops { 141 | match kvdb_op { 142 | kvdb::DBOp::Insert { col, key, value } => cursors[col as usize] 143 | .put(key.as_ref(), value.as_ref(), WriteFlags::UPSERT) 144 | .map_err(into_io_error) 145 | .unwrap(), 146 | kvdb::DBOp::Delete { col, key } => { 147 | let mut cursor = &mut cursors[col as usize]; 148 | if cursor 149 | .set_key::<(), ()>(&key) 150 | .map_err(into_io_error) 151 | .unwrap() 152 | .is_some() 153 | { 154 | cursor 155 | .del(WriteFlags::CURRENT) 156 | .map_err(into_io_error) 157 | .unwrap(); 158 | } 159 | } 160 | } 161 | } 162 | } 163 | self.txn.write().unwrap().commit(); 164 | // self.txn.read().unwrap().borrow_env().sync(true).unwrap(); 165 | // println!("Commit"); 166 | Ok(()) 167 | } 168 | 169 | fn iter<'a>(&'a self, col: u32) -> Box, Box<[u8]>)> + 'a> { 170 | let mut cursor = self.cursor(col); 171 | cursor.first::<(), ()>().unwrap(); 172 | return Box::new(MdbxCursorIterator { 173 | cursor, 174 | prefix: vec![], 175 | }); 176 | } 177 | 178 | fn iter_from_prefix<'a>( 179 | &'a self, 180 | col: u32, 181 | prefix: &'a [u8], 182 | ) -> Box, Box<[u8]>)> + 'a> { 183 | let mut cursor = self.cursor(col); 184 | cursor.set_key::<(), ()>(prefix).unwrap(); 185 | return Box::new(MdbxCursorIterator { 186 | cursor, 187 | prefix: prefix.to_vec(), 188 | }); 189 | } 190 | 191 | fn restore(&self, new_db: &str) -> std::io::Result<()> { 192 | unimplemented!() 193 | } 194 | } 195 | 196 | pub struct MdbxCursorIterator<'txn> { 197 | cursor: Cursor<'txn, RW>, 198 | prefix: Vec, 199 | } 200 | 201 | impl<'txn> Iterator for MdbxCursorIterator<'txn> { 202 | type Item = (Box<[u8]>, Box<[u8]>); 203 | 204 | fn next(&mut self) -> Option { 205 | let (k, v): Self::Item = map_output(self.cursor.get_current(), |(k, v)| { 206 | (k.into_boxed_slice(), v.into_boxed_slice()) 207 | }) 208 | .unwrap()?; 209 | if !is_prefix(&self.prefix, &v) { 210 | return None; 211 | } 212 | self.cursor.next::<(), ()>().unwrap(); 213 | Some((k, v)) 214 | } 215 | } 216 | 217 | impl parity_util_mem::MallocSizeOf for MdbxDatabase { 218 | fn size_of(&self, ops: &mut parity_util_mem::MallocSizeOfOps) -> usize { 219 | 0 220 | } 221 | } 222 | -------------------------------------------------------------------------------- /asb-backend/src/parity_kvdb_rocksdb.rs: -------------------------------------------------------------------------------- 1 | use kvdb::{DBOp, DBTransaction, DBValue, IoStats, IoStatsKind, KeyValueDB}; 2 | use kvdb07::{ 3 | DBOp as DBOp07, DBTransaction as DBTransaction07, IoStatsKind as IoStatsKind07, 4 | KeyValueDB as KeyValueDB07, 5 | }; 6 | use kvdb_rocksdb::{CompactionProfile, Database, DatabaseConfig}; 7 | use parity_util_mem::{MallocSizeOf, MallocSizeOfOps}; 8 | use std::path::Path; 9 | use std::sync::{Arc, RwLock}; 10 | 11 | pub fn open(db_dir: &str, num_cols: u32) -> Arc { 12 | let mut config = DatabaseConfig::with_columns(num_cols); 13 | config.enable_statistics = false; 14 | config.compaction = CompactionProfile::auto(&Path::new(db_dir)); 15 | 16 | let db: WrappedDataBase = kvdb_rocksdb::Database::open(&config, db_dir) 17 | .unwrap() 18 | .into(); 19 | Arc::new(db) 20 | } 21 | 22 | pub struct WrappedDataBase { 23 | pub db: Database, 24 | pub buffered_transactions: RwLock>, 25 | } 26 | 27 | impl MallocSizeOf for WrappedDataBase { 28 | fn size_of(&self, _ops: &mut MallocSizeOfOps) -> usize { 29 | unimplemented!() 30 | } 31 | } 32 | 33 | impl From for WrappedDataBase { 34 | fn from(db: Database) -> Self { 35 | Self { 36 | db, 37 | buffered_transactions: Default::default(), 38 | } 
39 | } 40 | } 41 | 42 | impl KeyValueDB for WrappedDataBase { 43 | fn get(&self, col: u32, key: &[u8]) -> std::io::Result> { 44 | KeyValueDB07::get(&self.db, col, key) 45 | } 46 | 47 | fn get_by_prefix(&self, col: u32, prefix: &[u8]) -> Option> { 48 | KeyValueDB07::get_by_prefix(&self.db, col, prefix) 49 | } 50 | 51 | fn write_buffered(&self, mut transaction: DBTransaction) { 52 | let ops: Vec = transaction 53 | .ops 54 | .drain(..) 55 | .map(|x| match x { 56 | DBOp::Insert { col, key, value } => DBOp07::Insert { col, key, value }, 57 | DBOp::Delete { col, key } => DBOp07::Delete { col, key }, 58 | }) 59 | .collect(); 60 | let txs = &mut *self.buffered_transactions.write().unwrap(); 61 | txs.extend_from_slice(&ops); 62 | } 63 | 64 | fn flush(&self) -> std::io::Result<()> { 65 | let ops = std::mem::take(&mut *self.buffered_transactions.write().unwrap()); 66 | KeyValueDB07::write(&self.db, DBTransaction07 { ops }) 67 | } 68 | 69 | fn iter<'a>(&'a self, col: u32) -> Box, Box<[u8]>)> + 'a> { 70 | KeyValueDB07::iter(&self.db, col) 71 | } 72 | 73 | fn iter_from_prefix<'a>( 74 | &'a self, 75 | col: u32, 76 | prefix: &'a [u8], 77 | ) -> Box, Box<[u8]>)> + 'a> { 78 | KeyValueDB07::iter_with_prefix(&self.db, col, prefix) 79 | } 80 | 81 | fn restore(&self, new_db: &str) -> std::io::Result<()> { 82 | KeyValueDB07::restore(&self.db, new_db) 83 | } 84 | 85 | fn io_stats(&self, kind: IoStatsKind) -> IoStats { 86 | let kind = match kind { 87 | IoStatsKind::Overall => IoStatsKind07::Overall, 88 | IoStatsKind::SincePrevious => IoStatsKind07::SincePrevious, 89 | }; 90 | let stats = KeyValueDB07::io_stats(&self.db, kind); 91 | IoStats { 92 | transactions: stats.transactions, 93 | reads: stats.reads, 94 | cache_reads: stats.cache_reads, 95 | writes: stats.writes, 96 | bytes_read: stats.bytes_read, 97 | cache_read_bytes: stats.cache_read_bytes, 98 | bytes_written: stats.bytes_written, 99 | started: stats.started, 100 | span: stats.span, 101 | } 102 | } 103 | } 104 | -------------------------------------------------------------------------------- /asb-options/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "asb-options" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | structopt = { version = "0.3", default-features = false } 10 | strum = "0.22.0" 11 | strum_macros = "0.22.0" -------------------------------------------------------------------------------- /asb-options/src/lib.rs: -------------------------------------------------------------------------------- 1 | extern crate structopt; 2 | #[macro_use] 3 | extern crate strum_macros; 4 | 5 | pub use structopt::StructOpt; 6 | 7 | #[derive(Debug, StructOpt)] 8 | #[structopt(about = "Authenticated Storage Benchmarks", rename_all = "kebab-case")] 9 | pub struct Options { 10 | #[structopt(short = "a", parse(try_from_str = parse_algo), long)] 11 | pub algorithm: AuthAlgo, 12 | 13 | #[structopt(short = "b", parse(try_from_str = parse_backend), long, default_value="rocksdb")] 14 | pub backend: Backend, 15 | 16 | #[structopt(short = "k", long, parse(try_from_str = parse_num), default_value = "100000")] 17 | pub total_keys: usize, 18 | 19 | #[structopt(long, default_value = "64")] 20 | pub seed: u64, 21 | 22 | #[structopt(long, default_value = "1500")] 23 | pub cache_size: u64, 24 | 25 | #[structopt(long)] 26 | pub max_time: Option, 27 | 28 | #[structopt(long)] 29 | pub max_epoch: 

--------------------------------------------------------------------------------
/asb-options/Cargo.toml:
--------------------------------------------------------------------------------
[package]
name = "asb-options"
version = "0.1.0"
edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
structopt = { version = "0.3", default-features = false }
strum = "0.22.0"
strum_macros = "0.22.0"

--------------------------------------------------------------------------------
/asb-options/src/lib.rs:
--------------------------------------------------------------------------------
extern crate structopt;
#[macro_use]
extern crate strum_macros;

pub use structopt::StructOpt;

#[derive(Debug, StructOpt)]
#[structopt(about = "Authenticated Storage Benchmarks", rename_all = "kebab-case")]
pub struct Options {
    #[structopt(short = "a", parse(try_from_str = parse_algo), long)]
    pub algorithm: AuthAlgo,

    #[structopt(short = "b", parse(try_from_str = parse_backend), long, default_value = "rocksdb")]
    pub backend: Backend,

    #[structopt(short = "k", long, parse(try_from_str = parse_num), default_value = "100000")]
    pub total_keys: usize,

    #[structopt(long, default_value = "64")]
    pub seed: u64,

    #[structopt(long, default_value = "1500")]
    pub cache_size: u64,

    #[structopt(long)]
    pub max_time: Option<u64>,

    #[structopt(long)]
    pub max_epoch: Option<usize>,

    #[structopt(long, default_value = "2")]
    pub report_epoch: usize,

    #[structopt(long, default_value = "100")]
    pub profile_epoch: usize,

    #[structopt(long, default_value = "50000")]
    pub epoch_size: usize,

    #[structopt(long = "pprof-report-to")]
    pub report_dir: Option<String>,

    #[structopt(long = "db", default_value = "./__benchmarks")]
    pub db_dir: String,

    #[structopt(long = "trace", default_value = "./trace")]
    pub trace_dir: String,

    #[structopt(long, help = "Use real trace")]
    pub real_trace: bool,

    #[structopt(long, help = "Disable backend stat")]
    pub no_stat: bool,

    #[structopt(long, help = "Output the usage of memory")]
    pub stat_mem: bool,

    #[structopt(long, help = "No warmup")]
    pub no_warmup: bool,

    #[structopt(long, help = "Enable print root")]
    pub print_root: bool,

    #[structopt(long)]
    pub warmup_to: Option<String>,

    #[structopt(long)]
    pub warmup_from: Option<String>,

    #[structopt(long)]
    pub shards: Option<usize>,
}

impl Options {
    fn warmup_dir(&self, input: &str) -> String {
        let task_code = if !self.real_trace {
            format!("{:e}", self.total_keys)
        } else {
            "real".into()
        };
        if self.algorithm != AuthAlgo::LVMT || self.shards.is_none() {
            format!("{}/{:?}_{}/", input, self.algorithm, task_code)
        } else {
            format!("{}/LVMT{}_{}/", input, self.shards.unwrap(), task_code)
        }
    }

    pub fn settings(&self) -> String {
        format!("{:?},{:e}", self.algorithm, self.total_keys)
    }

    pub fn warmup_to(&self) -> Option<String> {
        self.warmup_to.as_ref().map(|x| self.warmup_dir(x))
    }

    pub fn warmup_from(&self) -> Option<String> {
        self.warmup_from.as_ref().map(|x| self.warmup_dir(x))
    }

    pub fn num_cols(&self) -> u32 {
        match self.algorithm {
            AuthAlgo::LVMT => 3,
            _ => 1,
        }
    }
}

#[derive(Debug, Eq, PartialEq, EnumString)]
#[strum(serialize_all = "lowercase")]
pub enum AuthAlgo {
    RAW,
    AMT(usize),
    LVMT,
    MPT,
    LMPTS,
    RAIN,
}

fn parse_algo(s: &str) -> Result<AuthAlgo, String> {
    // "amt<depth>" selects the AMT algorithm with an explicit tree depth;
    // the length guard ensures at least one digit follows the prefix.
    if s.len() >= 4 && &s[0..3] == "amt" {
        let depth = s[3..].parse::<usize>().map_err(|x| x.to_string())?;
        return Ok(AuthAlgo::AMT(depth));
    }
    Ok(match s {
        "raw" => AuthAlgo::RAW,
        "lvmt" => AuthAlgo::LVMT,
        "mpt" => AuthAlgo::MPT,
        "lmpts" => AuthAlgo::LMPTS,
        "rain" => AuthAlgo::RAIN,
        _ => {
            return Err("Unrecognized algorithm".into());
        }
    })
}

fn parse_num(s: &str) -> Result<usize, String> {
    let base = match s
        .chars()
        .rev()
        .next()
        .ok_or::<String>("empty input".into())?
    {
        'k' | 'K' => 1_000,
        'm' | 'M' => 1_000_000,
        'g' | 'G' => 1_000_000_000,
        _ => 1,
    };
    let num = if base > 1 {
        let mut chars = s.chars();
        chars.next_back();
        chars.as_str()
    } else {
        s
    };
    Ok(base * num.parse::<usize>().map_err(|x| x.to_string())?)
}
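
// A small hedged sketch of how the parsers above behave; this test module is
// illustrative and not part of the original crate.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn parse_helpers() {
        // Suffixes scale by factors of one thousand.
        assert_eq!(parse_num("100k"), Ok(100_000));
        assert_eq!(parse_num("2M"), Ok(2_000_000));
        // "amt<depth>" carries its depth as a payload; other names map directly.
        assert_eq!(parse_algo("amt16"), Ok(AuthAlgo::AMT(16)));
        assert_eq!(parse_algo("lvmt"), Ok(AuthAlgo::LVMT));
        assert!(parse_algo("unknown").is_err());
    }
}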

#[derive(Debug, Eq, PartialEq, EnumString)]
#[strum(serialize_all = "lowercase")]
pub enum Backend {
    RocksDB,
    InMemoryDB,
    MDBX,
}

fn parse_backend(s: &str) -> Result<Backend, String> {
    Ok(match s {
        "rocksdb" => Backend::RocksDB,
        "memory" => Backend::InMemoryDB,
        "mdbx" => Backend::MDBX,
        _ => {
            return Err("Unrecognized backend".into());
        }
    })
}

--------------------------------------------------------------------------------
/asb-profile/Cargo.toml:
--------------------------------------------------------------------------------
[package]
name = "asb-profile"
version = "0.1.0"
edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
pprof = { version = "0.3", features = ["flamegraph", "protobuf"] }
kvdb = { workspace = true }
asb-options = { workspace = true }
lazy_static = { workspace = true }
tokio = { workspace = true }
authdb = { workspace = true }
num-format = "0.4.0"
simple-process-stats = "0.1.0"

--------------------------------------------------------------------------------
/asb-profile/src/counter.rs:
--------------------------------------------------------------------------------
use super::profiler::Profiler;
use asb_options::Options;
use authdb::AuthDB;

use kvdb::IoStatsKind;
use lazy_static::lazy_static;
use num_format::{Locale, WriteFormatted};
#[cfg(any(target_os = "linux", target_os = "windows"))]
use simple_process_stats::ProcessStats;
use std::fs;
use std::fs::File;
use std::io::Write;
use std::sync::Mutex;
use std::time::Instant;
use tokio::runtime::Runtime;

lazy_static! {
    pub static ref RUNTIME: Mutex<Runtime> = Mutex::new(Runtime::new().unwrap());
}

pub struct Reporter<'a> {
    pub start_time: Instant,
    total_read_count: usize,
    total_write_count: usize,
    log_file: Option<File>,

    round_start_time: Instant,
    round_start_read_count: usize,
    round_start_write_count: usize,

    empty_reads: usize,

    opts: &'a Options,
    counter: Box<dyn CounterTrait>,
}

impl<'a> Reporter<'a> {
    pub fn new(opts: &'a Options) -> Self {
        let log_file = if let Some(ref path) = opts.report_dir {
            let file = fs::OpenOptions::new()
                .create(true)
                .append(true)
                .open(path.to_string() + "/timing.log")
                .unwrap();
            Some(file)
        } else {
            None
        };

        Reporter {
            start_time: Instant::now(),
            round_start_time: Instant::now(),
            log_file,
            opts,
            counter: Box::new(Counter::default()),
            empty_reads: 0,
            total_read_count: 0,
            total_write_count: 0,
            round_start_read_count: 0,
            round_start_write_count: 0,
        }
    }

    pub fn set_counter(&mut self, counter: Box<dyn CounterTrait>) {
        self.counter = counter;
    }

    pub fn start(&mut self) {
        self.start_time = Instant::now();
        self.round_start_time = Instant::now();
        self.counter.reset();
    }

    pub fn notify_empty_read(&mut self) {
        self.empty_reads += 1;
    }

    #[cfg(any(target_os = "linux", target_os = "windows"))]
    pub async fn report_mem() {
        let process_stats = ProcessStats::get().await.unwrap();
        // The byte count is divided by 1 << 30, so the figure printed is GiB.
        println!(
            "Memory {:>3.3} GiB",
            (process_stats.memory_usage_bytes as f64) / ((1 << 30) as f64)
        );
    }

    #[cfg(not(any(target_os = "linux", target_os = "windows")))]
    pub async fn report_mem() {}

    pub fn notify_epoch(
        &mut self,
        epoch: usize,
        read_count: usize,
        write_count: usize,
        db: &dyn AuthDB,
        opts: &Options,
    ) {
        fn c(n: u64) -> String {
            let mut ans = String::new();
            ans.write_formatted(&n, &Locale::en).unwrap();
            ans
        }

        self.total_read_count += read_count;
        self.total_write_count += write_count;

        if (epoch + 1) % self.opts.report_epoch != 0 {
            return;
        }

        let read_count = self.total_read_count - self.round_start_read_count;
        let write_count = self.total_write_count - self.round_start_write_count;

        let last = self.round_start_time.elapsed();
        let avg_time = last.as_secs_f64() / (read_count + write_count) as f64;

        let common = format!(
            "{:>6?}: {:>7.3?} s > {:>7} ops, {:>7.3?} us/op, {:>5} empty reads >",
            epoch + 1,
            self.start_time.elapsed().as_secs_f64(),
            c((1f64 / avg_time) as u64),
            avg_time * 1e6,
            self.empty_reads,
        );

        if opts.stat_mem {
            RUNTIME.lock().unwrap().block_on(Self::report_mem());
        }

        // let db_stat = {
        //     let stats = db.backend().io_stats(IoStatsKind::SincePrevious);
        //     let bytes_per_read = (stats.bytes_read as f64) / (stats.reads as f64);
        //     let bytes_per_write = (stats.bytes_written as f64) / (stats.writes as f64);
        //     let cached_rate = (stats.cache_reads as f64) / (stats.reads as f64);
        //     format!(
        //         "{} / {} r ({:.0}% cached) {} w, avg bytes {:.2}, {:.2} >",
        //         c(stats.reads),
        //         c(stats.cache_reads),
        //         cached_rate * 100.0,
        //         c(stats.writes),
        //         bytes_per_read,
        //         bytes_per_write,
        //     )
        // };

        let (stdout, fileout) = {
            if let Some(backend) = db.backend() {
                let stats = backend.io_stats(IoStatsKind::SincePrevious);
                let ra = stats.reads as f64 / (read_count as f64);
                let wa = stats.writes as f64 / (write_count as f64);
                (
                    format!("Read amp {:>6.3}, Write amp {:>6.3} > ", ra, wa),
                    format!("{},{}", ra, wa),
                )
            } else {
                ("".into(), "".into())
            }
        };
        let customized = self.counter.report();
        println!("{} {} {}", common, stdout, customized);

        if let Some(file) = &mut self.log_file {
            let _ = writeln!(
                file,
                "{},{},{:.3?},{}",
                self.opts.settings(),
                (epoch + 1) / self.opts.report_epoch,
                avg_time * 1e6,
                fileout
            );
        }
        self.empty_reads = 0;
        self.round_start_time = Instant::now();
        self.round_start_read_count = self.total_read_count;
        self.round_start_write_count = self.total_write_count;
    }

    pub fn collect_profiling(&self, profiler: Profiler) {
        if self.opts.report_dir.is_none() {
            return;
        }

        let profile_prefix = self.opts.report_dir.as_ref().unwrap().clone()
            + "/"
            + &str::replace(&self.opts.settings(), ",", "_");
        profiler.report_to_file(&profile_prefix)
    }
}

pub trait CounterTrait {
    fn reset(&mut self) {}
    fn report(&mut self) -> String {
        "".to_string()
    }
}

#[derive(Default)]
pub struct Counter;

impl CounterTrait for Counter {}
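
// A minimal sketch of a custom counter built against the trait above; the
// struct and what it counts are illustrative, not part of the original crate.
#[derive(Default)]
pub struct RoundCounter {
    rounds: usize,
}

impl CounterTrait for RoundCounter {
    fn reset(&mut self) {
        self.rounds = 0;
    }
    fn report(&mut self) -> String {
        // Appended to the end of each epoch report line.
        self.rounds += 1;
        format!("round {}", self.rounds)
    }
}
// A reporter would pick it up via:
//     reporter.set_counter(Box::new(RoundCounter::default()));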

--------------------------------------------------------------------------------
/asb-profile/src/lib.rs:
--------------------------------------------------------------------------------
mod counter;
mod profiler;

pub use counter::{Counter, CounterTrait, Reporter};
pub use profiler::Profiler;
"); 44 | 45 | for (index, report) in self.reports.into_iter().enumerate() { 46 | let path = format!("{}_{:02}.pb", prefix, index); 47 | let mut file = File::create(path).unwrap(); 48 | let profile = report.pprof().unwrap(); 49 | 50 | let mut content = Vec::new(); 51 | profile.encode(&mut content).unwrap(); 52 | file.write_all(&content).unwrap(); 53 | } 54 | println!("Done"); 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /asb-tasks/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "asb-tasks" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | asb-options = { workspace = true } 10 | 11 | keccak-hash = "0.5.1" 12 | crc64fast = "1.0.0" 13 | rand = "0.7" 14 | rand_pcg = "0.2" 15 | serde = "1.0.149" 16 | postcard = "1.0.2" -------------------------------------------------------------------------------- /asb-tasks/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod read_then_write; 2 | pub mod real_trace; 3 | 4 | use asb_options::Options; 5 | use std::sync::Arc; 6 | 7 | pub use read_then_write::ReadThenWrite; 8 | pub use real_trace::RealTrace; 9 | 10 | type Key = Vec; 11 | type Value = Vec; 12 | 13 | pub fn tasks(opts: &Options) -> Arc { 14 | if opts.real_trace { 15 | Arc::new(RealTrace::new(&opts, opts.warmup_from.is_none())) 16 | } else { 17 | Arc::new(ReadThenWrite::::new(&opts)) 18 | } 19 | } 20 | 21 | pub trait TaskTrait { 22 | fn warmup<'a>(&'a self) -> Box + 'a> { 23 | Box::new(NoopIter) 24 | } 25 | fn tasks<'a>(&'a self) -> Box + 'a>; 26 | } 27 | 28 | pub enum Event { 29 | Read(Key), 30 | Write(Key, Value), 31 | } 32 | 33 | pub struct Events(pub Vec); 34 | 35 | fn hash(input: &[u8]) -> [u8; 32] { 36 | keccak_hash::keccak(input).0 37 | } 38 | 39 | pub struct NoopIter; 40 | 41 | impl Iterator for NoopIter { 42 | type Item = Events; 43 | 44 | fn next(&mut self) -> Option { 45 | None 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /asb-tasks/src/read_then_write.rs: -------------------------------------------------------------------------------- 1 | use super::*; 2 | use asb_options::Options; 3 | use rand::prelude::*; 4 | use std::{ 5 | marker::PhantomData, 6 | sync::mpsc::{sync_channel, Receiver}, 7 | time::Duration, 8 | }; 9 | 10 | pub struct ReadThenWrite { 11 | pub total_keys: usize, 12 | pub batch_size: usize, 13 | pub seed: u64, 14 | _phantom: PhantomData, 15 | } 16 | 17 | impl Clone for ReadThenWrite { 18 | fn clone(&self) -> Self { 19 | Self { 20 | total_keys: self.total_keys.clone(), 21 | batch_size: self.batch_size.clone(), 22 | seed: self.seed.clone(), 23 | _phantom: PhantomData, 24 | } 25 | } 26 | } 27 | 28 | impl ReadThenWrite { 29 | pub fn new(opts: &Options) -> Self { 30 | Self { 31 | total_keys: opts.total_keys, 32 | batch_size: opts.epoch_size, 33 | seed: opts.seed, 34 | _phantom: PhantomData, 35 | } 36 | } 37 | } 38 | 39 | pub struct ReadThenWriteTaskGenerator { 40 | receiver: Receiver, 41 | } 42 | 43 | impl ReadThenWriteTaskGenerator { 44 | fn new(params: ReadThenWrite) -> Self { 45 | let (sender, receiver) = sync_channel(10); 46 | 47 | std::thread::spawn(move || { 48 | let mut random = R::seed_from_u64(params.seed + 1); 49 | loop { 50 | let mut events = Vec::with_capacity(params.batch_size * 2); 51 | for _ in 0..params.batch_size 

--------------------------------------------------------------------------------
/asb-tasks/Cargo.toml:
--------------------------------------------------------------------------------
[package]
name = "asb-tasks"
version = "0.1.0"
edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
asb-options = { workspace = true }

keccak-hash = "0.5.1"
crc64fast = "1.0.0"
rand = "0.7"
rand_pcg = "0.2"
serde = "1.0.149"
postcard = "1.0.2"

--------------------------------------------------------------------------------
/asb-tasks/src/lib.rs:
--------------------------------------------------------------------------------
pub mod read_then_write;
pub mod real_trace;

use asb_options::Options;
use std::sync::Arc;

pub use read_then_write::ReadThenWrite;
pub use real_trace::RealTrace;

type Key = Vec<u8>;
type Value = Vec<u8>;

pub fn tasks(opts: &Options) -> Arc<dyn TaskTrait> {
    if opts.real_trace {
        Arc::new(RealTrace::new(&opts, opts.warmup_from.is_none()))
    } else {
        Arc::new(ReadThenWrite::<rand_pcg::Pcg64>::new(&opts))
    }
}

pub trait TaskTrait {
    fn warmup<'a>(&'a self) -> Box<dyn Iterator<Item = Events> + 'a> {
        Box::new(NoopIter)
    }
    fn tasks<'a>(&'a self) -> Box<dyn Iterator<Item = Events> + 'a>;
}

pub enum Event {
    Read(Key),
    Write(Key, Value),
}

pub struct Events(pub Vec<Event>);

fn hash(input: &[u8]) -> [u8; 32] {
    keccak_hash::keccak(input).0
}

pub struct NoopIter;

impl Iterator for NoopIter {
    type Item = Events;

    fn next(&mut self) -> Option<Self::Item> {
        None
    }
}

--------------------------------------------------------------------------------
/asb-tasks/src/read_then_write.rs:
--------------------------------------------------------------------------------
use super::*;
use asb_options::Options;
use rand::prelude::*;
use std::{
    marker::PhantomData,
    sync::mpsc::{sync_channel, Receiver},
    time::Duration,
};

pub struct ReadThenWrite<R: Rng + SeedableRng> {
    pub total_keys: usize,
    pub batch_size: usize,
    pub seed: u64,
    _phantom: PhantomData<R>,
}

impl<R: Rng + SeedableRng> Clone for ReadThenWrite<R> {
    fn clone(&self) -> Self {
        Self {
            total_keys: self.total_keys,
            batch_size: self.batch_size,
            seed: self.seed,
            _phantom: PhantomData,
        }
    }
}

impl<R: Rng + SeedableRng> ReadThenWrite<R> {
    pub fn new(opts: &Options) -> Self {
        Self {
            total_keys: opts.total_keys,
            batch_size: opts.epoch_size,
            seed: opts.seed,
            _phantom: PhantomData,
        }
    }
}

pub struct ReadThenWriteTaskGenerator {
    receiver: Receiver<Events>,
}

impl ReadThenWriteTaskGenerator {
    fn new<R: Rng + SeedableRng + Send + 'static>(params: ReadThenWrite<R>) -> Self {
        let (sender, receiver) = sync_channel(10);

        // Generate epochs on a background thread so task generation overlaps
        // with database execution; the channel bounds the backlog at 10 epochs.
        std::thread::spawn(move || {
            let mut random = R::seed_from_u64(params.seed + 1);
            loop {
                let mut events = Vec::with_capacity(params.batch_size * 2);
                for _ in 0..params.batch_size {
                    let integer = random.gen_range(0, params.total_keys);
                    let key = hash(&integer.to_be_bytes()).to_vec();
                    events.push(Event::Read(key.clone()));
                    events.push(Event::Write(key, random.gen::<[u8; 32]>().to_vec()));
                }
                if sender.send(Events(events)).is_err() {
                    // The consumer hung up; stop generating.
                    return;
                }
            }
        });

        Self { receiver }
    }
}

impl Iterator for ReadThenWriteTaskGenerator {
    type Item = Events;

    fn next(&mut self) -> Option<Self::Item> {
        let task = self.receiver.recv_timeout(Duration::from_secs(1)).unwrap();
        Some(task)
    }
}

pub struct ReadThenWriteWarmupIter<'a, R: Rng + SeedableRng> {
    inner: &'a ReadThenWrite<R>,
    random: R,
    keys: Vec<usize>,
}

impl<R: Rng + SeedableRng> Iterator for ReadThenWriteWarmupIter<'_, R> {
    type Item = Events;

    fn next(&mut self) -> Option<Self::Item> {
        let mut task_keys = Vec::with_capacity(self.inner.batch_size);
        for _ in 0..self.inner.batch_size {
            if let Some(v) = self.keys.pop() {
                task_keys.push(v);
            } else {
                break;
            }
        }
        if task_keys.is_empty() {
            return None;
        }
        let mut events = Vec::with_capacity(task_keys.len());
        for key in task_keys.into_iter() {
            let key = hash(&key.to_be_bytes()).to_vec();
            events.push(Event::Write(
                key,
                self.random.gen::<[u8; 32]>().to_vec(),
            ));
        }
        Some(Events(events))
    }
}

impl<R: Rng + SeedableRng + Send + 'static> TaskTrait for ReadThenWrite<R> {
    fn warmup<'a>(&'a self) -> Box<dyn Iterator<Item = Events> + 'a> {
        let mut random = R::seed_from_u64(self.seed + 1);
        let mut keys: Vec<usize> = (0..self.total_keys).collect();
        keys.shuffle(&mut random);
        Box::new(ReadThenWriteWarmupIter {
            inner: self,
            random,
            keys,
        })
    }

    fn tasks(&self) -> Box<dyn Iterator<Item = Events>> {
        Box::new(ReadThenWriteTaskGenerator::new(self.clone()))
    }
}
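
// A hedged sketch of driving the generator above directly. The concrete RNG
// mirrors the reconstruction in lib.rs and is an assumption for illustration;
// any Rng + SeedableRng + Send + 'static type works.
fn read_then_write_example(opts: &asb_options::Options) {
    let workload = ReadThenWrite::<rand_pcg::Pcg64>::new(opts);

    // Warmup yields pure-write epochs covering every key exactly once.
    let warmup_epochs = workload.warmup().count();
    println!("warmup epochs: {}", warmup_epochs);

    // The task stream is infinite, so bound it explicitly.
    for events in workload.tasks().take(3) {
        // Each epoch interleaves one read and one write per sampled key.
        assert_eq!(events.0.len(), workload.batch_size * 2);
    }
}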

--------------------------------------------------------------------------------
/asb-tasks/src/real_trace.rs:
--------------------------------------------------------------------------------
#![allow(unused)]
use std::{
    collections::VecDeque, fs, path::Path, sync::mpsc::sync_channel, thread::Thread, time::Duration,
};

use postcard::from_bytes;
use serde::{Deserialize, Serialize};
use std::sync::mpsc::{channel, Receiver, Sender};

use asb_options::{AuthAlgo, Options};

use super::{Event, Events, TaskTrait};

#[derive(Clone, Debug, Serialize, Deserialize)]
enum ExperimentTask {
    Read([u8; 32]),
    Write([u8; 32], Bytes),
}

type Bytes = Vec<u8>;
type InitTasks = Vec<([u8; 32], Bytes)>;
type BlockTask = VecDeque<ExperimentTask>;

fn read_from_file<T, S: AsRef<Path>>(path: S) -> T
where
    for<'a> T: Deserialize<'a>,
{
    let loaded = std::fs::read(path.as_ref()).unwrap();
    from_bytes(&loaded).unwrap()
}

pub struct TaskProducer {
    receiver: Receiver<Vec<Events>>,
    group_size: usize,
    events: Vec<Events>,
}

impl TaskProducer {
    fn new(path: String, group_size: usize) -> Self {
        let (sender, receiver) = sync_channel(1);
        std::thread::spawn(move || {
            let path = Path::new(&path);
            let mut idx = 0usize;
            let mut next_file = path.join(format!("real_trace.{}.data", idx));
            while fs::metadata(&next_file).is_ok() {
                let loaded = std::fs::read(next_file).unwrap();
                let block_group: Vec<BlockTask> = from_bytes(&loaded).unwrap();
                let events = block_group
                    .into_iter()
                    .map(|block| {
                        Events(
                            block
                                .into_iter()
                                .map(|io| match io {
                                    ExperimentTask::Read(key) => Event::Read(key.to_vec()),
                                    ExperimentTask::Write(key, value) => {
                                        Event::Write(key.to_vec(), value.clone())
                                    }
                                })
                                .collect::<Vec<_>>(),
                        )
                    })
                    .collect();
                sender.send(events);

                idx += 1;
                next_file = path.join(format!("real_trace.{}.data", idx));
            }
        });
        Self {
            receiver,
            events: vec![],
            group_size,
        }
    }

    fn pick_next(&mut self) -> Option<Events> {
        if self.events.is_empty() {
            match self.receiver.recv_timeout(Duration::from_secs(1)) {
                Ok(events) => {
                    self.events = events;
                }
                Err(std::sync::mpsc::RecvTimeoutError::Timeout) => {
                    panic!("Load data timeout");
                }
                Err(std::sync::mpsc::RecvTimeoutError::Disconnected) => return None,
            }
        }

        // Blocks are served from the back of the buffered group.
        Some(self.events.pop().unwrap())
    }
}

impl Iterator for TaskProducer {
    type Item = Events;
    fn next(&mut self) -> Option<Self::Item> {
        if self.group_size == 1 {
            self.pick_next()
        } else {
            let mut grouped_events = Vec::with_capacity(100_000);
            for _ in 0..self.group_size {
                if let Some(events) = self.pick_next() {
                    grouped_events.extend(events.0);
                }
            }
            if grouped_events.is_empty() {
                None
            } else {
                Some(Events(grouped_events))
            }
        }
    }
}

pub struct RealTrace {
    path: String,
    init_tasks: Option<InitTasks>,
    group_size: usize,
}

impl RealTrace {
    pub fn new(opt: &Options, load_warmup: bool) -> Self {
        RealTrace {
            path: opt.trace_dir.clone(),
            init_tasks: if load_warmup {
                Some(read_from_file(
                    &Path::new(&opt.trace_dir).join("real_trace.init"),
                ))
            } else {
                None
            },
            group_size: if opt.algorithm == AuthAlgo::RAIN || opt.algorithm == AuthAlgo::MPT {
                50
            } else {
                1
            },
        }
    }
}

impl TaskTrait for RealTrace {
    fn tasks(&self) -> Box<dyn Iterator<Item = Events>> {
        Box::new(TaskProducer::new(self.path.clone(), self.group_size))
    }

    fn warmup<'a>(&'a self) -> Box<dyn Iterator<Item = Events> + 'a> {
        Box::new(self.init_tasks.as_ref().unwrap().chunks(1000).map(|arr| {
            Events(
                arr.iter()
                    .map(|(key, value)| Event::Write(key.to_vec(), value.clone()))
                    .collect::<Vec<_>>(),
            )
        }))
    }
}
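
// For reference, a hedged sketch of producing trace files this loader
// accepts. The file names mirror the loader above; the tiny contents are
// made up, and postcard's `alloc` feature is assumed for `to_allocvec`.
fn write_tiny_trace(dir: &Path) -> std::io::Result<()> {
    use postcard::to_allocvec;

    // Warmup state consumed by `RealTrace::new`: (key, value) pairs.
    let init: InitTasks = vec![([0u8; 32], b"genesis".to_vec())];
    std::fs::write(dir.join("real_trace.init"), to_allocvec(&init).unwrap())?;

    // One group file holding a single block of I/O events.
    let block: BlockTask = [ExperimentTask::Read([0u8; 32])].into_iter().collect();
    let group: Vec<BlockTask> = vec![block];
    std::fs::write(dir.join("real_trace.0.data"), to_allocvec(&group).unwrap())?;
    Ok(())
}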

--------------------------------------------------------------------------------
/benchmarks/Cargo.toml:
--------------------------------------------------------------------------------
[package]
name = "asb-main"
version = "0.1.0"
authors = ["Chenxing Li "]
edition = "2018"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]

asb-options = { workspace = true }
asb-backend = { workspace = true }
asb-authdb = { workspace = true }
asb-profile = { workspace = true }
asb-tasks = { workspace = true }

authdb = { workspace = true }
kvdb = { workspace = true }

fs_extra = "1.2.0"

--------------------------------------------------------------------------------
/benchmarks/src/main.rs:
--------------------------------------------------------------------------------
use fs_extra::dir::CopyOptions;
use std::fs;

mod run;

use asb_options::{AuthAlgo, Backend, Options, StructOpt};
use run::run_tasks;

fn main() {
    let options: Options = Options::from_args();
    if options.stat_mem && !options.no_stat {
        panic!("Stat will introduce memory cost")
    }
    if options.algorithm == AuthAlgo::LMPTS && options.backend != Backend::RocksDB {
        panic!("LMPTs cannot change backend")
    }
    println!(
        "Testing {:?} with {}",
        options.algorithm,
        if options.real_trace {
            "real trace".into()
        } else {
            format!("{:e} addresses", options.total_keys)
        }
    );

    let db_dir = &options.db_dir;
    let _ = fs::remove_dir_all(db_dir);
    fs::create_dir_all(db_dir).unwrap();

    if let Some(ref warmup_dir) = options.warmup_from() {
        println!("warmup from {}", warmup_dir);
        let mut options = CopyOptions::new();
        options.content_only = true;
        fs_extra::dir::copy(warmup_dir, db_dir, &options).unwrap();
    }

    if let Some(ref dir) = options.report_dir {
        fs::create_dir_all(dir).unwrap()
    }

    let tasks = asb_tasks::tasks(&options);
    let backend = asb_backend::backend(&options);
    let (db, reporter) = asb_authdb::new(backend, &options);
    run_tasks(db, tasks, reporter, &options);
}
Retry...", e); 67 | retry_cnt += 1; 68 | } else { 69 | println!("Writing done"); 70 | return; 71 | } 72 | } 73 | 74 | panic!("Retry limit exceeds!"); 75 | } 76 | } 77 | println!("Warm up done"); 78 | 79 | let frequency = if opts.report_dir.is_none() { -1 } else { 250 }; 80 | let mut profiler = Profiler::new(frequency); 81 | reporter.start(); 82 | 83 | for (epoch, events) in tasks.tasks().enumerate() { 84 | if reporter.start_time.elapsed().as_secs() >= opts.max_time.unwrap_or(u64::MAX) 85 | || epoch + 1 >= opts.max_epoch.unwrap_or(usize::MAX) 86 | { 87 | profiler.tick(); 88 | break; 89 | } 90 | 91 | if (epoch + 1) % opts.profile_epoch == 0 { 92 | profiler.tick(); 93 | } 94 | 95 | let mut read_count = 0; 96 | let mut write_count = 0; 97 | 98 | for event in events.0.into_iter() { 99 | match event { 100 | Event::Read(key) => { 101 | read_count += 1; 102 | let ans = db.get(key); 103 | if ans.is_none() { 104 | reporter.notify_empty_read(); 105 | } 106 | } 107 | Event::Write(key, value) => { 108 | write_count += 1; 109 | db.set(key, value); 110 | } 111 | } 112 | } 113 | db.commit(epoch); 114 | 115 | reporter.notify_epoch(epoch, read_count, write_count, &*db, opts); 116 | } 117 | 118 | reporter.collect_profiling(profiler); 119 | } 120 | -------------------------------------------------------------------------------- /run.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import subprocess 3 | import sys 4 | from functools import partial 5 | import numpy as np 6 | 7 | CARGO_RUN = "cargo run --release --".split(" ") 8 | DRY_RUN = False 9 | WARMUP = "./warmup/v4" 10 | RESULT = "./paper_experiment/osdi23" 11 | # CGRUN_PREFIX = "cgrun" 12 | 13 | 14 | def to_amt_size(key): 15 | if key == "fresh": 16 | return 1e8 17 | if key == "real": 18 | return 2e6 19 | elif key[-1].lower() in "kmg": 20 | exp = 10 ** ("kmg".index(key[-1].lower()) * 3 + 3) 21 | base = float(key[:-1]) * exp 22 | else: 23 | base = float(key) 24 | return int(np.ceil(np.log2(base * 5))) 25 | 26 | 27 | def run(commands, output=None): 28 | if type(commands) is str: 29 | commands = commands.split(" ") 30 | 31 | if output is None: 32 | message = " ".join(commands) 33 | else: 34 | message = " ".join(commands) + f" > {output}" 35 | 36 | if DRY_RUN: 37 | print(message) 38 | return 39 | 40 | print("") 41 | print(f">>>>>>>>>>> {message}") 42 | sys.stdout.flush() 43 | 44 | if output is not None: 45 | output = open(output, "w") 46 | 47 | subprocess.run(commands, stdout=output) 48 | print(f"<<<<<<<<<<< done") 49 | sys.stdout.flush() 50 | 51 | 52 | def warmup(alg, key, shards=None): 53 | if key == "fresh": 54 | return 55 | 56 | if alg == "amt": 57 | amt_size = to_amt_size(key) 58 | if amt_size > 26: 59 | return 60 | alg = alg + f"{amt_size:d}" 61 | 62 | prefix = CARGO_RUN + ["--no-stat", "--warmup-to", WARMUP] 63 | run("rm -rf __benchmarks") 64 | 65 | if key == "real": 66 | prefix = prefix + ["--real-trace"] 67 | else: 68 | prefix = prefix + f"-k {key}".split(" ") 69 | 70 | if shards is None: 71 | run(prefix + f"-a {alg}".split(" ")) 72 | else: 73 | run(prefix + f"-a {alg} --shards {shards}".split(" ")) 74 | 75 | 76 | def bench(task, alg, key, shards=None): 77 | if alg == "amt": 78 | amt_size = to_amt_size(key) 79 | if amt_size > 26: 80 | return 81 | alg = alg + f"{amt_size:d}" 82 | 83 | prefix = CARGO_RUN + f"--max-time 5400 -a {alg}".split(" ") 84 | 85 | if task == "time": 86 | prefix = prefix + ["--no-stat"] 87 | if "CGRUN_PREFIX" in globals(): 88 | prefix = 
globals()["CGRUN_PREFIX"].split(" ") + prefix 89 | run("sudo sysctl -w vm.drop_caches=3") 90 | else: 91 | pass 92 | 93 | if key != "real": 94 | prefix = prefix + "--max-epoch 200".split(" ") 95 | 96 | if key == "fresh": 97 | prefix = prefix + ["--no-warmup"] 98 | else: 99 | prefix = prefix + f"--warmup-from {WARMUP}".split(" ") 100 | 101 | if key == "fresh": 102 | prefix = prefix + f"-k 10g".split(" ") 103 | elif key == "real": 104 | if alg in ["rain", "mpt"]: 105 | prefix = prefix + f"--real-trace --report-epoch 1".split(" ") 106 | else: 107 | prefix = prefix + f"--real-trace --report-epoch 25".split(" ") 108 | else: 109 | prefix = prefix + f"-k {key}".split(" ") 110 | 111 | if task == "stat": 112 | prefix = prefix + "--cache-size 8192".split(" ") 113 | elif alg in ["raw", "mpt"]: 114 | prefix = prefix + "--cache-size 4096".split(" ") 115 | else: 116 | prefix = prefix + "--cache-size 2048".split(" ") 117 | 118 | run("rm -rf __benchmarks") 119 | 120 | 121 | if shards is None: 122 | output = f"{RESULT}/{task}_{alg}_{key}.log" 123 | run(prefix, output) 124 | else: 125 | output = f"{RESULT}/{task}_{alg}{shards}_{key}.log" 126 | run(prefix + f"--shards {shards}".split(" "), output) 127 | 128 | 129 | bench_time = partial(bench, "time") 130 | bench_stat = partial(bench, "stat") 131 | 132 | 133 | def warmup_all(): 134 | for key in ["real", "1m", "1600k", "2500k", "4m", "6300k", "10m", "16m", "25m", "40m", "63m", "100m"]: 135 | warmup("raw", key) 136 | warmup("lvmt", key) 137 | warmup("rain", key) 138 | warmup("mpt", key) 139 | for shards in [64, 16, 1]: 140 | if shards == 1 and key in ["real", "16m", "25m", "40m", "63m", "100m"]: 141 | continue 142 | warmup("lvmt", key, shards) 143 | 144 | def bench_all_time(): 145 | for key in ["fresh", "real", "1m", "1600k", "2500k", "4m", "6300k", "10m", "16m", "25m", "40m", "63m", "100m"]: 146 | bench_time("raw", key) 147 | bench_time("lvmt", key) 148 | bench_time("rain", key) 149 | bench_time("mpt", key) 150 | for shards in [64, 16, 1]: 151 | if shards == 1 and key in ["real", "16m", "25m", "40m", "63m", "100m"]: 152 | continue 153 | bench_time("lvmt", key, shards) 154 | 155 | def bench_all_stat(): 156 | for key in ["fresh", "real", "1m", "10m", "100m"]: 157 | bench_stat("raw", key) 158 | bench_stat("lvmt", key) 159 | bench_stat("rain", key) 160 | bench_stat("mpt", key) 161 | for shards in [64, 16]: 162 | bench_stat("lvmt", key, shards) 163 | 164 | 165 | 166 | run("rm -rf __reports __benchmarks") 167 | run(f"mkdir -p {WARMUP}") 168 | run(f"mkdir -p {RESULT}") 169 | 170 | warmup_all() 171 | bench_all_time() 172 | bench_all_stat() 173 | -------------------------------------------------------------------------------- /rust-toolchain: -------------------------------------------------------------------------------- 1 | 1.67.0 2 | --------------------------------------------------------------------------------