├── .gitignore ├── .travis.yml ├── Cargo.toml ├── LICENSE ├── README.md ├── appveyor.yml ├── art ├── icon.svg ├── logo.svg └── logo_v2.svg ├── ledb-actix ├── Cargo.toml ├── README.md ├── examples │ ├── client.rc │ ├── server.rs │ └── simple.rs └── src │ ├── actor.rs │ ├── extra.rs │ ├── lib.rs │ ├── macros.rs │ └── scope.rs ├── ledb-derive ├── Cargo.toml ├── README.md └── src │ ├── document.rs │ ├── lib.rs │ └── wrapper.rs ├── ledb-node ├── .gitignore ├── .npmignore ├── LICENSE ├── README.md ├── index.d.ts ├── native │ ├── Cargo.toml │ ├── build.rs │ └── src │ │ ├── collection.rs │ │ ├── documents.rs │ │ ├── helper.rs │ │ ├── lib.rs │ │ ├── refine.rs │ │ └── storage.rs ├── package.json ├── test │ └── index.ts └── tsconfig.json ├── ledb-types ├── Cargo.toml ├── README.md └── src │ ├── document.rs │ ├── identifier.rs │ ├── index.rs │ └── lib.rs └── ledb ├── Cargo.toml ├── README.md └── src ├── collection.rs ├── document.rs ├── enumerate.rs ├── error.rs ├── filter.rs ├── float.rs ├── index.rs ├── lib.rs ├── macros.rs ├── modify.rs ├── pool.rs ├── selection.rs ├── storage.rs ├── test.rs └── value.rs /.gitignore: -------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | /target/ 4 | 5 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 6 | # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html 7 | Cargo.lock 8 | 9 | # These are backup files generated by rustfmt 10 | **/*.rs.bk 11 | 12 | # Temporary test databases 13 | test_db/ 14 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: rust 2 | rust: 3 | - stable 4 | - beta 5 | - nightly 6 | jobs: 7 | allow_failures: 8 | - rust: nightly 9 | fast_finish: true 10 | env: 11 | matrix: 12 | - TRAVIS_NODE_VERSION="10" 13 | - 
TRAVIS_NODE_VERSION="14" 14 | before_install: 15 | - source $HOME/.nvm/nvm.sh 16 | - nvm install ${TRAVIS_NODE_VERSION} 17 | - nvm use ${TRAVIS_NODE_VERSION} 18 | script: 19 | - cargo test --verbose --all 20 | - cd ledb-node && npm install && npm test 21 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | 3 | members = [ 4 | "ledb-types", 5 | "ledb-derive", 6 | "ledb", 7 | "ledb-actix", 8 | ] 9 | 10 | exclude = [ 11 | "ledb-node/native", 12 | ] 13 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 Kayo 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Lightweight embedded database 2 | 3 | [![License: MIT](https://img.shields.io/badge/License-MIT-brightgreen.svg)](https://opensource.org/licenses/MIT) 4 | [![Travis-CI Build Status](https://travis-ci.org/katyo/ledb.svg?branch=master)](https://travis-ci.org/katyo/ledb) 5 | [![Appveyor Build status](https://ci.appveyor.com/api/projects/status/1wrmhivii22emfxg)](https://ci.appveyor.com/project/katyo/ledb) 6 | [![Crates.io Package](https://img.shields.io/crates/v/ledb.svg?style=popout)](https://crates.io/crates/ledb) 7 | [![Docs.rs API Documentation](https://docs.rs/ledb/badge.svg)](https://docs.rs/ledb) 8 | 9 | The **LEDB** is an attempt to implement simple but efficient, lightweight but powerful document storage. 10 | 11 | The abbreviation *LEDB* may be treated as an Lightweight Embedded DB, also Low End DB, also Literium Engine DB, also LitE DB, and so on. 12 | 13 | ## Documents storage library (`ledb` crate) 14 | 15 | This is a basic library which implements document storage and query functionality. 16 | 17 | See [README](ledb/README.md). 18 | 19 | ## Basic types for storable documents (`ledb-types` crate) 20 | 21 | This crate defines basic types and traits which can be used to turn structs into storable documents. 22 | 23 | See [README](ledb-types/README.md). 24 | 25 | ## Derive macro for storable documents (`ledb-derive` crate) 26 | 27 | The crate implements derive macros which helps defining storable documents. 28 | 29 | See [README](ledb-derive/README.md). 30 | 31 | ## Actor and REST-interface for documents storage (`ledb-actix` crate) 32 | 33 | This is an actor which helps interacting with database in applications which builts on the [actix](https://actix.rs/) actor framework. 34 | 35 | See [README](ledb-actix/README.md). 
36 | -------------------------------------------------------------------------------- /appveyor.yml: -------------------------------------------------------------------------------- 1 | # Appveyor configuration template for Rust using rustup for Rust installation 2 | # https://github.com/starkat99/appveyor-rust 3 | 4 | ## Operating System (VM environment) ## 5 | 6 | # Rust needs at least Visual Studio 2013 Appveyor OS for MSVC targets. 7 | os: Visual Studio 2015 8 | 9 | ## Build Matrix ## 10 | 11 | # This configuration will setup a build for each channel & target combination (12 windows 12 | # combinations in all). 13 | # 14 | # There are 3 channels: stable, beta, and nightly. 15 | # 16 | # Alternatively, the full version may be specified for the channel to build using that specific 17 | # version (e.g. channel: 1.5.0) 18 | # 19 | # The values for target are the set of windows Rust build targets. Each value is of the form 20 | # 21 | # ARCH-pc-windows-TOOLCHAIN 22 | # 23 | # Where ARCH is the target architecture, either x86_64 or i686, and TOOLCHAIN is the linker 24 | # toolchain to use, either msvc or gnu. See https://www.rust-lang.org/downloads.html#win-foot for 25 | # a description of the toolchain differences. 26 | # See https://github.com/rust-lang-nursery/rustup.rs/#toolchain-specification for description of 27 | # toolchains and host triples. 28 | # 29 | # Comment out channel/target combos you do not wish to build in CI. 30 | # 31 | # You may use the `cargoflags` and `RUSTFLAGS` variables to set additional flags for cargo commands 32 | # and rustc, respectively. For instance, you can uncomment the cargoflags lines in the nightly 33 | # channels to enable unstable features when building for nightly. Or you could add additional 34 | # matrix entries to test different combinations of features. 
35 | environment: 36 | matrix: 37 | 38 | ### MSVC Toolchains ### 39 | 40 | # Stable 64-bit MSVC 41 | - channel: stable 42 | target: x86_64-pc-windows-msvc 43 | # Stable 32-bit MSVC 44 | - channel: stable 45 | target: i686-pc-windows-msvc 46 | # Beta 64-bit MSVC 47 | - channel: beta 48 | target: x86_64-pc-windows-msvc 49 | # Beta 32-bit MSVC 50 | - channel: beta 51 | target: i686-pc-windows-msvc 52 | # Nightly 64-bit MSVC 53 | - channel: nightly 54 | target: x86_64-pc-windows-msvc 55 | #cargoflags: --features "unstable" 56 | # Nightly 32-bit MSVC 57 | - channel: nightly 58 | target: i686-pc-windows-msvc 59 | #cargoflags: --features "unstable" 60 | 61 | ### GNU Toolchains ### 62 | 63 | # Stable 64-bit GNU 64 | - channel: stable 65 | target: x86_64-pc-windows-gnu 66 | bits: 64 67 | # Stable 32-bit GNU 68 | - channel: stable 69 | target: i686-pc-windows-gnu 70 | bits: 32 71 | # Beta 64-bit GNU 72 | - channel: beta 73 | target: x86_64-pc-windows-gnu 74 | bits: 64 75 | # Beta 32-bit GNU 76 | - channel: beta 77 | target: i686-pc-windows-gnu 78 | bits: 32 79 | # Nightly 64-bit GNU 80 | - channel: nightly 81 | target: x86_64-pc-windows-gnu 82 | bits: 64 83 | #cargoflags: --features "unstable" 84 | # Nightly 32-bit GNU 85 | - channel: nightly 86 | target: i686-pc-windows-gnu 87 | bits: 32 88 | #cargoflags: --features "unstable" 89 | 90 | ### Allowed failures ### 91 | 92 | # See Appveyor documentation for specific details. In short, place any channel or targets you wish 93 | # to allow build failures on (usually nightly at least is a wise choice). This will prevent a build 94 | # or test failure in the matching channels/targets from failing the entire build. 95 | matrix: 96 | allow_failures: 97 | - channel: nightly 98 | 99 | # If you only care about stable channel build failures, uncomment the following line: 100 | #- channel: beta 101 | 102 | ## Install Script ## 103 | 104 | # This is the most important part of the Appveyor configuration. 
This installs the version of Rust 105 | # specified by the 'channel' and 'target' environment variables from the build matrix. This uses 106 | # rustup to install Rust. 107 | # 108 | # For simple configurations, instead of using the build matrix, you can simply set the 109 | # default-toolchain and default-host manually here. 110 | install: 111 | - appveyor DownloadFile https://win.rustup.rs/ -FileName rustup-init.exe 112 | - rustup-init -yv --default-toolchain %channel% --default-host %target% 113 | - set PATH=%PATH%;%USERPROFILE%\.cargo\bin;C:\msys64\mingw%bits%\bin;C:\msys64\usr\bin 114 | # Fix undefined references to __acrt_iob_func() per 115 | # https://github.com/nabijaczleweli/cargo-update/issues/74#issuecomment-376581009 116 | - sed -rie "s/#define std([[:alpha:]]+)[[:space:]]+\(__acrt_iob_func\(([[:digit:]]+)\)\)/#define std\1 (\&__iob_func()[\2])/" "C:\msys64\mingw64\x86_64-w64-mingw32\include\stdio.h" 117 | - sed -rie "s/#define std([[:alpha:]]+)[[:space:]]+\(__acrt_iob_func\(([[:digit:]]+)\)\)/#define std\1 (\&__iob_func()[\2])/" "C:\msys64\mingw32\i686-w64-mingw32\include\stdio.h" 118 | - rustc -vV 119 | - cargo -vV 120 | 121 | ## Build Script ## 122 | 123 | # 'cargo test' takes care of building for us, so disable Appveyor's build stage. This prevents 124 | # the "directory does not contain a project or solution file" error. 125 | build: false 126 | 127 | # Uses 'cargo test' to run tests and build. Alternatively, the project may call compiled programs 128 | #directly or perform other testing commands. Rust will automatically be placed in the PATH 129 | # environment variable. 
130 | test_script: 131 | - cargo test --verbose --all --exclude ledb-node %cargoflags% 132 | -------------------------------------------------------------------------------- /art/icon.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 19 | 21 | 40 | 43 | 44 | 46 | 47 | 49 | image/svg+xml 50 | 52 | 53 | 54 | 55 | 56 | 61 | 66 | L 78 | E 90 | 91 | 92 | -------------------------------------------------------------------------------- /art/logo.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 19 | 21 | 40 | 43 | 44 | 46 | 47 | 49 | image/svg+xml 50 | 52 | 53 | 54 | 55 | 56 | 61 | 66 | LEDB 77 | 83 | Low End 94 | 95 | 96 | -------------------------------------------------------------------------------- /art/logo_v2.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 19 | 21 | 40 | 43 | 44 | 46 | 47 | 49 | image/svg+xml 50 | 52 | 53 | 54 | 55 | 56 | 61 | 66 | LE 77 | 83 | Low End 94 | Database 105 | DB 116 | 117 | 118 | -------------------------------------------------------------------------------- /ledb-actix/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "ledb-actix" 3 | version = "0.4.0" 4 | authors = ["Kayo "] 5 | license = "MIT" 6 | readme = "README.md" 7 | repository = "https://github.com/katyo/ledb" 8 | homepage = "https://github.com/katyo/ledb/tree/master/ledb-actix" 9 | keywords = ["actor", "rest", "storage", "json", "lmdb"] 10 | categories = ["database", "web-programming", "asynchronous"] 11 | description = "LEDB Actor for Actix actor framework and storage REST-interface" 12 | edition = "2018" 13 | 14 | [badges] 15 | travis-ci = { repository = "katyo/ledb" } 16 | appveyor = { repository = "katyo/ledb" } 17 | 18 | [[example]] 19 | name = "simple" 20 | 21 | [[example]] 22 | name = "server" 23 | required-features = ["web"] 24 | 25 | 
[dependencies] 26 | serde = { version = "^1", features = ["derive"] } 27 | serde_with = { version = "^0.2", optional = true, features = ["json"] } 28 | ledb = { version = "0.4", path = "../ledb", features = ["derive"] } 29 | ledb-types = { version = "0.4", path = "../ledb-types" } 30 | futures = "^0.3" 31 | actix = "^0.9" 32 | actix-web = { version = "^2", optional = true } 33 | 34 | [dev-dependencies] 35 | serde_json = "^1" 36 | futures = "^0.3" 37 | actix-rt = "^1" 38 | actix-web = "^2" 39 | log = "^0.4" 40 | pretty_env_logger = "^0.4" 41 | 42 | [features] 43 | default = [] 44 | web = ["serde_with", "actix-web"] 45 | 46 | [package.metadata.docs.rs] 47 | features = ["web"] 48 | -------------------------------------------------------------------------------- /ledb-actix/README.md: -------------------------------------------------------------------------------- 1 | # Actor and REST interface for LEDB 2 | 3 | [![License: MIT](https://img.shields.io/badge/License-MIT-brightgreen.svg)](https://opensource.org/licenses/MIT) 4 | [![Travis-CI Build Status](https://travis-ci.org/katyo/ledb.svg?branch=master)](https://travis-ci.org/katyo/ledb) 5 | [![Appveyor Build status](https://ci.appveyor.com/api/projects/status/1wrmhivii22emfxg)](https://ci.appveyor.com/project/katyo/ledb) 6 | [![Crates.io Package](https://img.shields.io/crates/v/ledb-actix.svg?style=popout)](https://crates.io/crates/ledb-actix) 7 | [![Docs.rs API Documentation](https://docs.rs/ledb-actix/badge.svg)](https://docs.rs/ledb-actix) 8 | 9 | The **LEDB** is an attempt to implement simple but efficient, lightweight but powerful document storage. 10 | 11 | The abbreviation *LEDB* may be treated as an Lightweight Embedded DB, also Low End DB, also Literium Engine DB, also LitE DB, and so on. 
12 | 13 | ## Links 14 | 15 | * [ledb-actix Crate on crates.io](https://crates.io/crates/ledb-actix) 16 | * [ledb-actix API Docs on docs.rs](https://docs.rs/ledb-actix) 17 | * [ledb Crate on crates.io](https://crates.io/crates/ledb) 18 | * [ledb API Docs on docs.rs](https://docs.rs/ledb) 19 | * [ledb-types Crate on crates.io](https://crates.io/crates/ledb-types) 20 | * [ledb-types API Docs on docs.rs](https://docs.rs/ledb-types) 21 | * [ledb-derive Crate on crates.io](https://crates.io/crates/ledb-derive) 22 | * [ledb-derive API Docs on docs.rs](https://docs.rs/ledb-derive) 23 | 24 | ## REST-interface 25 | 26 | *LEDB HTTP interface 0.1.0* 27 | 28 | ### Storage API 29 | 30 | #### get database info 31 | 32 | __GET__ /info 33 | 34 | #### get database statistics 35 | 36 | __GET__ /stats 37 | 38 | ### Collection API 39 | 40 | #### get list of collections 41 | 42 | __GET__ /collection 43 | 44 | #### create new empty collection 45 | 46 | __POST__ /collection?name=_$collection_name_ 47 | 48 | #### drop collection with all documents 49 | 50 | __DELETE__ /collection/_$collection_name_ 51 | 52 | ### Index API 53 | 54 | #### get indexes of collection 55 | 56 | __GET__ /collection/_$collection_name_/index 57 | 58 | #### create new index for collection 59 | 60 | __POST__ /collection/_$collection_name_/index?path=_$field_name_&kind=_$index_kind_&key=_$key_type_ 61 | 62 | #### drop index of collection 63 | 64 | __DELETE__ /collection/_$collection_name_/document/_$index_name_ 65 | 66 | ### Document API 67 | 68 | #### find documents using query 69 | 70 | __GET__ /collection/_$collection_name_/document?filter=_$query_&order=_$ordering_&offset=_$skip_&length=_$take_ 71 | 72 | __GET__ /collection/_$collection_name_?filter=_$query_&order=_$ordering_&offset=_$skip_&length=_$take_ 73 | 74 | #### modify documents using query 75 | 76 | __PUT__ /collection/_$collection_name_/document?filter=_$query_&modify=_$modifications_ 77 | 78 | __PATCH__ 
/collection/_$collection_name_?filter=_$query_&modify=_$modifications_ 79 | 80 | #### remove documents using query 81 | 82 | __DELETE__ /collection/_$collection_name_/document?filter=_$query_ 83 | 84 | __PUT__ /collection/_$collection_name_?filter=_$query_ 85 | 86 | #### insert new document 87 | 88 | __POST__ /collection/_$collection_name_/document 89 | 90 | __POST__ /collection/_$collection_name_ 91 | 92 | #### get document by id 93 | 94 | __GET__ /collection/_$collection_name_/document/_$document_id_ 95 | 96 | __GET__ /collection/_$collection_name_/_$document_id_ 97 | 98 | #### replace document 99 | 100 | __PUT__ /collection/_$collection_name_/document/_$document_id_ 101 | 102 | __PUT__ /collection/_$collection_name_/_$document_id_ 103 | 104 | #### remove document 105 | 106 | __DELETE__ /collection/_$collection_name_/document/_$document_id_ 107 | 108 | __DELETE__ /collection/_$collection_name_/_$document_id_ 109 | 110 | ### Supported index kinds 111 | 112 | * uni -- Unique key 113 | * dup -- Duplicated keys 114 | 115 | ### Supported key types 116 | 117 | * int -- 64-bit signed integer 118 | * float -- 64-bit floating point number 119 | * bool -- boolean value 120 | * string -- UTF-8 string 121 | * binary -- binary data 122 | 123 | ## Actor 124 | 125 | ### Usage example 126 | 127 | ```rust 128 | use std::env; 129 | 130 | use serde::{Deserialize, Serialize}; 131 | use serde_json::json; 132 | 133 | use ledb_actix::{query, Document, Options, Primary, Storage, StorageAddrExt}; 134 | use log::info; 135 | use serde_json::from_value; 136 | 137 | #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Document)] 138 | struct BlogPost { 139 | #[document(primary)] 140 | pub id: Option, 141 | pub title: String, 142 | pub tags: Vec, 143 | pub content: String, 144 | } 145 | 146 | #[actix_rt::main] 147 | async fn main() { 148 | env::set_var("RUST_LOG", "info"); 149 | pretty_env_logger::init(); 150 | 151 | let _ = std::fs::remove_dir_all("example_db"); 152 | 153 | let addr 
= Storage::new("example_db", Options::default()) 154 | .unwrap() 155 | .start(1); 156 | 157 | let id = addr 158 | .send_query(query!( 159 | insert into blog { 160 | "title": "Absurd", 161 | "tags": ["absurd", "psychology"], 162 | "content": "Still nothing..." 163 | } 164 | )) 165 | .await 166 | .unwrap(); 167 | 168 | info!("Inserted document id: {}", id); 169 | assert_eq!(id, 1); 170 | 171 | let id = addr.send_query(query!( 172 | insert into blog { 173 | "title": "Lorem ipsum", 174 | "tags": ["lorem", "ipsum"], 175 | "content": "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum." 176 | } 177 | )).await.unwrap(); 178 | 179 | info!("Inserted document id: {}", id); 180 | assert_eq!(id, 2); 181 | 182 | addr.send_query(query!( 183 | index for blog tags string 184 | )) 185 | .await 186 | .unwrap(); 187 | 188 | info!("Indexing is ok"); 189 | 190 | let mut docs = addr 191 | .send_query(query!( 192 | find BlogPost in blog 193 | where tags == "psychology" 194 | order asc 195 | )) 196 | .await 197 | .unwrap(); 198 | 199 | info!("Number of found documents: {}", docs.size_hint().0); 200 | 201 | assert_eq!(docs.size_hint(), (1, Some(1))); 202 | 203 | let doc = docs.next().unwrap().unwrap(); 204 | 205 | info!("Found document: {:?}", doc); 206 | 207 | let doc_data: BlogPost = from_value(json!({ 208 | "id": 1, 209 | "title": "Absurd", 210 | "tags": ["absurd", "psychology"], 211 | "content": "Still nothing..." 
212 | })) 213 | .unwrap(); 214 | 215 | assert_eq!(&doc, &doc_data); 216 | assert!(docs.next().is_none()); 217 | } 218 | ``` 219 | -------------------------------------------------------------------------------- /ledb-actix/examples/client.rc: -------------------------------------------------------------------------------- 1 | # -*- mode: restclient -*- 2 | :url = http://localhost:8888 3 | 4 | # get usage 5 | GET :url/ 6 | 7 | # get list collections 8 | GET :url/collection 9 | 10 | # create collection 11 | POST :url/collection?name=user 12 | 13 | # insert document 14 | POST :url/collection/user/document 15 | Content-Type: application/json 16 | 17 | {"name": "kayo", "role": "Full-stack developer"} 18 | 19 | # get document 20 | GET :url/collection/user/document/1 21 | 22 | # Ensure index (unique name) 23 | POST :url/collection/user/index?path=name&kind=unique&key=string 24 | 25 | # Ensure index (prefs) 26 | POST :url/collection/user/index?path=prefs&key=string 27 | 28 | # Drop index 29 | DELETE :url/collection/user/index/name 30 | 31 | # put new version of document 32 | PUT :url/collection/user/document/1 33 | Content-Type: application/json 34 | 35 | {"name": "kayo", "role": "Full-stack developer", "prefs": ["emacs", "nixos"] } 36 | 37 | # insert document 38 | POST :url/collection/user/document 39 | Content-Type: application/json 40 | 41 | {"name": "alien", "role": "Tester"} 42 | 43 | # insert document 44 | POST :url/collection/user/document 45 | Content-Type: application/json 46 | 47 | {"name": "ivan", "role": "Tester", "prefs": ["emacs", "debian"]} 48 | 49 | # insert document 50 | POST :url/collection/user/document 51 | Content-Type: application/json 52 | 53 | {"name": "strapper", "role": "Designer", "prefs": ["vim", "debian"]} 54 | 55 | # find all 56 | GET :url/collection/user/document 57 | 58 | # find by prefs 59 | GET :url/collection/user/document?filter=%7B%22prefs%22:%7B%22$eq%22:%22debian%22%7D%7D 60 | 61 | # remove document 62 | DELETE 
:url/collection/user/document/1 63 | -------------------------------------------------------------------------------- /ledb-actix/examples/server.rs: -------------------------------------------------------------------------------- 1 | use actix_web::{middleware::Logger, App, HttpServer}; 2 | use ledb_actix::{storage, Options, Storage}; 3 | use std::env; 4 | 5 | #[actix_rt::main] 6 | async fn main() { 7 | env::set_var("RUST_LOG", "info"); 8 | pretty_env_logger::init(); 9 | 10 | let addr = Storage::new("database", Options::default()) 11 | .unwrap() 12 | .start(4); 13 | 14 | let bind = "127.0.0.1:8888"; 15 | 16 | HttpServer::new(move || { 17 | App::new() 18 | .wrap(Logger::default()) 19 | .data(addr.clone()) 20 | .service(storage()) 21 | }) 22 | .bind(&bind) 23 | .unwrap() 24 | .run() 25 | .await 26 | .unwrap(); 27 | } 28 | -------------------------------------------------------------------------------- /ledb-actix/examples/simple.rs: -------------------------------------------------------------------------------- 1 | use std::env; 2 | 3 | use serde::{Deserialize, Serialize}; 4 | use serde_json::json; 5 | 6 | use ledb_actix::{query, Document, Options, Primary, Storage, StorageAddrExt}; 7 | use log::info; 8 | use serde_json::from_value; 9 | 10 | #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Document)] 11 | struct BlogPost { 12 | #[document(primary)] 13 | pub id: Option, 14 | pub title: String, 15 | pub tags: Vec, 16 | pub content: String, 17 | } 18 | 19 | #[actix_rt::main] 20 | async fn main() { 21 | env::set_var("RUST_LOG", "info"); 22 | pretty_env_logger::init(); 23 | 24 | let _ = std::fs::remove_dir_all("example_db"); 25 | 26 | let addr = Storage::new("example_db", Options::default()) 27 | .unwrap() 28 | .start(1); 29 | 30 | let id = addr 31 | .send_query(query!( 32 | insert into blog { 33 | "title": "Absurd", 34 | "tags": ["absurd", "psychology"], 35 | "content": "Still nothing..." 
36 | } 37 | )) 38 | .await 39 | .unwrap(); 40 | 41 | info!("Inserted document id: {}", id); 42 | assert_eq!(id, 1); 43 | 44 | let id = addr.send_query(query!( 45 | insert into blog { 46 | "title": "Lorem ipsum", 47 | "tags": ["lorem", "ipsum"], 48 | "content": "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum." 49 | } 50 | )).await.unwrap(); 51 | 52 | info!("Inserted document id: {}", id); 53 | assert_eq!(id, 2); 54 | 55 | addr.send_query(query!( 56 | index for blog tags string 57 | )) 58 | .await 59 | .unwrap(); 60 | 61 | info!("Indexing is ok"); 62 | 63 | let mut docs = addr 64 | .send_query(query!( 65 | find BlogPost in blog 66 | where tags == "psychology" 67 | order asc 68 | )) 69 | .await 70 | .unwrap(); 71 | 72 | info!("Number of found documents: {}", docs.size_hint().0); 73 | 74 | assert_eq!(docs.size_hint(), (1, Some(1))); 75 | 76 | let doc = docs.next().unwrap().unwrap(); 77 | 78 | info!("Found document: {:?}", doc); 79 | 80 | let doc_data: BlogPost = from_value(json!({ 81 | "id": 1, 82 | "title": "Absurd", 83 | "tags": ["absurd", "psychology"], 84 | "content": "Still nothing..." 
85 | })) 86 | .unwrap(); 87 | 88 | assert_eq!(&doc, &doc_data); 89 | assert!(docs.next().is_none()); 90 | } 91 | -------------------------------------------------------------------------------- /ledb-actix/src/extra.rs: -------------------------------------------------------------------------------- 1 | use std::pin::Pin; 2 | 3 | use super::Storage; 4 | use actix::{dev::ToEnvelope, Addr, Handler, MailboxError, Message}; 5 | use futures::{future::Either, Future}; 6 | 7 | /// Helper for sending queries 8 | /// 9 | /// This is alternative to `Addr::send` which unwraps results of type `Result` using `Either` type for wrapping errors. 10 | pub trait StorageAddrExt { 11 | fn get_storage_addr(&self) -> &Addr 12 | where 13 | A: Handler + Send, 14 | A::Context: ToEnvelope, 15 | M: Message> + Send + 'static, 16 | T: Send + 'static, 17 | E: Send + 'static; 18 | 19 | /// Send query and get unwrapped result 20 | fn send_query( 21 | &self, 22 | msg: M, 23 | ) -> Pin>> + Send>> 24 | where 25 | A: Handler + Send, 26 | A::Context: ToEnvelope, 27 | M: Message> + Send + 'static, 28 | T: Send + 'static, 29 | E: Send + 'static, 30 | { 31 | let request = self.get_storage_addr().send(msg); 32 | Box::pin(async { request.await.map_err(Either::Left)?.map_err(Either::Right) }) 33 | } 34 | } 35 | 36 | impl StorageAddrExt for Addr { 37 | fn get_storage_addr(&self) -> &Addr 38 | where 39 | Storage: Handler, 40 | M: Message> + Send, 41 | T: Send + 'static, 42 | E: Send + 'static, 43 | { 44 | self 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /ledb-actix/src/lib.rs: -------------------------------------------------------------------------------- 1 | /*! 2 | 3 | # LEDB Storage actor and REST interface 4 | 5 | An implementation of storage actor for [Actix](https://actix.rs/). 
6 | 7 | *NOTE: Use `features = ["web"]` to enable an optional scoped REST-interface for **actix-web**.* 8 | 9 | ## Storage actor 10 | 11 | Usage example: 12 | 13 | ```rust 14 | use std::env; 15 | 16 | use serde::{Deserialize, Serialize}; 17 | use serde_json::json; 18 | 19 | use ledb_actix::{query, Document, Options, Primary, Storage, StorageAddrExt}; 20 | use log::info; 21 | use serde_json::from_value; 22 | 23 | #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Document)] 24 | struct BlogPost { 25 | #[document(primary)] 26 | pub id: Option, 27 | pub title: String, 28 | pub tags: Vec, 29 | pub content: String, 30 | } 31 | 32 | #[actix_rt::main] 33 | async fn main() { 34 | env::set_var("RUST_LOG", "info"); 35 | pretty_env_logger::init(); 36 | 37 | let _ = std::fs::remove_dir_all("example_db"); 38 | 39 | let addr = Storage::new("example_db", Options::default()) 40 | .unwrap() 41 | .start(1); 42 | 43 | let id = addr 44 | .send_query(query!( 45 | insert into blog { 46 | "title": "Absurd", 47 | "tags": ["absurd", "psychology"], 48 | "content": "Still nothing..." 49 | } 50 | )) 51 | .await 52 | .unwrap(); 53 | 54 | info!("Inserted document id: {}", id); 55 | assert_eq!(id, 1); 56 | 57 | let id = addr.send_query(query!( 58 | insert into blog { 59 | "title": "Lorem ipsum", 60 | "tags": ["lorem", "ipsum"], 61 | "content": "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum." 
62 | } 63 | )).await.unwrap(); 64 | 65 | info!("Inserted document id: {}", id); 66 | assert_eq!(id, 2); 67 | 68 | addr.send_query(query!( 69 | index for blog tags string 70 | )) 71 | .await 72 | .unwrap(); 73 | 74 | info!("Indexing is ok"); 75 | 76 | let mut docs = addr 77 | .send_query(query!( 78 | find BlogPost in blog 79 | where tags == "psychology" 80 | order asc 81 | )) 82 | .await 83 | .unwrap(); 84 | 85 | info!("Number of found documents: {}", docs.size_hint().0); 86 | 87 | assert_eq!(docs.size_hint(), (1, Some(1))); 88 | 89 | let doc = docs.next().unwrap().unwrap(); 90 | 91 | info!("Found document: {:?}", doc); 92 | 93 | let doc_data: BlogPost = from_value(json!({ 94 | "id": 1, 95 | "title": "Absurd", 96 | "tags": ["absurd", "psychology"], 97 | "content": "Still nothing..." 98 | })) 99 | .unwrap(); 100 | 101 | assert_eq!(&doc, &doc_data); 102 | assert!(docs.next().is_none()); 103 | } 104 | ``` 105 | 106 | ## REST-interface 107 | 108 | *LEDB HTTP interface 0.1.0* 109 | 110 | ### Storage API 111 | 112 | #### get database info 113 | 114 | __GET__ /info 115 | 116 | #### get database statistics 117 | 118 | __GET__ /stats 119 | 120 | ### Collection API 121 | 122 | #### get list of collections 123 | 124 | __GET__ /collection 125 | 126 | #### create new empty collection 127 | 128 | __POST__ /collection?name=_$collection_name_ 129 | 130 | #### drop collection with all documents 131 | 132 | __DELETE__ /collection/_$collection_name_ 133 | 134 | ### Index API 135 | 136 | #### get indexes of collection 137 | 138 | __GET__ /collection/_$collection_name_/index 139 | 140 | #### create new index for collection 141 | 142 | __POST__ /collection/_$collection_name_/index?path=_$field_name_&kind=_$index_kind_&key=_$key_type_ 143 | 144 | #### drop index of collection 145 | 146 | __DELETE__ /collection/_$collection_name_/document/_$index_name_ 147 | 148 | ### Document API 149 | 150 | #### find documents using query 151 | 152 | __GET__ 
/collection/_$collection_name_/document?filter=_$query_&order=_$ordering_&offset=_$skip_&length=_$take_ 153 | 154 | __GET__ /collection/_$collection_name_?filter=_$query_&order=_$ordering_&offset=_$skip_&length=_$take_ 155 | 156 | #### modify documents using query 157 | 158 | __PUT__ /collection/_$collection_name_/document?filter=_$query_&modify=_$modifications_ 159 | 160 | __PATCH__ /collection/_$collection_name_?filter=_$query_&modify=_$modifications_ 161 | 162 | #### remove documents using query 163 | 164 | __DELETE__ /collection/_$collection_name_/document?filter=_$query_ 165 | 166 | __PUT__ /collection/_$collection_name_?filter=_$query_ 167 | 168 | #### insert new document 169 | 170 | __POST__ /collection/_$collection_name_/document 171 | 172 | __POST__ /collection/_$collection_name_ 173 | 174 | #### get document by id 175 | 176 | __GET__ /collection/_$collection_name_/document/_$document_id_ 177 | 178 | __GET__ /collection/_$collection_name_/_$document_id_ 179 | 180 | #### replace document 181 | 182 | __PUT__ /collection/_$collection_name_/document/_$document_id_ 183 | 184 | __PUT__ /collection/_$collection_name_/_$document_id_ 185 | 186 | #### remove document 187 | 188 | __DELETE__ /collection/_$collection_name_/document/_$document_id_ 189 | 190 | __DELETE__ /collection/_$collection_name_/_$document_id_ 191 | 192 | */ 193 | 194 | mod actor; 195 | mod extra; 196 | mod macros; 197 | #[cfg(feature = "web")] 198 | mod scope; 199 | 200 | pub use ledb::{ 201 | KeyType, Modify, Options, Order, OrderKind, Primary, Stats, _query_impl, query_extr, Action, 202 | Comp, Cond, Document, DocumentsIterator, Filter, Identifier, IndexKind, Info, KeyData, 203 | KeyField, KeyFields, Value, 204 | }; 205 | 206 | pub use actor::*; 207 | pub use extra::*; 208 | 209 | #[cfg(feature = "web")] 210 | pub use scope::*; 211 | -------------------------------------------------------------------------------- /ledb-actix/src/macros.rs: 
-------------------------------------------------------------------------------- 1 | /// Unified query message macro 2 | /// 3 | #[macro_export(local_inner_macros)] 4 | macro_rules! query { 5 | // call util 6 | (@$util:ident $($args:tt)*) => ( $crate::_query_impl!(@$util $($args)*) ); 7 | 8 | // make query 9 | ($($tokens:tt)+) => ( $crate::_query_impl!(@query _query_actix, $($tokens)+) ); 10 | } 11 | 12 | // native API output macros 13 | #[macro_export] 14 | #[doc(hidden)] 15 | macro_rules! _query_actix { 16 | (@index $coll:expr, [ $($indexes:tt),+ ]) => ( 17 | $crate::SetIndexes($crate::_query_impl!(@stringify $coll), ::ledb::_query_impl![@vec $($indexes),+]) 18 | ); 19 | (@index $coll:expr, $($indexes:tt)+) => ( 20 | $crate::SetIndexes($crate::_query_impl!(@stringify $coll), $($indexes)+) 21 | ); 22 | (@find $type:tt, $coll:expr, $filter:expr, $order:expr) => ( 23 | $crate::Find::<_, $type>($crate::_query_impl!(@stringify $coll), $filter, $order) 24 | ); 25 | (@insert $coll:expr, $doc:expr) => ( 26 | $crate::Insert($crate::_query_impl!(@stringify $coll), $doc) 27 | ); 28 | (@update $coll:expr, $filter:expr, $modify:expr) => ( 29 | $crate::Update($crate::_query_impl!(@stringify $coll), $filter, $modify) 30 | ); 31 | (@remove $coll:expr, $filter:expr) => ( 32 | $crate::Remove($crate::_query_impl!(@stringify $coll), $filter) 33 | ); 34 | } 35 | 36 | #[cfg(test)] 37 | mod test { 38 | use crate::actor::*; 39 | use ledb::Value; 40 | 41 | #[test] 42 | fn find() { 43 | let find_query: FindMsg = query!(find in collection); 44 | assert_eq!(find_query, Find("collection", None, query!(@order))); 45 | 46 | assert_eq!( 47 | query!(find Value in collection), 48 | Find("collection", None, query!(@order)) 49 | ); 50 | 51 | assert_eq!( 52 | query!(find Value in collection order desc), 53 | Find("collection", None, query!(@order desc)) 54 | ); 55 | 56 | assert_eq!( 57 | query!(find Value in collection order by field), 58 | Find("collection", query!(@filter), query!(@order by field 
asc)) 59 | ); 60 | 61 | assert_eq!( 62 | query!(find Value in collection order by field desc), 63 | Find("collection", query!(@filter), query!(@order by field desc)) 64 | ); 65 | 66 | assert_eq!( 67 | query!(find Value in collection where field == "abc"), 68 | Find("collection", query!(@filter field == "abc"), query!(@order)) 69 | ); 70 | 71 | assert_eq!( 72 | query!(find Value in collection where field == "abc" order desc), 73 | Find( 74 | "collection", 75 | query!(@filter field == "abc"), 76 | query!(@order desc) 77 | ) 78 | ); 79 | 80 | assert_eq!( 81 | query!(find Value in collection where some.field == "abc" order by other.field desc), 82 | Find( 83 | "collection", 84 | query!(@filter some.field == "abc"), 85 | query!(@order by other.field <) 86 | ) 87 | ); 88 | } 89 | 90 | #[test] 91 | fn update() { 92 | assert_eq!( 93 | query!(update in collection modify field = 123), 94 | Update("collection", query!(@filter), query!(@modify field = 123)) 95 | ); 96 | 97 | assert_eq!( 98 | query!(update in collection modify field.with.sub.field = 123), 99 | Update( 100 | "collection", 101 | query!(@filter), 102 | query!(@modify field.with.sub.field = 123) 103 | ) 104 | ); 105 | 106 | assert_eq!( 107 | query!(update in collection modify field.with.sub.field = 123, other.field = "abc"), 108 | Update( 109 | "collection", 110 | query!(@filter), 111 | query!(@modify field.with.sub.field = 123, other.field = "abc") 112 | ) 113 | ); 114 | 115 | assert_eq!( 116 | query!(update in collection modify field = "def" where field == "abc"), 117 | Update( 118 | "collection", 119 | query!(@filter field == "abc"), 120 | query!(@modify field = "def") 121 | ) 122 | ); 123 | 124 | assert_eq!( 125 | query!(update in collection modify field = "def", other.field += 123, some.flag~ where field == "abc" && some.flag?), 126 | Update( 127 | "collection", 128 | query!(@filter field == "abc" && some.flag?), 129 | query!(@modify field = "def", other.field += 123, some.flag~) 130 | ) 131 | ); 132 | } 133 | 
} 134 | -------------------------------------------------------------------------------- /ledb-actix/src/scope.rs: -------------------------------------------------------------------------------- 1 | use actix::Addr; 2 | use actix_web::{ 3 | error::{ 4 | ErrorBadRequest, ErrorInternalServerError, ErrorNotFound, ErrorServiceUnavailable, Result, 5 | }, 6 | web::{delete, get, patch, post, put, resource, scope, Data, Json, Path, Query}, 7 | HttpRequest, HttpResponse, Scope, 8 | }; 9 | use serde::{Deserialize, Serialize}; 10 | use serde_with::json::nested as json_str; 11 | use std::usize; 12 | 13 | use super::{ 14 | Delete, Document, DropCollection, DropIndex, EnsureCollection, EnsureIndex, Filter, Find, Get, 15 | GetCollections, GetIndexes, GetInfo, GetStats, Info, Insert, KeyField, ListCollections, Modify, 16 | Order, Primary, Put, Remove, Stats, Storage, Update, Value, 17 | }; 18 | 19 | /// Storage actor address type 20 | pub type StorageAddr = Addr; 21 | 22 | /// Scoped storage adapter for **actix-web** 23 | pub fn storage() -> Scope { 24 | scope("") 25 | .service(resource("/").name("usage").route(get().to(get_usage))) 26 | .service(resource("/info").name("info").route(get().to(get_info))) 27 | .service(resource("/stats").name("stats").route(get().to(get_stats))) 28 | .service( 29 | resource("/collection") 30 | .name("collections") 31 | .route(get().to(get_collections)) 32 | .route(post().to(ensure_collection)), 33 | ) 34 | .service( 35 | scope("/collection") 36 | .service( 37 | resource("/{collection}") 38 | .name("collection") 39 | .route(delete().to(drop_collection)) 40 | // shortcuts for document methods 41 | .route(post().to(insert_document)) 42 | .route(get().to(find_documents)) 43 | .route(patch().to(update_documents)) 44 | .route(put().to(remove_documents)), 45 | ) 46 | .service( 47 | scope("/{collection}") 48 | .service( 49 | resource("/index") 50 | .name("indexes") 51 | .route(get().to(get_indexes)) 52 | .route(post().to(ensure_index)), 53 | ) 54 | 
.service( 55 | scope("/index").service( 56 | resource("/{index}") 57 | .name("index") 58 | .route(delete().to(drop_index)), 59 | ), 60 | ) 61 | .service( 62 | resource("/document") 63 | .name("documents") 64 | .route(post().to(insert_document)) 65 | .route(get().to(find_documents)) 66 | .route(put().to(update_documents)) 67 | .route(delete().to(remove_documents)), 68 | ) 69 | .service( 70 | scope("/document").service( 71 | resource("/{id}") 72 | .name("document") 73 | .route(get().to(get_document)) 74 | .route(put().to(put_document)) 75 | .route(delete().to(delete_document)), 76 | ), 77 | ) 78 | .service( 79 | resource("/{id}") 80 | .name("document_short") 81 | .route(get().to(get_document)) 82 | .route(put().to(put_document)) 83 | .route(delete().to(delete_document)), 84 | ), 85 | ), 86 | ) 87 | } 88 | 89 | /// Usage info handler 90 | pub async fn get_usage(req: HttpRequest) -> String { 91 | format!( 92 | r#"LEDB HTTP interface {version} 93 | 94 | Storage API: 95 | 96 | # get database info 97 | GET {info} 98 | # get database statistics 99 | GET {stats} 100 | 101 | Collection API: 102 | 103 | # get list of collections 104 | GET {collections} 105 | # create new empty collection 106 | POST {collections}?name=$collection_name 107 | # drop collection with all documents 108 | DELETE {collection} 109 | 110 | Index API: 111 | 112 | # get indexes of collection 113 | GET {indexes} 114 | # create new index for collection 115 | POST {indexes}?path=$field_path&kind=$index_kind&key=$key_type 116 | # drop index of collection 117 | DELETE {index} 118 | 119 | Document API: 120 | 121 | # find documents using query 122 | GET {documents}?filter=$query&order=$ordering&offset=10&length=10 123 | GET {collection}?filter=$query&order=$ordering&offset=10&length=10 124 | # modify documents using query 125 | PUT {documents}?filter=$query&modify=$modifications 126 | PATCH {collection}?filter=$query&modify=$modifications 127 | # remove documents using query 128 | DELETE 
{documents}?filter=$query 129 | PUT {collection}?filter=$query 130 | 131 | # insert new document 132 | POST {documents} 133 | POST {collection} 134 | # get document by id 135 | GET {document} 136 | GET {document_short} 137 | # replace document 138 | PUT {document} 139 | PUT {document_short} 140 | # remove document 141 | DELETE {document} 142 | DELETE {document_short} 143 | 144 | Supported index kinds: 145 | 146 | index -- Normal index which may contain duplicated keys 147 | unique -- Index which contains unique keys only 148 | 149 | Supported key types: 150 | 151 | int -- 64-bit signed integer 152 | float -- 64-bit floating point number 153 | bool -- boolean value 154 | string -- UTF-8 string 155 | binary -- binary data 156 | 157 | See documentation: {documentation} 158 | "#, 159 | version = env!("CARGO_PKG_VERSION"), 160 | documentation = env!("CARGO_PKG_HOMEPAGE"), 161 | info = req.url_for_static("info").unwrap(), 162 | stats = req.url_for_static("stats").unwrap(), 163 | collections = req.url_for_static("collections").unwrap(), 164 | collection = req.url_for("collection", &["$collection_name"]).unwrap(), 165 | indexes = req.url_for("indexes", &["$collection_name"]).unwrap(), 166 | index = req 167 | .url_for("index", &["$collection_name", "$index_name"]) 168 | .unwrap(), 169 | documents = req.url_for("documents", &["$collection_name"]).unwrap(), 170 | document = req 171 | .url_for("document", &["$collection_name", "$document_id"]) 172 | .unwrap(), 173 | document_short = req 174 | .url_for("document_short", &["$collection_name", "$document_id"]) 175 | .unwrap(), 176 | ) 177 | } 178 | 179 | /// Storage info handler 180 | pub async fn get_info(addr: Data) -> Result> { 181 | addr.send(GetInfo) 182 | .await 183 | .map_err(ErrorServiceUnavailable) 184 | .and_then(|res| res.map(Json).map_err(ErrorInternalServerError)) 185 | } 186 | 187 | /// Storage stats handler 188 | pub async fn get_stats(addr: Data) -> Result> { 189 | addr.send(GetStats) 190 | .await 191 | 
.map_err(ErrorServiceUnavailable) 192 | .and_then(|res| res.map(Json).map_err(ErrorInternalServerError)) 193 | } 194 | 195 | /// Storage collections handler 196 | pub async fn get_collections(addr: Data) -> Result> { 197 | addr.send(GetCollections) 198 | .await 199 | .map_err(ErrorServiceUnavailable) 200 | .and_then(|res| res.map(Json).map_err(ErrorInternalServerError)) 201 | } 202 | 203 | /// Collection parameters 204 | #[derive(Serialize, Deserialize)] 205 | pub struct CollectionParams { 206 | pub name: String, 207 | } 208 | 209 | /// Ensure collection handler 210 | pub async fn ensure_collection( 211 | addr: Data, 212 | params: Query, 213 | req: HttpRequest, 214 | ) -> Result { 215 | let CollectionParams { name } = params.into_inner(); 216 | match req.url_for("collection", &[&name]) { 217 | Ok(url) => addr 218 | .send(EnsureCollection(name)) 219 | .await 220 | .map_err(ErrorServiceUnavailable) 221 | .and_then(|res| res.map_err(ErrorInternalServerError)) 222 | .map(move |res| { 223 | if res { 224 | HttpResponse::Created() 225 | } else { 226 | HttpResponse::Ok() 227 | } 228 | .header("location", url.as_str()) 229 | .finish() 230 | }), 231 | Err(error) => Err(ErrorBadRequest(format!( 232 | "Cannot get url for collection ({})", 233 | error 234 | ) /*"Invalid collection name"*/)), 235 | } 236 | } 237 | 238 | /// Drop collection handler 239 | pub async fn drop_collection(addr: Data, coll: Path) -> Result { 240 | addr.send(DropCollection(coll.into_inner())) 241 | .await 242 | .map_err(ErrorServiceUnavailable) 243 | .and_then(|res| res.map_err(ErrorInternalServerError)) 244 | .and_then(|res| { 245 | if res { 246 | Ok(HttpResponse::NoContent().finish()) 247 | } else { 248 | Err(ErrorNotFound("Collection not found")) 249 | } 250 | }) 251 | } 252 | 253 | /// Get indexes handler 254 | pub async fn get_indexes( 255 | addr: Data, 256 | coll: Path, 257 | ) -> Result>> { 258 | addr.send(GetIndexes(coll.into_inner())) 259 | .await 260 | .map_err(ErrorServiceUnavailable) 261 | 
.and_then(|res| res.map_err(ErrorInternalServerError)) 262 | .map(|indexes| Json(indexes.into_iter().collect())) 263 | } 264 | 265 | /// Ensure index handler 266 | pub async fn ensure_index( 267 | addr: Data, 268 | coll: Path, 269 | params: Query, 270 | req: HttpRequest, 271 | ) -> Result { 272 | let KeyField { path, kind, key } = params.into_inner(); 273 | if let Ok(url) = req.url_for("index", &[&coll, &path]) { 274 | addr.send(EnsureIndex(coll.into_inner(), path, kind, key)) 275 | .await 276 | .map_err(ErrorServiceUnavailable) 277 | .and_then(|res| res.map_err(ErrorInternalServerError)) 278 | .map(move |res| { 279 | if res { 280 | HttpResponse::Created() 281 | } else { 282 | HttpResponse::Ok() 283 | } 284 | .header("location", url.as_str()) 285 | .finish() 286 | }) 287 | } else { 288 | Err(ErrorBadRequest("Invalid index name")) 289 | } 290 | } 291 | 292 | /// Drop index handler 293 | pub async fn drop_index( 294 | addr: Data, 295 | path: Path<(String, String)>, 296 | ) -> Result { 297 | let (coll, idx) = path.into_inner(); 298 | addr.send(DropIndex(coll, idx)) 299 | .await 300 | .map_err(ErrorServiceUnavailable) 301 | .and_then(|res| res.map_err(ErrorInternalServerError)) 302 | .and_then(|res| { 303 | if res { 304 | Ok(HttpResponse::NoContent().finish()) 305 | } else { 306 | Err(ErrorNotFound("Index not found")) 307 | } 308 | }) 309 | } 310 | 311 | /// Insert document handler 312 | pub async fn insert_document( 313 | addr: Data, 314 | coll: Path, 315 | doc: Json, 316 | req: HttpRequest, 317 | ) -> Result { 318 | addr.send(Insert(&*coll, doc.into_inner())) 319 | .await 320 | .map_err(ErrorServiceUnavailable) 321 | .and_then(|res| res.map_err(ErrorInternalServerError)) 322 | .and_then(move |id| { 323 | req.url_for("document", &[&coll.into_inner(), &id.to_string()]) 324 | .map_err(ErrorInternalServerError) 325 | }) 326 | .map(|url| { 327 | HttpResponse::Created() 328 | .header("location", url.as_str()) 329 | .finish() 330 | }) 331 | } 332 | 333 | /// Find query 
parameters 334 | #[derive(Serialize, Deserialize)] 335 | pub struct FindParams { 336 | #[serde(default)] 337 | #[serde(with = "json_str")] 338 | pub filter: Option, 339 | #[serde(default)] 340 | #[serde(with = "json_str")] 341 | pub order: Order, 342 | #[serde(default)] 343 | pub offset: Option, 344 | #[serde(default)] 345 | pub length: Option, 346 | } 347 | 348 | /// Find documents query handler 349 | pub async fn find_documents( 350 | addr: Data, 351 | coll: Path, 352 | query: Query, 353 | ) -> Result>> { 354 | let FindParams { 355 | filter, 356 | order, 357 | offset, 358 | length, 359 | } = query.into_inner(); 360 | addr.send(Find::<_, Value>(coll.into_inner(), filter, order)) 361 | .await 362 | .map_err(ErrorServiceUnavailable) 363 | .and_then(|res| res.map_err(ErrorInternalServerError)) 364 | .and_then(move |docs| { 365 | docs.skip(offset.unwrap_or(0)) 366 | .take(length.unwrap_or(usize::MAX)) 367 | .collect::, _>>() 368 | .map_err(ErrorInternalServerError) 369 | .map(Json) 370 | }) 371 | .map_err(ErrorInternalServerError) 372 | } 373 | 374 | /// Update query parameters 375 | #[derive(Serialize, Deserialize)] 376 | pub struct UpdateParams { 377 | #[serde(default)] 378 | #[serde(with = "json_str")] 379 | pub filter: Option, 380 | pub modify: Modify, 381 | } 382 | 383 | /// Update documents query handler 384 | pub async fn update_documents( 385 | addr: Data, 386 | coll: Path, 387 | query: Query, 388 | ) -> Result { 389 | let UpdateParams { filter, modify } = query.into_inner(); 390 | addr.send(Update(coll.into_inner(), filter, modify)) 391 | .await 392 | .map_err(ErrorServiceUnavailable) 393 | .and_then(|res| res.map_err(ErrorInternalServerError)) 394 | .map(|affected_docs| { 395 | HttpResponse::NoContent() 396 | .header("affected", affected_docs.to_string()) 397 | .finish() 398 | }) 399 | } 400 | 401 | /// Remove query parameters 402 | #[derive(Serialize, Deserialize)] 403 | pub struct RemoveParams { 404 | #[serde(default)] 405 | #[serde(with = "json_str")] 406 
| pub filter: Option, 407 | } 408 | 409 | /// Remove documents query handler 410 | pub async fn remove_documents( 411 | (addr, coll, query): (Data, Path, Query), 412 | ) -> Result { 413 | let RemoveParams { filter } = query.into_inner(); 414 | addr.send(Remove(coll.into_inner(), filter)) 415 | .await 416 | .map_err(ErrorServiceUnavailable) 417 | .and_then(|res| res.map_err(ErrorInternalServerError)) 418 | .map(|affected_docs| { 419 | HttpResponse::NoContent() 420 | .header("affected", affected_docs.to_string()) 421 | .finish() 422 | }) 423 | } 424 | 425 | /// Get document handler 426 | pub async fn get_document( 427 | addr: Data, 428 | path: Path<(String, Primary)>, 429 | ) -> Result> { 430 | let (coll, id) = path.into_inner(); 431 | addr.send(Get(coll, id)) 432 | .await 433 | .map_err(ErrorServiceUnavailable) 434 | .and_then(|res| res.map_err(ErrorInternalServerError)) 435 | .and_then(|res| { 436 | res.map(Json) 437 | .ok_or_else(|| ErrorNotFound("Document not found")) 438 | }) 439 | } 440 | 441 | #[derive(Serialize)] 442 | pub struct DocumentWithId { 443 | #[serde(rename = "$")] 444 | id: Primary, 445 | #[serde(flatten)] 446 | val: Value, 447 | } 448 | 449 | impl Document for DocumentWithId {} 450 | 451 | /// Put document handler 452 | pub async fn put_document( 453 | addr: Data, 454 | path: Path<(String, Primary)>, 455 | data: Json, 456 | ) -> Result { 457 | let (coll, id) = path.into_inner(); 458 | let doc = DocumentWithId { 459 | id, 460 | val: data.into_inner(), 461 | }; 462 | addr.send(Put(coll, doc)) 463 | .await 464 | .map_err(ErrorServiceUnavailable) 465 | .and_then(|res| res.map_err(ErrorInternalServerError)) 466 | .map(|_| HttpResponse::NoContent().finish()) 467 | } 468 | 469 | /// Delete document handler 470 | pub async fn delete_document( 471 | addr: Data, 472 | path: Path<(String, Primary)>, 473 | ) -> Result { 474 | let (coll, id) = path.into_inner(); 475 | addr.send(Delete(coll, id)) 476 | .await 477 | .map_err(ErrorServiceUnavailable) 478 | 
.and_then(|res| res.map_err(ErrorInternalServerError)) 479 | .and_then(|res| { 480 | if res { 481 | Ok(HttpResponse::NoContent().finish()) 482 | } else { 483 | Err(ErrorNotFound("Document not found")) 484 | } 485 | }) 486 | } 487 | -------------------------------------------------------------------------------- /ledb-derive/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "ledb-derive" 3 | version = "0.4.0" 4 | authors = ["Kayo "] 5 | license = "MIT" 6 | readme = "README.md" 7 | repository = "https://github.com/katyo/ledb" 8 | homepage = "https://github.com/katyo/ledb/tree/master/ledb" 9 | keywords = ["storage", "document", "database", "index"] 10 | categories = ["database"] 11 | description = "Derive macros for defining storable documents" 12 | edition = "2018" 13 | 14 | [badges] 15 | travis-ci = { repository = "katyo/ledb" } 16 | appveyor = { repository = "katyo/ledb" } 17 | 18 | [lib] 19 | proc-macro = true 20 | 21 | [dependencies] 22 | proc-macro2 = "^1" 23 | quote = "^1" 24 | syn = "^1" 25 | 26 | [dev-dependencies] 27 | serde = { version = "^1", features = ["derive"] } 28 | serde_bytes = "^0.11" 29 | ledb-types = { version = "0.4", path = "../ledb-types" } 30 | ledb = { version = "0.4", path = "../ledb" } 31 | -------------------------------------------------------------------------------- /ledb-derive/README.md: -------------------------------------------------------------------------------- 1 | # Derive macro for defining storable documents 2 | 3 | [![License: MIT](https://img.shields.io/badge/License-MIT-brightgreen.svg)](https://opensource.org/licenses/MIT) 4 | [![Travis-CI Build Status](https://travis-ci.org/katyo/ledb.svg?branch=master)](https://travis-ci.org/katyo/ledb) 5 | [![Appveyor Build status](https://ci.appveyor.com/api/projects/status/1wrmhivii22emfxg)](https://ci.appveyor.com/project/katyo/ledb) 6 | [![Crates.io 
Package](https://img.shields.io/crates/v/ledb.svg?style=popout)](https://crates.io/crates/ledb) 7 | [![Docs.rs API Documentation](https://docs.rs/ledb/badge.svg)](https://docs.rs/ledb) 8 | 9 | This **derive macro** helps to define documents which can be managed using persistent storages like *LEDB*. 10 | 11 | The **LEDB** is an attempt to implement simple but efficient, lightweight but powerful document storage. 12 | 13 | The abbreviation *LEDB* may be treated as an Lightweight Embedded DB, also Low End DB, also Literium Engine DB, also LitE DB, and so on. 14 | 15 | ## Links 16 | 17 | * [ledb-types Crate on crates.io](https://crates.io/crates/ledb-types) 18 | * [ledb-types API Docs on docs.rs](https://docs.rs/ledb-types) 19 | * [ledb-derive Crate on crates.io](https://crates.io/crates/ledb-derive) 20 | * [ledb-derive API Docs on docs.rs](https://docs.rs/ledb-derive) 21 | * [ledb Crate on crates.io](https://crates.io/crates/ledb) 22 | * [ledb API Docs on docs.rs](https://docs.rs/ledb) 23 | 24 | ## Usage example 25 | 26 | ```rust 27 | use serde::{Serialize, Deserialize}; 28 | use ledb::{Document}; 29 | 30 | #[derive(Serialize, Deserialize, Document)] 31 | struct MyDoc { 32 | // define optional primary key field 33 | #[document(primary)] 34 | id: Option, 35 | // define unique key field 36 | #[document(unique)] 37 | title: String, 38 | // define index fields 39 | #[document(index)] 40 | tag: Vec, 41 | #[document(unique)] 42 | timestamp: u32, 43 | // define nested document 44 | #[document(nested)] 45 | meta: MetaData, 46 | } 47 | 48 | #[derive(Serialize, Deserialize, Document)] 49 | #[document(nested)] 50 | struct MetaData { 51 | // define index field 52 | #[document(index)] 53 | keywords: Vec, 54 | // define other fields 55 | description: String, 56 | } 57 | ``` 58 | 59 | This automatically generate `Document` traits like so: 60 | 61 | ```rust 62 | impl Document for MyDoc { 63 | // declare primary key field name 64 | fn primary_field() -> Identifier { 65 | "id".into() 
66 | } 67 | 68 | // declare other key fields for index 69 | fn key_fields() -> KeyFields { 70 | KeyFields::new() 71 | // add key fields of document 72 | .with_field(("title", String::key_type(), IndexKind::Unique)) 73 | .with_field(("tag", String::key_type(), IndexKind::Index)) 74 | .with_field(("timestamp", u32::key_type(), IndexKind::Unique)) 75 | // add key fields from nested document 76 | .with_fields(MetaData::key_fields().with_parent("meta")) 77 | } 78 | } 79 | 80 | impl Document for MetaData { 81 | // declare key fields for index 82 | fn key_fields() -> KeyFields { 83 | KeyFields::new() 84 | // add key fields of document 85 | .with_field(("keywords", KeyType::String, IndexKind::Index)) 86 | } 87 | } 88 | ``` 89 | -------------------------------------------------------------------------------- /ledb-derive/src/lib.rs: -------------------------------------------------------------------------------- 1 | /*! 2 | 3 | # Derive macro for defining storable documents 4 | 5 | This crate helps to turn rust structures into documents which can be stored, indexed and queried. 6 | 7 | ## Defining documents 8 | 9 | You may turn any struct into a document using `Document` in derive annotation like this: 10 | 11 | ```rust 12 | use serde::{Serialize, Deserialize}; 13 | use ledb::{Document}; 14 | 15 | #[derive(Serialize, Deserialize, Document)] 16 | struct MyDoc { 17 | // primary field 18 | #[document(primary)] 19 | id: Option, 20 | // other fields 21 | } 22 | ``` 23 | 24 | This generates `Document` trait implementation for struct `MyDoc`. 25 | It requires single field marked as primary key per document. 26 | Currently primary key should be an integer only. 27 | Also it not needed to be an optional field, but in this case you should take care of parsing (for example add `serde(default)` annotation). 
28 | 29 | ## Defining key fields for indexing 30 | 31 | To turn document field into key you can add document index annotation to it: 32 | 33 | ```rust 34 | # extern crate serde; 35 | # extern crate ledb; 36 | # 37 | use serde::{Serialize, Deserialize}; 38 | use ledb::{Document}; 39 | 40 | #[derive(Serialize, Deserialize, Document)] 41 | struct MyDoc { 42 | // primary field 43 | #[serde(default)] 44 | #[document(primary)] 45 | id: u32, 46 | // unique string key 47 | #[document(unique)] 48 | title: String, 49 | // normal string index 50 | #[document(index)] 51 | keywords: Vec, 52 | // unique int key 53 | #[document(unique)] 54 | timestamp: u64, 55 | } 56 | ``` 57 | 58 | ## Overriding key types 59 | 60 | In some cases it may be ambiguous to determine actual type of key by field type. 61 | For example, when you try to index binary data using `Vec`, the actually determined key type is an integer (u8). 62 | So you required to override key type manually using annotation like so: 63 | 64 | ```rust 65 | # extern crate serde; 66 | # extern crate serde_bytes; 67 | # extern crate ledb; 68 | # 69 | use serde::{Serialize, Deserialize}; 70 | use serde_bytes; 71 | use ledb::{Document}; 72 | 73 | #[derive(Serialize, Deserialize, Document)] 74 | struct MyDoc { 75 | #[document(primary)] 76 | id: u32, 77 | // ... 
78 | #[document(unique binary)] 79 | #[serde(with = "serde_bytes")] 80 | hash: Vec, 81 | } 82 | ``` 83 | 84 | ## Nested documents 85 | 86 | Of course you can add nested documents which may also have key fields: 87 | 88 | ```rust 89 | # extern crate serde; 90 | # extern crate ledb; 91 | # 92 | use std::collections::HashMap; 93 | use serde::{Serialize, Deserialize}; 94 | use ledb::{Document}; 95 | 96 | #[derive(Serialize, Deserialize, Document)] 97 | struct MyDoc { 98 | // primary field 99 | #[document(primary)] 100 | #[serde(default)] 101 | id: u32, 102 | // ...fields 103 | // simple nested document 104 | #[document(nested)] 105 | meta: Meta, 106 | // list of nested documents 107 | #[document(nested)] 108 | links: Vec, 109 | // map of nested documents 110 | #[document(nested)] 111 | props: HashMap, 112 | } 113 | 114 | #[derive(Serialize, Deserialize, Document)] 115 | #[document(nested)] 116 | struct Meta { 117 | #[document(index)] 118 | title: String, 119 | #[document(index)] 120 | author: String, 121 | annotation: String, 122 | } 123 | 124 | #[derive(Serialize, Deserialize, Document)] 125 | #[document(nested)] 126 | struct Link { 127 | href: String, 128 | text: String, 129 | } 130 | 131 | #[derive(Serialize, Deserialize, Document)] 132 | #[document(nested)] 133 | struct Prop { 134 | value: String, 135 | required: bool, 136 | } 137 | ``` 138 | 139 | The primary key field is omitted for nested documents. 140 | The nested documents should be explicitly marked as nested using `#[document(nested)]` directive as shown above. 141 | 142 | **NOTE**: When the `#[serde(flatten)]` directive is used the key fields of nested documents will be transferred to owner. 
143 | 144 | ## Simple usage example 145 | 146 | ```rust 147 | # extern crate serde; 148 | # extern crate ledb; 149 | # 150 | use serde::{Serialize, Deserialize}; 151 | use ledb::{Document}; 152 | 153 | #[derive(Serialize, Deserialize, Document)] 154 | struct MyDoc { 155 | // define optional primary key field 156 | #[document(primary)] 157 | id: Option, 158 | // define unique key field 159 | #[document(unique)] 160 | title: String, 161 | // define index fields 162 | #[document(index)] 163 | tag: Vec, 164 | #[document(unique)] 165 | timestamp: u32, 166 | // define nested document 167 | #[document(nested)] 168 | meta: MetaData, 169 | } 170 | 171 | #[derive(Serialize, Deserialize, Document)] 172 | #[document(nested)] 173 | struct MetaData { 174 | // define index field 175 | #[document(index)] 176 | keywords: Vec, 177 | // define other fields 178 | description: String, 179 | } 180 | ``` 181 | 182 | It will generate the `Document` traits like so: 183 | 184 | ```ignore 185 | impl Document for MyDoc { 186 | // declare primary key field name 187 | fn primary_field() -> Identifier { 188 | "id".into() 189 | } 190 | 191 | // declare other key fields for index 192 | fn key_fields() -> KeyFields { 193 | KeyFields::new() 194 | // add key fields of document 195 | .with_field(("title", String::key_type(), IndexKind::Unique)) 196 | .with_field(("tag", String::key_type(), IndexKind::Index)) 197 | .with_field(("timestamp", u32::key_type(), IndexKind::Unique)) 198 | // add key fields from nested document 199 | .with_fields(MetaData::key_fields().with_parent("meta")) 200 | } 201 | } 202 | 203 | impl Document for MetaData { 204 | // declare key fields for index 205 | fn key_fields() -> KeyFields { 206 | KeyFields::new() 207 | // add key fields of document 208 | .with_field(("keywords", KeyType::String, IndexKind::Index)) 209 | } 210 | } 211 | ``` 212 | 213 | */ 214 | 215 | mod document; 216 | mod wrapper; 217 | 218 | use document::derive_document_wrapped; 219 | use 
proc_macro::TokenStream; 220 | use quote::quote; 221 | use syn::{parse_macro_input, DeriveInput}; 222 | 223 | #[proc_macro_derive(Document, attributes(document))] 224 | pub fn derive_document(input: TokenStream) -> TokenStream { 225 | let input = parse_macro_input!(input as DeriveInput); 226 | derive_document_wrapped(&input) 227 | .unwrap_or_else(compile_error) 228 | .into() 229 | } 230 | 231 | fn compile_error(message: String) -> proc_macro2::TokenStream { 232 | quote! { 233 | compile_error!(#message); 234 | } 235 | } 236 | -------------------------------------------------------------------------------- /ledb-derive/src/wrapper.rs: -------------------------------------------------------------------------------- 1 | use proc_macro2::{Span, TokenStream}; 2 | use syn::{Ident}; 3 | use quote::quote; 4 | 5 | pub fn wrap_in_const( 6 | trait_: &str, 7 | type_: &Ident, 8 | code: TokenStream, 9 | ) -> TokenStream { 10 | let dummy_const = Ident::new( 11 | &format!("_IMPL_{}_FOR_{}", trait_, unraw(&type_)), 12 | Span::call_site(), 13 | ); 14 | 15 | let use_types = quote! { 16 | #[cfg_attr(feature = "cargo-clippy", allow(useless_attribute))] 17 | #[allow(rust_2018_idioms)] 18 | extern crate ledb_types as _ledb_types; 19 | }; 20 | 21 | quote! { 22 | const #dummy_const: () = { 23 | #use_types 24 | #code 25 | }; 26 | } 27 | } 28 | 29 | #[allow(deprecated)] 30 | fn unraw(ident: &Ident) -> String { 31 | // str::trim_start_matches was added in 1.30, trim_left_matches deprecated 32 | // in 1.33. We currently support rustc back to 1.15 so we need to continue 33 | // to use the deprecated one. 
34 | ident.to_string().trim_left_matches("r#").to_owned() 35 | } 36 | -------------------------------------------------------------------------------- /ledb-node/.gitignore: -------------------------------------------------------------------------------- 1 | native/target/* 2 | native/index.node 3 | native/artifacts.json 4 | **/*~ 5 | **/node_modules 6 | **/.DS_Store 7 | -------------------------------------------------------------------------------- /ledb-node/.npmignore: -------------------------------------------------------------------------------- 1 | test_db/ 2 | *.tgz 3 | native/target 4 | -------------------------------------------------------------------------------- /ledb-node/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2018 K. 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy 4 | of this software and associated documentation files (the "Software"), to deal 5 | in the Software without restriction, including without limitation the rights 6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | copies of the Software, and to permit persons to whom the Software is 8 | furnished to do so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in all 11 | copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | SOFTWARE. 
20 | -------------------------------------------------------------------------------- /ledb-node/README.md: -------------------------------------------------------------------------------- 1 | # LEDB interface for NodeJS 2 | 3 | [![License: MIT](https://img.shields.io/badge/License-MIT-brightgreen.svg)](https://opensource.org/licenses/MIT) 4 | [![npm version](https://badge.fury.io/js/ledb.svg)](https://badge.fury.io/js/ledb) 5 | [![npm downloads](https://img.shields.io/npm/dm/ledb.svg)](https://www.npmjs.com/package/ledb) 6 | [![Travis-CI Build Status](https://travis-ci.org/katyo/ledb.svg?branch=master)](https://travis-ci.org/katyo/ledb) 7 | [![Appveyor Build status](https://ci.appveyor.com/api/projects/status/1wrmhivii22emfxg)](https://ci.appveyor.com/project/katyo/ledb) 8 | 9 | The **LEDB** is an attempt to implement simple but efficient, lightweight but powerful document storage. 10 | 11 | The abbreviation *LEDB* may be treated as an Lightweight Embedded DB, also Low End DB, also Literium Engine DB, also LitE DB, and so on. 12 | 13 | ## Links 14 | 15 | * [ledb NodeJS package on npmjs.com](https://www.npmjs.com/package/ledb) 16 | * [ledb Rust Crate on crates.io](https://crates.io/crates/ledb) 17 | * [ledb Rust API Docs on docs.rs](https://docs.rs/ledb) 18 | 19 | ## Features 20 | 21 | * Processing JSON documents 22 | * Identifying documents using auto-incrementing integer primary keys. 23 | * Indexing fields of documents using unique or duplicated indexes. 24 | * Searching and ordering documents using indexed fields or primary key. 25 | * Selecting documents using complex filters with fields comparing and logical operations. 26 | * Updating documents using rich set of modifiers. 27 | * Storing documents into independent storages so called collections. 28 | * Flexible JSON query filters similar to a [MongoDB](https://en.wikipedia.org/wiki/MongoDB). 
29 | * The [LMDB](https://en.wikipedia.org/wiki/Lightning_Memory-Mapped_Database) as backend for document storage and indexing engine.
30 | 
31 | ## Installation
32 | 
33 | Until pre-compiled binaries are available, you need a [Rust](https://www.rust-lang.org/) build environment to build the native module.
34 | 
35 | Use the latest stable Rust compiler. You can install it using [rustup](https://rustup.rs/) or the packages of your system.
36 | 
37 | ## Usage example
38 | 
39 | ```typescript
40 | import { Storage } from 'ledb';
41 | 
42 | // Open storage
43 | const storage = new Storage("test_db/storage");
44 | // A storage with the same path may be opened multiple times
45 | 
46 | // Get storage info
47 | console.log("Storage info:", storage.get_info());
48 | console.log("Storage stats:", storage.get_stats());
49 | 
50 | // Get collection handle
51 | const posts = storage.collection("post");
52 | 
53 | // Insert document
54 | let doc_id = posts.insert({title: "Foo", tag: ["Bar", "Baz"], timestamp: 1234567890});
55 | 
56 | // Get document by id
57 | let doc = posts.get(doc_id);
58 | console.log("Inserted document: ", doc);
59 | 
60 | // Put new version of document
61 | posts.put(doc);
62 | 
63 | // Delete document by id
64 | posts.delete(doc_id);
65 | 
66 | // Ensure indexes
67 | posts.ensure_index("title", "unique", "string")
68 | posts.ensure_index("tag", "index", "string")
69 | 
70 | // Get indexes
71 | console.log("Indexes of post:", posts.get_indexes())
72 | 
73 | // Find all documents
74 | let docs = posts.find(null);
75 | 
76 | // Find all documents with descending ordering
77 | let docs = posts.find(null, "$desc");
78 | 
79 | // Find all documents with ascending ordering using field
80 | let docs = posts.find(null, { timestamp: "$asc" });
81 | 
82 | // Find documents using filter
83 | let docs = posts.find({ title: { $eq: "Foo" } });
84 | let docs = posts.find({ $not: { title: { $eq: "Foo" } } });
85 | let docs = posts.find({ $and: [ { timestamp: { $gt: 123456789 } } ,
86 | { tag: { $eq: "Bar" } }
] }, 87 | { timestamp: "$desc" }); 88 | let docs = posts.find({ $or: [ { title: { $eq: "Foo" } } , 89 | { title: { $eq: "Bar" } } ] }); 90 | 91 | // Number of found documents 92 | console.log("Found docs:", docs.count()) 93 | 94 | // Get documents one by one 95 | for (let doc; doc = docs.next(); ) { 96 | console.log("Found doc:", doc); 97 | } 98 | 99 | // Skip N documents 100 | docs.skip(3); 101 | 102 | // Take N documents only 103 | docs.take(5); 104 | 105 | // Get all documents as an array 106 | console.log("Found documents:", docs.collect()); 107 | 108 | // Update all documents 109 | posts.update(null, { timestamp: { $set: 0 } }); 110 | 111 | // Update documents using filter 112 | posts.update({ timestamp: { $le: 123456789 } }, { timestamp: { $set: 0 } }); 113 | 114 | // Remove all documents 115 | posts.remove(null); 116 | 117 | // Remove documents using filter 118 | posts.remove({ timestamp: { $le: 123456789 } }); 119 | ``` 120 | 121 | See also [ledb.d.ts](https://github.com/katyo/ledb/blob/master/ledb-node/index.d.ts). 
122 | -------------------------------------------------------------------------------- /ledb-node/index.d.ts: -------------------------------------------------------------------------------- 1 | export type Primary = number; 2 | 3 | export interface GenericDocument {} 4 | 5 | export type Document = { 6 | $: Primary; 7 | } & T; 8 | 9 | export interface Documents { 10 | skip(num: number): Documents; 11 | take(num: number): Documents; 12 | next(): Document | void; 13 | end(): boolean; 14 | collect(): Document[]; 15 | count(): number; 16 | } 17 | 18 | export type KeyType 19 | = 'int' 20 | | 'float' 21 | | 'bool' 22 | | 'string' 23 | | 'binary' 24 | ; 25 | 26 | export type KeyData = number | string | boolean | ArrayBufferLike; 27 | 28 | export type Value = any; 29 | 30 | export type IndexKind = 'index' | 'unique'; 31 | 32 | export interface KeyField { 33 | // field path 34 | path: string, 35 | // key type 36 | key: KeyType, 37 | // index kind 38 | kind: IndexKind, 39 | } 40 | 41 | export type KeyFields = KeyField[]; 42 | 43 | export type Filter 44 | = FilterCond 45 | | { [field: string]: FilterComp } 46 | | FilterNone 47 | ; 48 | 49 | export type FilterCond 50 | = FilterAnd 51 | | FilterOr 52 | | FilterNot 53 | ; 54 | 55 | export interface FilterAnd { $and: Filter[] } 56 | export interface FilterOr { $or: Filter[] } 57 | export interface FilterNot { $not: Filter } 58 | 59 | export type FilterComp 60 | = FilterEq 61 | | FilterIn 62 | | FilterLt 63 | | FilterLe 64 | | FilterGt 65 | | FilterGe 66 | | FilterBw 67 | | FilterHas 68 | ; 69 | 70 | export interface FilterEq { $eq: KeyData } 71 | export interface FilterIn { $in: KeyData[] } 72 | export interface FilterLt { $lt: KeyData } 73 | export interface FilterLe { $le: KeyData } 74 | export interface FilterGt { $gt: KeyData } 75 | export interface FilterGe { $ge: KeyData } 76 | export interface FilterBw { $in: [KeyData, boolean, KeyData, boolean] } 77 | 78 | export type FilterHas = '$has'; 79 | 80 | export type FilterNone = 
null; 81 | 82 | export type Order 83 | = OrderByPrimary 84 | | OrderByField; 85 | 86 | export type OrderByPrimary = OrderKind; 87 | export type OrderByField = [string, OrderKind]; 88 | 89 | export type OrderKind = '$asc' | '$desc'; 90 | 91 | export type Modify = [string, Action][]; 92 | 93 | export type Action 94 | = ActionSet 95 | | ActionDelete 96 | | ActionAdd 97 | | ActionSub 98 | | ActionMul 99 | | ActionDiv 100 | | ActionToggle 101 | | ActionReplace 102 | | ActionMerge 103 | ; 104 | 105 | export interface ActionSet { $set: Value } 106 | export type ActionDelete = '$delete'; 107 | 108 | export interface ActionAdd { $add: Value } 109 | export interface ActionSub { $sub: Value } 110 | export interface ActionMul { $mul: Value } 111 | export interface ActionDiv { $div: Value } 112 | 113 | export type ActionToggle = '$toggle'; 114 | 115 | export interface ActionReplace { $replace: [string, string] } 116 | export interface ActionSplice { $splice: [number, number, ...Value[]] } 117 | export interface ActionMerge { $merge: Value } 118 | 119 | // Storage info 120 | export interface Info { 121 | map_size: number, 122 | last_page: number, 123 | last_transaction: number, 124 | max_readers: number, 125 | num_readers: number, 126 | } 127 | 128 | // Storage stats 129 | export interface Stats { 130 | page_size: number, 131 | btree_depth: number, 132 | branch_pages: number, 133 | leaf_pages: number, 134 | overflow_pages: number, 135 | data_entries: number, 136 | } 137 | 138 | // Storage options 139 | export interface Options { 140 | // options 141 | map_size?: number, 142 | max_readers?: number, 143 | max_dbs?: number, 144 | // flags 145 | map_async?: boolean, 146 | no_lock?: boolean, 147 | no_mem_init?: boolean, 148 | no_meta_sync?: boolean, 149 | no_read_ahead?: boolean, 150 | no_sub_dir?: boolean, 151 | no_sync?: boolean, 152 | no_tls?: boolean, 153 | read_only?: boolean, 154 | write_map?: boolean, 155 | } 156 | 157 | // Storage handle interface 158 | export class Storage { 
159 | constructor(path: string, opts?: Options); 160 | 161 | get_info(): Info; 162 | get_stats(): Stats; 163 | 164 | has_collection(name: string): boolean; 165 | collection(name: string): Collection; 166 | drop_collection(name: string): boolean; 167 | get_collections(): string[]; 168 | } 169 | 170 | // Collection handle interface 171 | export class Collection { 172 | constructor(storage: Storage, name: string); 173 | 174 | insert(doc: T): Primary; 175 | find(filter: Filter, order?: Order): Documents; 176 | update(filter: Filter, modify: Modify): number; 177 | remove(filter: Filter): number; 178 | 179 | dump(): Documents; 180 | load(docs: Documents): number; 181 | 182 | purge(): void; 183 | 184 | has(id: Primary): boolean; 185 | get(id: Primary): T | null; 186 | put(doc: T): void; 187 | delete(id: Primary): boolean; 188 | 189 | get_indexes(): KeyFields; 190 | set_indexes(indexes: KeyFields): void; 191 | has_index(path: string): void; 192 | ensure_index(path: string, kind: IndexKind, key: KeyType): boolean; 193 | drop_index(path: string): boolean; 194 | } 195 | 196 | // Get openned databases 197 | export function openned(): string[]; 198 | -------------------------------------------------------------------------------- /ledb-node/native/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "ledb-node" 3 | version = "0.4.0" 4 | authors = ["Kayo "] 5 | license = "MIT" 6 | build = "build.rs" 7 | exclude = ["artifacts.json", "index.node"] 8 | edition = "2018" 9 | 10 | [lib] 11 | name = "ledb_node" 12 | crate-type = ["cdylib"] 13 | 14 | [build-dependencies] 15 | neon-build = "0.4" 16 | 17 | [dependencies] 18 | ledb = "0.4" 19 | neon = "0.4" 20 | neon-serde = "0.4" 21 | 22 | [profile.release] 23 | opt-level = 3 24 | codegen-units = 1 25 | lto = true 26 | 27 | [patch.crates-io] 28 | ledb = { path = "../../ledb" } 29 | #ledb = { git = "https://github.com/katyo/ledb" } 30 | 
-------------------------------------------------------------------------------- /ledb-node/native/build.rs: -------------------------------------------------------------------------------- 1 | extern crate neon_build; 2 | 3 | fn main() { 4 | neon_build::setup(); // must be called in build.rs 5 | 6 | // add project-specific build logic here... 7 | } 8 | -------------------------------------------------------------------------------- /ledb-node/native/src/collection.rs: -------------------------------------------------------------------------------- 1 | use std::u32; 2 | 3 | use neon::prelude::*; 4 | use neon_serde::{from_value, to_value}; 5 | 6 | use ledb::{Collection, Filter, Identifier, IndexKind, KeyType, Modify, Order, Primary, Value}; 7 | 8 | use super::{JsDocuments, JsStorage, refine}; 9 | 10 | declare_types! { 11 | /// A collection class 12 | pub class JsCollection for Collection { 13 | init(mut cx) { 14 | let storage = cx.argument::(0)?; 15 | let name = cx.argument::(1)?.value(); 16 | let collection = js_try!(cx, { 17 | let guard = cx.lock(); 18 | let storage = storage.borrow(&guard); 19 | storage.collection(&name) 20 | }); 21 | Ok(collection) 22 | } 23 | 24 | method insert(mut cx) { 25 | let raw = cx.argument(0)?; 26 | let doc: Value = from_value(&mut cx, raw)?; 27 | 28 | let this = cx.this(); 29 | 30 | let id = js_try!(cx, { 31 | let guard = cx.lock(); 32 | let collection = this.borrow(&guard); 33 | collection.insert(&doc) 34 | }); 35 | 36 | Ok(cx.number(id).upcast()) 37 | } 38 | 39 | method find(mut cx) { 40 | let filter: Option = if let Some(filter) = cx.argument_opt(0) { 41 | from_value(&mut cx, filter)? 42 | } else { 43 | None 44 | }; 45 | 46 | let order: Order = if let Some(order) = cx.argument_opt(1) { 47 | from_value(&mut cx, order)? 
48 | } else { 49 | Order::default() 50 | }; 51 | 52 | let this = cx.this(); 53 | 54 | let iter = js_try!(cx, { 55 | let guard = cx.lock(); 56 | let collection = this.borrow(&guard); 57 | collection.find(filter, order) 58 | }); 59 | 60 | let mut docs = JsDocuments::new(&mut cx, vec![JsUndefined::new()])?; 61 | 62 | { 63 | let guard = cx.lock(); 64 | let mut docs = docs.borrow_mut(&guard); 65 | docs.0 = Some(Box::new(iter)); 66 | } 67 | 68 | Ok(docs.upcast()) 69 | } 70 | 71 | method update(mut cx) { 72 | let filter: Option = if let Some(filter) = cx.argument_opt(0) { 73 | from_value(&mut cx, filter)? 74 | } else { 75 | None 76 | }; 77 | 78 | let modify_raw = cx.argument(1)?; 79 | let modify: Modify = from_value(&mut cx, modify_raw)?; 80 | 81 | let this = cx.this(); 82 | 83 | let affected = js_try!(cx, { 84 | let guard = cx.lock(); 85 | let collection = this.borrow(&guard); 86 | collection.update(filter, modify) 87 | }); 88 | 89 | Ok(cx.number(affected as u32).upcast()) 90 | } 91 | 92 | method remove(mut cx) { 93 | let filter: Option = if let Some(filter) = cx.argument_opt(0) { 94 | from_value(&mut cx, filter)? 
95 | } else { 96 | None 97 | }; 98 | 99 | let this = cx.this(); 100 | 101 | let affected = js_try!(cx, { 102 | let guard = cx.lock(); 103 | let collection = this.borrow(&guard); 104 | collection.remove(filter) 105 | }); 106 | 107 | Ok(cx.number(affected as u32).upcast()) 108 | } 109 | 110 | method dump(mut cx) { 111 | let this = cx.this(); 112 | 113 | let iter = js_try!(cx, { 114 | let guard = cx.lock(); 115 | let collection = this.borrow(&guard); 116 | collection.dump() 117 | }); 118 | 119 | let mut docs = JsDocuments::new(&mut cx, vec![JsUndefined::new()])?; 120 | 121 | { 122 | let guard = cx.lock(); 123 | let mut docs = docs.borrow_mut(&guard); 124 | docs.0 = Some(Box::new(iter)); 125 | } 126 | 127 | Ok(docs.upcast()) 128 | } 129 | 130 | //method load(mut cx) {} 131 | 132 | method purge(mut cx) { 133 | let this = cx.this(); 134 | 135 | js_try!(cx, { 136 | let guard = cx.lock(); 137 | let collection = this.borrow(&guard); 138 | collection.purge() 139 | }); 140 | 141 | Ok(cx.undefined().upcast()) 142 | } 143 | 144 | method has(mut cx) { 145 | let id = cx.argument::(0)?.value(); 146 | 147 | if id < 1.0 || id > f64::from(u32::MAX) { 148 | return cx.throw_range_error("Document id must be in range 1..N"); 149 | } 150 | 151 | let id: Primary = id as Primary; 152 | 153 | let this = cx.this(); 154 | 155 | let status = js_try!(cx, { 156 | let guard = cx.lock(); 157 | let collection = this.borrow(&guard); 158 | collection.has(id) 159 | }); 160 | 161 | Ok(cx.boolean(status).upcast()) 162 | } 163 | 164 | method get(mut cx) { 165 | let id = cx.argument::(0)?.value(); 166 | 167 | if id < 1.0 || id > f64::from(u32::MAX) { 168 | return cx.throw_range_error("Document id must be in range 1..N"); 169 | } 170 | 171 | let id: Primary = id as Primary; 172 | 173 | let this = cx.this(); 174 | 175 | let doc: Option = js_try!(cx, { 176 | let guard = cx.lock(); 177 | let collection = this.borrow(&guard); 178 | collection.get(id) 179 | }); 180 | 181 | let doc = doc.map(refine); 182 | 183 | 
Ok(js_try!(cx, to_value(&mut cx, &doc)).upcast()) 184 | } 185 | 186 | method put(mut cx) { 187 | let raw = cx.argument(0)?; 188 | let doc: Value = from_value(&mut cx, raw)?; 189 | 190 | let this = cx.this(); 191 | 192 | js_try!(cx, { 193 | let guard = cx.lock(); 194 | let collection = this.borrow(&guard); 195 | collection.put(&doc) 196 | }); 197 | 198 | Ok(cx.undefined().upcast()) 199 | } 200 | 201 | method delete(mut cx) { 202 | let id = cx.argument::(0)?.value(); 203 | 204 | if id < 1.0 || id > f64::from(u32::MAX) { 205 | return cx.throw_range_error("Document id must be in range 1..N"); 206 | } 207 | 208 | let id: Primary = id as Primary; 209 | 210 | let this = cx.this(); 211 | 212 | let status = js_try!(cx, { 213 | let guard = cx.lock(); 214 | let collection = this.borrow(&guard); 215 | collection.delete(id) 216 | }); 217 | 218 | Ok(cx.boolean(status).upcast()) 219 | } 220 | 221 | method get_indexes(mut cx) { 222 | let this = cx.this(); 223 | 224 | let indexes = js_try!(cx, { 225 | let guard = cx.lock(); 226 | let collection = this.borrow(&guard); 227 | collection.get_indexes() 228 | }); 229 | 230 | Ok(js_try!(cx, to_value(&mut cx, &indexes)).upcast()) 231 | } 232 | 233 | method set_indexes(mut cx) { 234 | let indexes = cx.argument(0)?; 235 | let indexes: Vec<(String, IndexKind, KeyType)> = from_value(&mut cx, indexes)?; 236 | let indexes: Vec<(Identifier, IndexKind, KeyType)> = indexes.into_iter().map(|(name, kind, key)| (name.into(), kind, key)).collect(); 237 | 238 | let this = cx.this(); 239 | 240 | js_try!(cx, { 241 | let guard = cx.lock(); 242 | let collection = this.borrow(&guard); 243 | collection.set_indexes(&indexes) 244 | }); 245 | 246 | Ok(cx.undefined().upcast()) 247 | } 248 | 249 | method has_index(mut cx) { 250 | let path = cx.argument::(0)?.value(); 251 | let this = cx.this(); 252 | let has = js_try!(cx, { 253 | let guard = cx.lock(); 254 | let collection = this.borrow(&guard); 255 | collection.has_index(&path) 256 | }); 257 | 
Ok(cx.boolean(has).upcast()) 258 | } 259 | 260 | method ensure_index(mut cx) { 261 | let path = cx.argument::(0)?.value(); 262 | let kind = cx.argument(1)?; 263 | let kind = from_value(&mut cx, kind)?; 264 | let key = cx.argument(2)?; 265 | let key = from_value(&mut cx, key)?; 266 | 267 | let this = cx.this(); 268 | 269 | let status = js_try!(cx, { 270 | let guard = cx.lock(); 271 | let collection = this.borrow(&guard); 272 | collection.ensure_index(path, kind, key) 273 | }); 274 | 275 | Ok(cx.boolean(status).upcast()) 276 | } 277 | 278 | method drop_index(mut cx) { 279 | let path = cx.argument::(0)?.value(); 280 | let this = cx.this(); 281 | let status = js_try!(cx, { 282 | let guard = cx.lock(); 283 | let collection = this.borrow(&guard); 284 | collection.drop_index(&path) 285 | }); 286 | Ok(cx.boolean(status).upcast()) 287 | } 288 | } 289 | } 290 | -------------------------------------------------------------------------------- /ledb-node/native/src/documents.rs: -------------------------------------------------------------------------------- 1 | use std::usize; 2 | 3 | use neon::prelude::*; 4 | use neon_serde::to_value; 5 | 6 | use ledb::{Result, Value}; 7 | 8 | use super::{refine}; 9 | 10 | pub struct Documents(pub(crate) Option>>>); 11 | 12 | static INVALID_RANGE: &str = "Argument not in range 0..N"; 13 | static INVALID_ITERATOR: &str = "Invalid documents iterator"; 14 | 15 | declare_types! 
{ 16 | /// An iterable documents 17 | pub class JsDocuments for Documents { 18 | init(_cx) { 19 | Ok(Documents(None)) 20 | } 21 | 22 | method skip(mut cx) { 23 | let num = cx.argument::(0)?.value(); 24 | 25 | if num < 0.0 || num > usize::MAX as f64 { 26 | return cx.throw_range_error(INVALID_RANGE); 27 | } 28 | 29 | let num: usize = num as usize; 30 | 31 | let mut this = cx.this(); 32 | 33 | js_try!(cx, { 34 | let guard = cx.lock(); 35 | let mut this = this.borrow_mut(&guard); 36 | 37 | if let Some(iter) = this.0.take() { 38 | this.0 = Some(Box::new(iter.skip(num))); 39 | Ok(()) 40 | } else { 41 | Err(INVALID_ITERATOR) 42 | } 43 | }); 44 | 45 | Ok(this.upcast()) 46 | } 47 | 48 | method take(mut cx) { 49 | let num = cx.argument::(0)?.value(); 50 | 51 | if num < 0.0 || num > usize::MAX as f64 { 52 | return cx.throw_range_error(INVALID_RANGE); 53 | } 54 | 55 | let num: usize = num as usize; 56 | 57 | let mut this = cx.this(); 58 | 59 | js_try!(cx, { 60 | let guard = cx.lock(); 61 | let mut this = this.borrow_mut(&guard); 62 | 63 | if let Some(iter) = this.0.take() { 64 | this.0 = Some(Box::new(iter.take(num))); 65 | Ok(()) 66 | } else { 67 | Err(INVALID_ITERATOR) 68 | } 69 | }); 70 | 71 | Ok(this.upcast()) 72 | } 73 | 74 | method end(mut cx) { 75 | let this = cx.this(); 76 | 77 | let status = { 78 | let guard = cx.lock(); 79 | let this = this.borrow(&guard); 80 | this.0.is_none() 81 | }; 82 | 83 | Ok(cx.boolean(status).upcast()) 84 | } 85 | 86 | method next(mut cx) { 87 | let mut this = cx.this(); 88 | 89 | let doc: Option = js_try!(cx, { 90 | let guard = cx.lock(); 91 | let mut this = this.borrow_mut(&guard); 92 | 93 | let doc = if let Some(iter) = &mut this.0 { 94 | iter.next().map_or(Ok(None), |res| res.map(Some)) 95 | } else { 96 | Err(INVALID_ITERATOR.into()) 97 | }; 98 | 99 | match doc { 100 | Ok(None) => { 101 | // invalidate iterator 102 | this.0 = None; 103 | Ok(None) 104 | }, 105 | Ok(Some(doc)) => Ok(Some(doc)), 106 | Err(err) => Err(err), 107 | } 108 | }); 
109 | 110 | let doc = doc.map(refine); 111 | 112 | Ok(js_try!(cx, to_value(&mut cx, &doc)).upcast()) 113 | } 114 | 115 | method collect(mut cx) { 116 | let mut this = cx.this(); 117 | 118 | let docs: Vec = js_try!(cx, { 119 | let guard = cx.lock(); 120 | let mut this = this.borrow_mut(&guard); 121 | 122 | if let Some(iter) = this.0.take() { 123 | iter.collect::>>() 124 | } else { 125 | Err(INVALID_ITERATOR.into()) 126 | } 127 | }); 128 | 129 | let docs = docs.into_iter().map(refine).collect::>(); 130 | 131 | Ok(js_try!(cx, to_value(&mut cx, &docs)).upcast()) 132 | } 133 | 134 | method count(mut cx) { 135 | let this = cx.this(); 136 | 137 | let count = js_try!(cx, { 138 | let guard = cx.lock(); 139 | let this = this.borrow(&guard); 140 | if let Some(iter) = &this.0 { 141 | Ok(iter.size_hint().0) 142 | } else { 143 | Err(INVALID_ITERATOR) 144 | } 145 | }); 146 | 147 | Ok(cx.number(count as f64).upcast()) 148 | } 149 | } 150 | } 151 | -------------------------------------------------------------------------------- /ledb-node/native/src/helper.rs: -------------------------------------------------------------------------------- 1 | macro_rules! 
js_try { 2 | ($ctx:expr, $res:expr) => { 3 | match $res { 4 | Ok(val) => val, 5 | Err(err) => return $ctx.throw_error(format!("LEDB {}", err)), 6 | } 7 | }; 8 | } 9 | -------------------------------------------------------------------------------- /ledb-node/native/src/lib.rs: -------------------------------------------------------------------------------- 1 | #[macro_use] 2 | mod helper; 3 | mod collection; 4 | mod documents; 5 | mod storage; 6 | mod refine; 7 | 8 | use collection::JsCollection; 9 | use documents::JsDocuments; 10 | use ledb::Storage; 11 | use neon::prelude::*; 12 | use neon_serde::to_value; 13 | use storage::JsStorage; 14 | use refine::refine; 15 | 16 | fn list_openned_storages(mut cx: FunctionContext) -> JsResult { 17 | let list = js_try!(cx, Storage::openned()); 18 | Ok(js_try!(cx, to_value(&mut cx, &list))) 19 | } 20 | 21 | register_module!(mut cx, { 22 | cx.export_function("openned", list_openned_storages)?; 23 | cx.export_class::("Storage")?; 24 | cx.export_class::("Collection")?; 25 | cx.export_class::("Documents")?; 26 | Ok(()) 27 | }); 28 | -------------------------------------------------------------------------------- /ledb-node/native/src/refine.rs: -------------------------------------------------------------------------------- 1 | use ledb::{Value}; 2 | 3 | pub fn refine(value: Value) -> Value { 4 | use self::Value::*; 5 | 6 | match value { 7 | Integer(n) => Float(n as f64), 8 | Array(v) => Array(v.into_iter().map(refine).collect()), 9 | Map(h) => Map(h.into_iter().map(|(k, v)| (refine(k), refine(v))).collect()), 10 | _ => value, 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /ledb-node/native/src/storage.rs: -------------------------------------------------------------------------------- 1 | use neon::prelude::*; 2 | use neon_serde::{from_value, to_value}; 3 | 4 | use ledb::{Options, Storage}; 5 | 6 | use super::JsCollection; 7 | 8 | declare_types! 
{ 9 | /// A storage class 10 | pub class JsStorage for Storage { 11 | init(mut cx) { 12 | let path = cx.argument::(0)?.value(); 13 | let opts = if let Some(opts) = cx.argument_opt(1) { 14 | from_value(&mut cx, opts)? 15 | } else { 16 | Options::default() 17 | }; 18 | Ok(js_try!(cx, Storage::new(&path, opts))) 19 | } 20 | 21 | method get_stats(mut cx) { 22 | let this = cx.this(); 23 | let stats = js_try!(cx, { 24 | let guard = cx.lock(); 25 | let storage = this.borrow(&guard); 26 | storage.get_stats() 27 | }); 28 | Ok(to_value(&mut cx, &stats)?) 29 | } 30 | 31 | method get_info(mut cx) { 32 | let this = cx.this(); 33 | let stats = js_try!(cx, { 34 | let guard = cx.lock(); 35 | let storage = this.borrow(&guard); 36 | storage.get_info() 37 | }); 38 | Ok(to_value(&mut cx, &stats)?) 39 | } 40 | 41 | method has_collection(mut cx) { 42 | let name = cx.argument::(0)?.value(); 43 | let this = cx.this(); 44 | let has = js_try!(cx, { 45 | let guard = cx.lock(); 46 | let storage = this.borrow(&guard); 47 | storage.has_collection(&name) 48 | }); 49 | Ok(cx.boolean(has).upcast()) 50 | } 51 | 52 | method collection(mut cx) { 53 | let name = cx.argument::(0)?; 54 | let this = cx.this(); 55 | Ok(JsCollection::new(&mut cx, vec![this.upcast::(), name.upcast::()])?.upcast()) 56 | } 57 | 58 | method drop_collection(mut cx) { 59 | let name = cx.argument::(0)?.value(); 60 | let this = cx.this(); 61 | let has = js_try!(cx, { 62 | let guard = cx.lock(); 63 | let storage = this.borrow(&guard); 64 | storage.drop_collection(&name) 65 | }); 66 | Ok(cx.boolean(has).upcast()) 67 | } 68 | 69 | method get_collections(mut cx) { 70 | let this = cx.this(); 71 | let list = js_try!(cx, { 72 | let guard = cx.lock(); 73 | let storage = this.borrow(&guard); 74 | storage.get_collections() 75 | }); 76 | Ok(js_try!(cx, to_value(&mut cx, &list))) 77 | } 78 | } 79 | } 80 | -------------------------------------------------------------------------------- /ledb-node/package.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "name": "ledb", 3 | "version": "0.4.0", 4 | "description": "LEDB interface for NodeJS", 5 | "main": "native/index.node", 6 | "typings": "index.d.ts", 7 | "repository": { 8 | "type": "git", 9 | "url": "https://github.com/katyo/ledb" 10 | }, 11 | "author": "Kayo ", 12 | "license": "MIT", 13 | "dependencies": { 14 | "neon-cli": "^0.4" 15 | }, 16 | "scripts": { 17 | "install": "neon build --release", 18 | "test": "mocha -r ts-node/register test/index.ts -R spec" 19 | }, 20 | "devDependencies": { 21 | "@types/fs-extra": "^9", 22 | "@types/mocha": "^7", 23 | "@types/node": "^10", 24 | "fs-extra": "^9", 25 | "mocha": "^7", 26 | "ts-node": "^8", 27 | "typescript": "^3" 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /ledb-node/test/index.ts: -------------------------------------------------------------------------------- 1 | import { deepStrictEqual as dse } from 'assert'; 2 | import { removeSync } from 'fs-extra'; 3 | import { Storage, Collection, KeyFields } from '../'; 4 | 5 | removeSync("test_db"); 6 | 7 | describe('storage', () => { 8 | const storage = new Storage("test_db/storage"); 9 | 10 | it('get_info', () => { 11 | const info = storage.get_info(); 12 | 13 | dse(typeof info, "object"); 14 | dse(typeof info.map_size, "number"); 15 | }); 16 | 17 | it('get_stats', () => { 18 | const stats = storage.get_stats(); 19 | 20 | dse(typeof stats, "object"); 21 | dse(typeof stats.page_size, "number"); 22 | }); 23 | }); 24 | 25 | describe('collection', () => { 26 | const storage = new Storage("test_db/collection"); 27 | 28 | it('create', () => { 29 | dse(storage.has_collection("post"), false); 30 | dse(storage.get_collections(), []); 31 | 32 | const coll = storage.collection("post"); 33 | 34 | dse(coll.constructor, Collection); 35 | dse(storage.has_collection("post"), true); 36 | dse(storage.get_collections(), ["post"]); 37 | }); 38 | 
39 | it('insert', () => { 40 | const coll = storage.collection("post"); 41 | 42 | dse(coll.has(1), false); 43 | dse(coll.has(2), false); 44 | dse(coll.get(1), null); 45 | dse(coll.get(2), null); 46 | 47 | const doc1 = { title: "Foo", tag: ["Bar", "Baz"], timestamp: 1234567890 }; 48 | const id1 = coll.insert(doc1); 49 | 50 | dse(id1, 1); 51 | dse(coll.has(1), true); 52 | dse(coll.has(2), false); 53 | dse(coll.get(1), { $: 1, ...doc1 }); 54 | dse(coll.get(2), null); 55 | 56 | const doc2 = { title: "Bar", tag: ["Foo", "Baz"], timestamp: 1234567899 }; 57 | const id2 = coll.insert(doc2); 58 | 59 | dse(id2, 2); 60 | dse(coll.has(1), true); 61 | dse(coll.has(2), true); 62 | dse(coll.get(1), { $: 1, ...doc1 }); 63 | dse(coll.get(2), { $: 2, ...doc2 }); 64 | 65 | const doc3 = { title: "Baz", tag: ["Bar", "Foo"], timestamp: 1234567819 }; 66 | const id3 = coll.insert(doc3); 67 | 68 | dse(id3, 3); 69 | 70 | const doc4 = { title: "Act", tag: ["Foo", "Eff"], timestamp: 1234567819 }; 71 | const id4 = coll.insert(doc4); 72 | 73 | dse(id4, 4); 74 | }); 75 | 76 | it('ensure_index', () => { 77 | const coll = storage.collection("post"); 78 | 79 | dse(coll.has_index("title"), false); 80 | dse(coll.has_index("tag"), false); 81 | dse(coll.has_index("timestamp"), false); 82 | dse(coll.get_indexes(), []); 83 | 84 | dse(coll.ensure_index("title", "unique", "string"), true); 85 | 86 | dse(coll.has_index("title"), true); 87 | dse(coll.has_index("tag"), false); 88 | dse(coll.has_index("timestamp"), false); 89 | dse(coll.get_indexes(), [{ path: "title", kind: "unique", key: "string" }] as KeyFields); 90 | 91 | dse(coll.ensure_index("tag", "index", "string"), true); 92 | 93 | dse(coll.has_index("title"), true); 94 | dse(coll.has_index("tag"), true); 95 | dse(coll.has_index("timestamp"), false); 96 | dse(coll.get_indexes(), [{ path: "title", kind: "unique", key: "string" }, 97 | { path: "tag", kind: "index", key: "string" }] as KeyFields); 98 | 99 | dse(coll.ensure_index("timestamp", "index", 
"int"), true); 100 | 101 | dse(coll.has_index("title"), true); 102 | dse(coll.has_index("tag"), true); 103 | dse(coll.has_index("timestamp"), true); 104 | dse(coll.get_indexes(), [{ path: "title", kind: "unique", key: "string" }, 105 | { path: "tag", kind: "index", key: "string" }, 106 | { path: "timestamp", kind: "index", key: "int" }] as KeyFields); 107 | }); 108 | 109 | it('find', () => { 110 | const coll = storage.collection("post"); 111 | 112 | dse(coll.find(null).count(), 4); 113 | dse(coll.find({ title: { $eq: "Foo" } }).count(), 1); 114 | dse(coll.find({ tag: { $eq: "Baz" } }).count(), 2); 115 | dse(coll.find({ tag: { $eq: "Foo" } }).count(), 3); 116 | dse(coll.find({ $or: [{ title: { $eq: "Foo" } }, { title: { $eq: "Bar" } }] }).count(), 2); 117 | dse(coll.find({ $not: { title: { $eq: "Foo" } } }).count(), 3); 118 | }); 119 | 120 | // TODO: more tests 121 | }); 122 | 123 | describe('documents', () => { 124 | const storage = new Storage("test_db/collection"); 125 | 126 | it('next', () => { 127 | const coll = storage.collection("post"); 128 | let docs = coll.find(null); 129 | 130 | dse(docs.count(), 4); 131 | dse(docs.next(), { $: 1, title: "Foo", tag: ["Bar", "Baz"], timestamp: 1234567890 }); 132 | dse(docs.next(), { $: 2, title: "Bar", tag: ["Foo", "Baz"], timestamp: 1234567899 }); 133 | dse(docs.next(), { $: 3, title: "Baz", tag: ["Bar", "Foo"], timestamp: 1234567819 }); 134 | dse(docs.next(), { $: 4, title: "Act", tag: ["Foo", "Eff"], timestamp: 1234567819 }); 135 | dse(docs.next(), null); 136 | }); 137 | 138 | it('skip', () => { 139 | const coll = storage.collection("post"); 140 | 141 | dse(coll.find(null).skip(0).count(), 4); 142 | dse(coll.find(null).skip(1).count(), 3); 143 | dse(coll.find(null).skip(2).count(), 2); 144 | dse(coll.find(null).skip(3).count(), 1); 145 | dse(coll.find(null).skip(4).count(), 0); 146 | dse(coll.find(null).skip(5).count(), 0); 147 | }); 148 | 149 | it('take', () => { 150 | const coll = storage.collection("post"); 151 | 152 
| dse(coll.find(null).take(0).count(), 0); 153 | dse(coll.find(null).take(1).count(), 1); 154 | dse(coll.find(null).take(2).count(), 2); 155 | dse(coll.find(null).take(3).count(), 3); 156 | dse(coll.find(null).take(4).count(), 4); 157 | dse(coll.find(null).take(5).count(), 4); 158 | }); 159 | 160 | it('skip take', () => { 161 | const coll = storage.collection("post"); 162 | 163 | dse(coll.find(null).skip(1).take(2).count(), 2); 164 | dse(coll.find(null).skip(2).take(3).count(), 2); 165 | dse(coll.find(null).skip(3).take(1).count(), 1); 166 | dse(coll.find(null).skip(3).take(2).count(), 1); 167 | dse(coll.find(null).take(3).skip(1).count(), 2); 168 | dse(coll.find(null).take(2).skip(1).count(), 1); 169 | }); 170 | }); 171 | -------------------------------------------------------------------------------- /ledb-node/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "module": "commonjs", 4 | "moduleResolution": "node" 5 | } 6 | } 7 | -------------------------------------------------------------------------------- /ledb-types/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "ledb-types" 3 | version = "0.4.0" 4 | authors = ["Kayo "] 5 | license = "MIT" 6 | readme = "README.md" 7 | repository = "https://github.com/katyo/ledb" 8 | homepage = "https://github.com/katyo/ledb/tree/master/ledb-types" 9 | keywords = ["storage", "document", "json", "cbor"] 10 | categories = ["database"] 11 | description = "Basic types for storable documents" 12 | edition = "2018" 13 | 14 | [badges] 15 | travis-ci = { repository = "katyo/ledb" } 16 | appveyor = { repository = "katyo/ledb" } 17 | 18 | [dependencies] 19 | serde = { version = "^1", features = ["derive"] } 20 | serde_json = { version = "^1", optional = true } 21 | serde_cbor = { version = "^0.11", optional = true } 22 | bytes = { version = "^0.5", optional = true } 23 | 24 | [features] 
25 | default = [] 26 | json = ["serde_json"] 27 | cbor = ["serde_cbor"] 28 | 29 | [package.metadata.docs.rs] 30 | features = ["json", "cbor"] 31 | -------------------------------------------------------------------------------- /ledb-types/README.md: -------------------------------------------------------------------------------- 1 | # Types for defining storable documents 2 | 3 | [![License: MIT](https://img.shields.io/badge/License-MIT-brightgreen.svg)](https://opensource.org/licenses/MIT) 4 | [![Travis-CI Build Status](https://travis-ci.org/katyo/ledb.svg?branch=master)](https://travis-ci.org/katyo/ledb) 5 | [![Appveyor Build status](https://ci.appveyor.com/api/projects/status/1wrmhivii22emfxg)](https://ci.appveyor.com/project/katyo/ledb) 6 | [![Crates.io Package](https://img.shields.io/crates/v/ledb.svg?style=popout)](https://crates.io/crates/ledb) 7 | [![Docs.rs API Documentation](https://docs.rs/ledb/badge.svg)](https://docs.rs/ledb) 8 | 9 | These types and traits are widely used for documents which can be managed using persistent storages like *LEDB*. 10 | 11 | The **LEDB** is an attempt to implement simple but efficient, lightweight but powerful document storage. 12 | 13 | The abbreviation *LEDB* may be treated as a Lightweight Embedded DB, also Low End DB, also Literium Engine DB, also LitE DB, and so on.
14 | 15 | ## Links 16 | 17 | * [ledb-types Crate on crates.io](https://crates.io/crates/ledb-types) 18 | * [ledb-types API Docs on docs.rs](https://docs.rs/ledb-types) 19 | * [ledb-derive Crate on crates.io](https://crates.io/crates/ledb-derive) 20 | * [ledb-derive API Docs on docs.rs](https://docs.rs/ledb-derive) 21 | * [ledb Crate on crates.io](https://crates.io/crates/ledb) 22 | * [ledb API Docs on docs.rs](https://docs.rs/ledb) 23 | 24 | ## Usage example 25 | 26 | ```rust 27 | use serde::{Serialize, Deserialize}; 28 | use ledb_types::{Document, Identifier, Primary, KeyFields, KeyType, IndexKind}; 29 | 30 | #[derive(Serialize, Deserialize)] 31 | struct MyDoc { 32 | // define optional primary key field 33 | id: Option<Primary>, 34 | // define other fields 35 | title: String, 36 | tag: Vec<String>, 37 | timestamp: u32, 38 | // define nested document 39 | meta: MetaData, 40 | } 41 | 42 | #[derive(Serialize, Deserialize)] 43 | struct MetaData { 44 | // define index field 45 | keywords: Vec<String>, 46 | // define other fields 47 | description: String, 48 | } 49 | 50 | impl Document for MyDoc { 51 | // declare primary key field name 52 | fn primary_field() -> Identifier { 53 | "id".into() 54 | } 55 | 56 | // declare other key fields 57 | fn key_fields() -> KeyFields { 58 | KeyFields::new() 59 | // add key fields of document 60 | .with_field(("title", KeyType::String, IndexKind::Unique)) 61 | .with_field(("tag", KeyType::String, IndexKind::Index)) 62 | .with_field(("timestamp", KeyType::Int, IndexKind::Unique)) 63 | // add key fields from nested document 64 | .with_fields(MetaData::key_fields().with_parent("meta")) 65 | } 66 | } 67 | 68 | impl Document for MetaData { 69 | // declare key fields for index 70 | fn key_fields() -> KeyFields { 71 | KeyFields::new() 72 | // add key fields of document 73 | .with_field(("keywords", KeyType::String, IndexKind::Index)) 74 | } 75 | } 76 | ``` 77 | --------------------------------------------------------------------------------
/ledb-types/src/document.rs: -------------------------------------------------------------------------------- 1 | use super::{Identifier, KeyFields}; 2 | use std::{ 3 | borrow::Cow, 4 | hash::BuildHasher, 5 | collections::{BTreeMap, BTreeSet, HashMap, HashSet, VecDeque}, 6 | rc::{Rc, Weak as RcWeak}, 7 | sync::{Arc, Mutex, RwLock, Weak as ArcWeak}, 8 | }; 9 | 10 | /// Primary key (document identifier) 11 | pub type Primary = u32; 12 | 13 | /// Identified document representation 14 | pub trait Document { 15 | /// Get the name of primary field 16 | fn primary_field() -> Identifier { 17 | "$".into() 18 | } 19 | 20 | /// Get other key fields (indexes) 21 | fn key_fields() -> KeyFields { 22 | KeyFields::new() 23 | } 24 | } 25 | 26 | impl<'a, T: Document> Document for &'a T { 27 | fn primary_field() -> Identifier { 28 | T::primary_field() 29 | } 30 | 31 | fn key_fields() -> KeyFields { 32 | T::key_fields() 33 | } 34 | } 35 | 36 | impl<'a, T: Document> Document for &'a mut T { 37 | fn primary_field() -> Identifier { 38 | T::primary_field() 39 | } 40 | 41 | fn key_fields() -> KeyFields { 42 | T::key_fields() 43 | } 44 | } 45 | 46 | impl<'a, T: Document> Document for &'a [T] { 47 | fn primary_field() -> Identifier { 48 | T::primary_field() 49 | } 50 | 51 | fn key_fields() -> KeyFields { 52 | T::key_fields() 53 | } 54 | } 55 | 56 | impl<'a, T: Document> Document for &'a mut [T] { 57 | fn primary_field() -> Identifier { 58 | T::primary_field() 59 | } 60 | 61 | fn key_fields() -> KeyFields { 62 | T::key_fields() 63 | } 64 | } 65 | 66 | impl Document for [T] { 67 | fn primary_field() -> Identifier { 68 | T::primary_field() 69 | } 70 | 71 | fn key_fields() -> KeyFields { 72 | T::key_fields() 73 | } 74 | } 75 | 76 | impl Document for Vec { 77 | fn primary_field() -> Identifier { 78 | T::primary_field() 79 | } 80 | 81 | fn key_fields() -> KeyFields { 82 | T::key_fields() 83 | } 84 | } 85 | 86 | impl Document for VecDeque { 87 | fn primary_field() -> Identifier { 88 | 
T::primary_field() 89 | } 90 | 91 | fn key_fields() -> KeyFields { 92 | T::key_fields() 93 | } 94 | } 95 | 96 | impl Document for HashSet { 97 | fn primary_field() -> Identifier { 98 | T::primary_field() 99 | } 100 | 101 | fn key_fields() -> KeyFields { 102 | T::key_fields() 103 | } 104 | } 105 | 106 | impl Document for HashMap { 107 | fn primary_field() -> Identifier { 108 | T::primary_field() 109 | } 110 | 111 | fn key_fields() -> KeyFields { 112 | T::key_fields().with_parent("*") 113 | } 114 | } 115 | 116 | impl Document for BTreeSet { 117 | fn primary_field() -> Identifier { 118 | T::primary_field() 119 | } 120 | 121 | fn key_fields() -> KeyFields { 122 | T::key_fields() 123 | } 124 | } 125 | 126 | impl Document for BTreeMap { 127 | fn primary_field() -> Identifier { 128 | T::primary_field() 129 | } 130 | 131 | fn key_fields() -> KeyFields { 132 | T::key_fields().with_parent("*") 133 | } 134 | } 135 | 136 | impl<'a, T: Document> Document for Box { 137 | fn primary_field() -> Identifier { 138 | T::primary_field() 139 | } 140 | 141 | fn key_fields() -> KeyFields { 142 | T::key_fields() 143 | } 144 | } 145 | 146 | impl<'a, T: Document> Document for Rc { 147 | fn primary_field() -> Identifier { 148 | T::primary_field() 149 | } 150 | 151 | fn key_fields() -> KeyFields { 152 | T::key_fields() 153 | } 154 | } 155 | 156 | impl<'a, T: Document> Document for RcWeak { 157 | fn primary_field() -> Identifier { 158 | T::primary_field() 159 | } 160 | 161 | fn key_fields() -> KeyFields { 162 | T::key_fields() 163 | } 164 | } 165 | 166 | impl<'a, T: Document> Document for Arc { 167 | fn primary_field() -> Identifier { 168 | T::primary_field() 169 | } 170 | 171 | fn key_fields() -> KeyFields { 172 | T::key_fields() 173 | } 174 | } 175 | 176 | impl<'a, T: Document> Document for ArcWeak { 177 | fn primary_field() -> Identifier { 178 | T::primary_field() 179 | } 180 | 181 | fn key_fields() -> KeyFields { 182 | T::key_fields() 183 | } 184 | } 185 | 186 | impl<'a, T: Document> 
Document for Mutex { 187 | fn primary_field() -> Identifier { 188 | T::primary_field() 189 | } 190 | 191 | fn key_fields() -> KeyFields { 192 | T::key_fields() 193 | } 194 | } 195 | 196 | impl<'a, T: Document> Document for RwLock { 197 | fn primary_field() -> Identifier { 198 | T::primary_field() 199 | } 200 | 201 | fn key_fields() -> KeyFields { 202 | T::key_fields() 203 | } 204 | } 205 | 206 | impl<'a, T: Document + Clone> Document for Cow<'a, T> { 207 | fn primary_field() -> Identifier { 208 | T::primary_field() 209 | } 210 | 211 | fn key_fields() -> KeyFields { 212 | T::key_fields() 213 | } 214 | } 215 | 216 | impl Document for Option { 217 | fn primary_field() -> Identifier { 218 | T::primary_field() 219 | } 220 | 221 | fn key_fields() -> KeyFields { 222 | T::key_fields() 223 | } 224 | } 225 | 226 | #[cfg(feature = "json")] 227 | impl Document for serde_json::Value {} 228 | 229 | #[cfg(feature = "cbor")] 230 | impl Document for serde_cbor::Value {} 231 | -------------------------------------------------------------------------------- /ledb-types/src/identifier.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | borrow::Borrow, 3 | fmt::{Display, Formatter, Result as FmtResult}, 4 | hash::{Hash, Hasher}, 5 | ops::Deref, 6 | }; 7 | 8 | use serde::{Serialize, Deserialize}; 9 | 10 | /// Generic string indentifier 11 | /// 12 | /// This type is used as name for collections and fields 13 | #[derive(Debug, Clone, Serialize, Deserialize)] 14 | #[serde(untagged)] 15 | pub enum Identifier { 16 | Owned(String), 17 | Refer(&'static str), 18 | } 19 | 20 | impl Default for Identifier { 21 | fn default() -> Self { 22 | Identifier::Refer("") 23 | } 24 | } 25 | 26 | impl Display for Identifier { 27 | fn fmt(&self, f: &mut Formatter) -> FmtResult { 28 | f.write_str(&self) 29 | } 30 | } 31 | 32 | impl Eq for Identifier {} 33 | 34 | impl PartialEq for Identifier { 35 | fn eq(&self, other: &Self) -> bool { 36 | self.as_ref() == 
other.as_ref() 37 | } 38 | } 39 | 40 | impl Hash for Identifier { 41 | fn hash(&self, state: &mut H) { 42 | use self::Identifier::*; 43 | match self { 44 | Owned(s) => s.hash(state), 45 | Refer(s) => s.hash(state), 46 | } 47 | } 48 | } 49 | 50 | impl AsRef for Identifier { 51 | fn as_ref(&self) -> &str { 52 | use self::Identifier::*; 53 | match self { 54 | Owned(s) => &s, 55 | Refer(s) => s, 56 | } 57 | } 58 | } 59 | 60 | impl Borrow for Identifier { 61 | #[inline] 62 | fn borrow(&self) -> &str { 63 | self.as_ref() 64 | } 65 | } 66 | 67 | impl Deref for Identifier { 68 | type Target = str; 69 | 70 | fn deref(&self) -> &str { 71 | self.as_ref() 72 | } 73 | } 74 | 75 | impl From<&'static str> for Identifier { 76 | fn from(s: &'static str) -> Self { 77 | Identifier::Refer(s) 78 | } 79 | } 80 | 81 | impl From for Identifier { 82 | fn from(s: String) -> Self { 83 | Identifier::Owned(s) 84 | } 85 | } 86 | 87 | impl<'a> From<&'a String> for Identifier { 88 | fn from(s: &String) -> Self { 89 | Identifier::Owned(s.clone()) 90 | } 91 | } 92 | -------------------------------------------------------------------------------- /ledb-types/src/index.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | borrow::Cow, 3 | hash::BuildHasher, 4 | collections::{BTreeMap, BTreeSet, HashMap, HashSet, VecDeque}, 5 | iter::IntoIterator, 6 | ops::{Deref, DerefMut}, 7 | rc::{Rc, Weak as RcWeak}, 8 | sync::{Arc, Mutex, RwLock, Weak as ArcWeak}, 9 | vec::IntoIter as VecIntoIter, 10 | }; 11 | 12 | use serde::{Serialize, Deserialize}; 13 | 14 | #[cfg(feature = "bytes")] 15 | use bytes::{Bytes, BytesMut}; 16 | 17 | /// Indexed field definition 18 | #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] 19 | pub struct KeyField { 20 | pub path: String, 21 | #[serde(default)] 22 | pub key: KeyType, 23 | #[serde(default)] 24 | pub kind: IndexKind, 25 | } 26 | 27 | impl KeyField { 28 | /// Create key field from field name 29 | #[inline] 30 | 
pub fn new(path: S) -> Self { 31 | Self { 32 | path: path.to_string(), 33 | key: KeyType::default(), 34 | kind: IndexKind::default(), 35 | } 36 | } 37 | 38 | /// Add key type 39 | #[inline] 40 | pub fn with_type(mut self, key: KeyType) -> Self { 41 | self.key = key; 42 | self 43 | } 44 | 45 | /// Add index kind 46 | #[inline] 47 | pub fn with_kind(mut self, kind: IndexKind) -> Self { 48 | self.kind = kind; 49 | self 50 | } 51 | 52 | /// Set parent path 53 | /// 54 | /// This makes key field to be child for parent path 55 | #[inline] 56 | pub fn set_parent>(&mut self, parent: S) { 57 | self.path.insert_str(0, "."); 58 | self.path.insert_str(0, parent.as_ref()); 59 | } 60 | 61 | /// Add parent path 62 | /// 63 | /// This makes key field to be child for parent path 64 | #[inline] 65 | pub fn with_parent>(mut self, parent: S) -> Self { 66 | self.set_parent(parent); 67 | self 68 | } 69 | } 70 | 71 | impl From<(S,)> for KeyField { 72 | fn from((path,): (S,)) -> Self { 73 | Self::new(path) 74 | } 75 | } 76 | 77 | impl<'a, S: ToString> From<&'a (S,)> for KeyField { 78 | fn from((path,): &(S,)) -> Self { 79 | Self::new(path.to_string()) 80 | } 81 | } 82 | 83 | impl From<(S, KeyType)> for KeyField { 84 | fn from((path, key): (S, KeyType)) -> Self { 85 | Self::new(path).with_type(key) 86 | } 87 | } 88 | 89 | impl<'a, S: ToString> From<&'a (S, KeyType)> for KeyField { 90 | fn from((path, key): &(S, KeyType)) -> Self { 91 | Self::new(path.to_string()).with_type(*key) 92 | } 93 | } 94 | 95 | impl From<(S, IndexKind)> for KeyField { 96 | fn from((path, kind): (S, IndexKind)) -> Self { 97 | Self::new(path).with_kind(kind) 98 | } 99 | } 100 | 101 | impl<'a, S: ToString> From<&'a (S, IndexKind)> for KeyField { 102 | fn from((path, kind): &(S, IndexKind)) -> Self { 103 | Self::new(path.to_string()).with_kind(*kind) 104 | } 105 | } 106 | 107 | impl From<(S, KeyType, IndexKind)> for KeyField { 108 | fn from((path, key, kind): (S, KeyType, IndexKind)) -> Self { 109 | Self { 110 | path: 
path.to_string(), 111 | key, 112 | kind, 113 | } 114 | } 115 | } 116 | 117 | impl<'a, S: ToString> From<&'a (S, KeyType, IndexKind)> for KeyField { 118 | fn from((path, key, kind): &(S, KeyType, IndexKind)) -> Self { 119 | Self { 120 | path: path.to_string(), 121 | key: *key, 122 | kind: *kind, 123 | } 124 | } 125 | } 126 | 127 | impl From<(S, IndexKind, KeyType)> for KeyField { 128 | fn from((path, kind, key): (S, IndexKind, KeyType)) -> Self { 129 | Self { 130 | path: path.to_string(), 131 | key, 132 | kind, 133 | } 134 | } 135 | } 136 | 137 | impl<'a, S: ToString> From<&'a (S, IndexKind, KeyType)> for KeyField { 138 | fn from((path, kind, key): &(S, IndexKind, KeyType)) -> Self { 139 | Self { 140 | path: path.to_string(), 141 | key: *key, 142 | kind: *kind, 143 | } 144 | } 145 | } 146 | 147 | impl Into<(String, KeyType, IndexKind)> for KeyField { 148 | fn into(self) -> (String, KeyType, IndexKind) { 149 | let KeyField { path, key, kind } = self; 150 | (path, key, kind) 151 | } 152 | } 153 | 154 | impl Into<(String, IndexKind, KeyType)> for KeyField { 155 | fn into(self) -> (String, IndexKind, KeyType) { 156 | let KeyField { path, key, kind } = self; 157 | (path, kind, key) 158 | } 159 | } 160 | 161 | /// Indexed fields definition 162 | #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] 163 | pub struct KeyFields(Vec); 164 | 165 | impl Default for KeyFields { 166 | fn default() -> Self { 167 | KeyFields(Vec::default()) 168 | } 169 | } 170 | 171 | impl KeyFields { 172 | /// Create new key fields set 173 | #[inline] 174 | pub fn new() -> Self { 175 | KeyFields::default() 176 | } 177 | 178 | /// Add key field to set 179 | #[inline] 180 | pub fn with_field(mut self, field: T) -> Self 181 | where 182 | KeyField: From, 183 | { 184 | self.push(KeyField::from(field)); 185 | self 186 | } 187 | 188 | /// Add key fields to set 189 | #[inline] 190 | pub fn with_fields(mut self, mut fields: KeyFields) -> Self { 191 | self.append(&mut *fields); 192 | self 193 | } 
194 | 195 | /// Set parent path 196 | /// 197 | /// This makes key fields in set to be children for parent path 198 | pub fn set_parent>(&mut self, parent: S) { 199 | for field in &mut self.0 { 200 | field.set_parent(&parent); 201 | } 202 | } 203 | 204 | /// Add parent path 205 | /// 206 | /// This makes key fields in set to be children for parent path 207 | pub fn with_parent>(mut self, parent: S) -> Self { 208 | self.set_parent(&parent); 209 | self 210 | } 211 | } 212 | 213 | /* 214 | impl From> for KeyFields { 215 | fn from(vec: Vec) -> Self { 216 | KeyFields(vec) 217 | } 218 | } 219 | 220 | impl<'a> From<&'a [KeyField]> for KeyFields { 221 | fn from(arr: &[KeyField]) -> Self { 222 | KeyFields(arr.into()) 223 | } 224 | } 225 | */ 226 | 227 | impl<'a, T> From<&'a [T]> for KeyFields 228 | where 229 | T: Clone, 230 | KeyField: From, 231 | { 232 | fn from(arr: &[T]) -> Self { 233 | KeyFields(arr.iter().cloned().map(KeyField::from).collect()) 234 | } 235 | } 236 | 237 | impl From> for KeyFields 238 | where 239 | KeyField: From, 240 | { 241 | fn from(vec: Vec) -> Self { 242 | KeyFields(vec.into_iter().map(KeyField::from).collect()) 243 | } 244 | } 245 | 246 | impl AsRef<[KeyField]> for KeyFields { 247 | fn as_ref(&self) -> &[KeyField] { 248 | self.0.as_ref() 249 | } 250 | } 251 | 252 | impl AsMut<[KeyField]> for KeyFields { 253 | fn as_mut(&mut self) -> &mut [KeyField] { 254 | self.0.as_mut() 255 | } 256 | } 257 | 258 | impl Deref for KeyFields { 259 | type Target = Vec; 260 | 261 | fn deref(&self) -> &Self::Target { 262 | &self.0 263 | } 264 | } 265 | 266 | impl DerefMut for KeyFields { 267 | fn deref_mut(&mut self) -> &mut Self::Target { 268 | &mut self.0 269 | } 270 | } 271 | 272 | impl IntoIterator for KeyFields { 273 | type Item = KeyField; 274 | type IntoIter = VecIntoIter; 275 | 276 | fn into_iter(self) -> Self::IntoIter { 277 | self.0.into_iter() 278 | } 279 | } 280 | 281 | /// The type of key 282 | #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, 
Deserialize)] 283 | pub enum KeyType { 284 | #[serde(rename = "int")] 285 | Int, 286 | #[serde(rename = "float")] 287 | Float, 288 | #[serde(rename = "string")] 289 | String, 290 | #[serde(rename = "binary")] 291 | Binary, 292 | #[serde(rename = "bool")] 293 | Bool, 294 | } 295 | 296 | impl Default for KeyType { 297 | fn default() -> Self { 298 | KeyType::Binary 299 | } 300 | } 301 | 302 | /// The kind of index 303 | #[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] 304 | pub enum IndexKind { 305 | /// Index which may contains duplicates 306 | #[serde(rename = "index")] 307 | Index, 308 | /// Index which contains unique keys only 309 | #[serde(rename = "unique")] 310 | Unique, 311 | } 312 | 313 | impl Default for IndexKind { 314 | fn default() -> Self { 315 | IndexKind::Index 316 | } 317 | } 318 | 319 | /// Field key type inference 320 | pub trait DocumentKeyType { 321 | /// Get type of field key by field type 322 | fn key_type() -> KeyType { 323 | KeyType::default() 324 | } 325 | } 326 | 327 | impl DocumentKeyType for bool { 328 | fn key_type() -> KeyType { 329 | KeyType::Bool 330 | } 331 | } 332 | 333 | impl DocumentKeyType for u8 { 334 | fn key_type() -> KeyType { 335 | KeyType::Int 336 | } 337 | } 338 | 339 | impl DocumentKeyType for i8 { 340 | fn key_type() -> KeyType { 341 | KeyType::Int 342 | } 343 | } 344 | 345 | impl DocumentKeyType for u16 { 346 | fn key_type() -> KeyType { 347 | KeyType::Int 348 | } 349 | } 350 | 351 | impl DocumentKeyType for i16 { 352 | fn key_type() -> KeyType { 353 | KeyType::Int 354 | } 355 | } 356 | 357 | impl DocumentKeyType for u32 { 358 | fn key_type() -> KeyType { 359 | KeyType::Int 360 | } 361 | } 362 | 363 | impl DocumentKeyType for i32 { 364 | fn key_type() -> KeyType { 365 | KeyType::Int 366 | } 367 | } 368 | 369 | impl DocumentKeyType for u64 { 370 | fn key_type() -> KeyType { 371 | KeyType::Int 372 | } 373 | } 374 | 375 | impl DocumentKeyType for i64 { 376 | fn key_type() -> KeyType { 377 | KeyType::Int 
378 | } 379 | } 380 | 381 | impl DocumentKeyType for f32 { 382 | fn key_type() -> KeyType { 383 | KeyType::Float 384 | } 385 | } 386 | 387 | impl DocumentKeyType for f64 { 388 | fn key_type() -> KeyType { 389 | KeyType::Float 390 | } 391 | } 392 | 393 | impl DocumentKeyType for String { 394 | fn key_type() -> KeyType { 395 | KeyType::String 396 | } 397 | } 398 | 399 | #[cfg(feature = "bytes")] 400 | impl DocumentKeyType for Bytes { 401 | fn key_type() -> KeyType { 402 | KeyType::Binary 403 | } 404 | } 405 | 406 | #[cfg(feature = "bytes")] 407 | impl DocumentKeyType for BytesMut { 408 | fn key_type() -> KeyType { 409 | KeyType::Binary 410 | } 411 | } 412 | 413 | impl<'a, T: DocumentKeyType> DocumentKeyType for &'a T { 414 | fn key_type() -> KeyType { 415 | T::key_type() 416 | } 417 | } 418 | 419 | impl<'a, T: DocumentKeyType> DocumentKeyType for &'a mut T { 420 | fn key_type() -> KeyType { 421 | T::key_type() 422 | } 423 | } 424 | 425 | impl DocumentKeyType for Box { 426 | fn key_type() -> KeyType { 427 | T::key_type() 428 | } 429 | } 430 | 431 | impl DocumentKeyType for Rc { 432 | fn key_type() -> KeyType { 433 | T::key_type() 434 | } 435 | } 436 | 437 | impl DocumentKeyType for RcWeak { 438 | fn key_type() -> KeyType { 439 | T::key_type() 440 | } 441 | } 442 | 443 | impl DocumentKeyType for Arc { 444 | fn key_type() -> KeyType { 445 | T::key_type() 446 | } 447 | } 448 | 449 | impl DocumentKeyType for ArcWeak { 450 | fn key_type() -> KeyType { 451 | T::key_type() 452 | } 453 | } 454 | 455 | impl DocumentKeyType for Mutex { 456 | fn key_type() -> KeyType { 457 | T::key_type() 458 | } 459 | } 460 | 461 | impl DocumentKeyType for RwLock { 462 | fn key_type() -> KeyType { 463 | T::key_type() 464 | } 465 | } 466 | 467 | impl<'a, T: DocumentKeyType> DocumentKeyType for &'a [T] { 468 | fn key_type() -> KeyType { 469 | T::key_type() 470 | } 471 | } 472 | 473 | impl<'a, T: DocumentKeyType> DocumentKeyType for &'a mut [T] { 474 | fn key_type() -> KeyType { 475 | 
T::key_type() 476 | } 477 | } 478 | 479 | impl<'a, T: DocumentKeyType + Clone> DocumentKeyType for Cow<'a, T> { 480 | fn key_type() -> KeyType { 481 | T::key_type() 482 | } 483 | } 484 | 485 | impl DocumentKeyType for [T] { 486 | fn key_type() -> KeyType { 487 | T::key_type() 488 | } 489 | } 490 | 491 | impl DocumentKeyType for Vec { 492 | fn key_type() -> KeyType { 493 | T::key_type() 494 | } 495 | } 496 | 497 | impl DocumentKeyType for VecDeque { 498 | fn key_type() -> KeyType { 499 | T::key_type() 500 | } 501 | } 502 | 503 | impl DocumentKeyType for HashSet { 504 | fn key_type() -> KeyType { 505 | T::key_type() 506 | } 507 | } 508 | 509 | impl DocumentKeyType for HashMap { 510 | fn key_type() -> KeyType { 511 | T::key_type() 512 | } 513 | } 514 | 515 | impl DocumentKeyType for BTreeSet { 516 | fn key_type() -> KeyType { 517 | T::key_type() 518 | } 519 | } 520 | 521 | impl DocumentKeyType for BTreeMap { 522 | fn key_type() -> KeyType { 523 | T::key_type() 524 | } 525 | } 526 | 527 | impl DocumentKeyType for Option { 528 | fn key_type() -> KeyType { 529 | T::key_type() 530 | } 531 | } 532 | -------------------------------------------------------------------------------- /ledb-types/src/lib.rs: -------------------------------------------------------------------------------- 1 | /*! 2 | 3 | # Types and traits for storable documents 4 | 5 | ## Document trait 6 | 7 | The basic trait which should be implemented for structs which designed to be handled as documents. 
8 | 9 | ```rust 10 | use serde::{Serialize, Deserialize}; 11 | use ledb_types::{Document, Identifier, Primary, KeyFields, KeyType, IndexKind}; 12 | 13 | #[derive(Serialize, Deserialize)] 14 | struct MyDoc { 15 | // define optional primary key field 16 | id: Option<Primary>, 17 | // define other fields 18 | title: String, 19 | tag: Vec<String>, 20 | timestamp: u32, 21 | // define nested document 22 | meta: MetaData, 23 | } 24 | 25 | #[derive(Serialize, Deserialize)] 26 | struct MetaData { 27 | // define index field 28 | keywords: Vec<String>, 29 | // define other fields 30 | description: String, 31 | } 32 | 33 | impl Document for MyDoc { 34 | // declare primary key field name 35 | fn primary_field() -> Identifier { 36 | "id".into() 37 | } 38 | 39 | // declare other key fields 40 | fn key_fields() -> KeyFields { 41 | KeyFields::new() 42 | // add key fields of document 43 | .with_field(("title", KeyType::String, IndexKind::Unique)) 44 | .with_field(("tag", KeyType::String, IndexKind::Index)) 45 | .with_field(("timestamp", KeyType::Int, IndexKind::Unique)) 46 | // add key fields from nested document 47 | .with_fields(MetaData::key_fields().with_parent("meta")) 48 | } 49 | } 50 | 51 | impl Document for MetaData { 52 | // declare key fields for index 53 | fn key_fields() -> KeyFields { 54 | KeyFields::new() 55 | // add key fields of document 56 | .with_field(("keywords", KeyType::String, IndexKind::Index)) 57 | } 58 | } 59 | ``` 60 | 61 | ## DocumentKeyType trait 62 | 63 | This trait maps Rust types to key types.
64 | 65 | */ 66 | 67 | //#[cfg(feature = "json")] 68 | //extern crate serde_json; 69 | 70 | //#[cfg(feature = "cbor")] 71 | //extern crate serde_cbor; 72 | 73 | //#[cfg(feature = "bytes")] 74 | //extern crate bytes; 75 | 76 | mod document; 77 | mod identifier; 78 | mod index; 79 | 80 | pub use self::document::*; 81 | pub use self::identifier::*; 82 | pub use self::index::*; 83 | -------------------------------------------------------------------------------- /ledb/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "ledb" 3 | version = "0.4.0" 4 | authors = ["Kayo "] 5 | license = "MIT" 6 | readme = "README.md" 7 | repository = "https://github.com/katyo/ledb" 8 | homepage = "https://github.com/katyo/ledb/tree/master/ledb" 9 | keywords = ["storage", "json", "cbor", "lmdb", "btree"] 10 | categories = ["database"] 11 | description = "Lightweight embedded database built over LMDB" 12 | edition = "2018" 13 | 14 | [badges] 15 | travis-ci = { repository = "katyo/ledb" } 16 | appveyor = { repository = "katyo/ledb" } 17 | 18 | [dependencies] 19 | byteorder = "^1" 20 | ordered-float = { version = "^1", features = ["serde"] } 21 | serde = { version = "^1", features = ["derive"] } 22 | serde_cbor = "^0.11" 23 | ron = "^0.6" 24 | ledb-types = { version = "0.4", path = "../ledb-types", features = ["json", "cbor"] } 25 | ledb-derive = { version = "0.4", path = "../ledb-derive", optional = true } 26 | lmdb = { package = "lmdb-zero", version = "0.4" } 27 | regex = "^1" 28 | supercow = "^0.1" 29 | dirs = "^2" 30 | dunce = "^1" 31 | 32 | [dev-dependencies] 33 | serde_json = "^1" 34 | ledb-derive = { version = "0.4", path = "../ledb-derive", optional = false } 35 | 36 | [features] 37 | default = ["derive"] 38 | derive = ["ledb-derive"] 39 | json = ["ledb-types/json"] 40 | cbor = ["ledb-types/cbor"] 41 | 42 | [package.metadata.docs.rs] 43 | features = ["json", "cbor"] 44 | 
-------------------------------------------------------------------------------- /ledb/README.md: -------------------------------------------------------------------------------- 1 | # Lightweight embedded database 2 | 3 | [![License: MIT](https://img.shields.io/badge/License-MIT-brightgreen.svg)](https://opensource.org/licenses/MIT) 4 | [![Travis-CI Build Status](https://travis-ci.org/katyo/ledb.svg?branch=master)](https://travis-ci.org/katyo/ledb) 5 | [![Appveyor Build status](https://ci.appveyor.com/api/projects/status/1wrmhivii22emfxg)](https://ci.appveyor.com/project/katyo/ledb) 6 | [![Crates.io Package](https://img.shields.io/crates/v/ledb.svg?style=popout)](https://crates.io/crates/ledb) 7 | [![Docs.rs API Documentation](https://docs.rs/ledb/badge.svg)](https://docs.rs/ledb) 8 | 9 | The **LEDB** is an attempt to implement simple but efficient, lightweight but powerful document storage. 10 | 11 | The abbreviation *LEDB* may be treated as a Lightweight Embedded DB, also Low End DB, also Literium Engine DB, also LitE DB, and so on. 12 | 13 | ## Links 14 | 15 | * [ledb Crate on crates.io](https://crates.io/crates/ledb) 16 | * [ledb API Docs on docs.rs](https://docs.rs/ledb) 17 | * [ledb-types Crate on crates.io](https://crates.io/crates/ledb-types) 18 | * [ledb-types API Docs on docs.rs](https://docs.rs/ledb-types) 19 | * [ledb-derive Crate on crates.io](https://crates.io/crates/ledb-derive) 20 | * [ledb-derive API Docs on docs.rs](https://docs.rs/ledb-derive) 21 | * [ledb-actix Crate on crates.io](https://crates.io/crates/ledb-actix) 22 | * [ledb-actix API Docs on docs.rs](https://docs.rs/ledb-actix) 23 | * [ledb NodeJS addon on npmjs.com](https://npmjs.com/package/ledb) 24 | 25 | ## Key features 26 | 27 | * Processing documents which implement `Serialize` and `Deserialize` traits from [serde](https://serde.rs/). 28 | * Identifying documents using auto-incrementing integer primary keys. 29 | * Indexing any fields of documents using unique or duplicated keys.
30 | * Searching and ordering documents using indexed fields or primary key. 31 | * Selecting documents using complex filters with fields comparing and logical operations. 32 | * Updating documents using rich set of modifiers. 33 | * Storing documents into independent storages so called collections. 34 | * Flexible `query!` macro which helps write clear and readable queries. 35 | * Using [LMDB](https://en.wikipedia.org/wiki/Lightning_Memory-Mapped_Database) as backend for document storage and indexing engine. 36 | 37 | ## Usage example 38 | 39 | ```rust 40 | use serde::{Serialize, Deserialize}; 41 | use ledb::{Options, Storage, IndexKind, KeyType, Filter, Comp, Order, OrderKind, Primary}; 42 | 43 | #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Document)] 44 | struct MyDoc { 45 | #[document(primary)] 46 | id: Option, 47 | title: String, 48 | #[document(index)] 49 | tag: Vec, 50 | #[document(unique)] 51 | timestamp: u32, 52 | } 53 | 54 | fn main() { 55 | let db_path = ".test_dbs/my_temp_db"; 56 | let _ = std::fs::remove_dir_all(&db_path); 57 | 58 | // Open storage 59 | let storage = Storage::new(&db_path, Options::default()).unwrap(); 60 | 61 | // Get collection 62 | let collection = storage.collection("my-docs").unwrap(); 63 | 64 | // Ensure indexes 65 | query!(index for collection 66 | title str unique, 67 | tag str, 68 | timestamp int unique, 69 | ).unwrap(); 70 | 71 | // Insert JSON document 72 | let first_id = query!(insert into collection { 73 | "title": "First title", 74 | "tag": ["some tag", "other tag"], 75 | "timestamp": 1234567890, 76 | }).unwrap(); 77 | 78 | // Insert typed document 79 | let second_id = collection.insert(&MyDoc { 80 | title: "Second title".into(), 81 | tag: vec![], 82 | timestamp: 1234567657, 83 | }).unwrap(); 84 | 85 | // Find documents 86 | let found_docs = query!( 87 | find MyDoc in collection 88 | where title == "First title" 89 | ).unwrap().collect::, _>>().unwrap(); 90 | 91 | // Update documents 92 | let n_affected = 
query!( 93 | update in collection modify title = "Other title" 94 | where title == "First title" 95 | ).unwrap(); 96 | 97 | // Find documents with descending ordering 98 | let found_docs = query!( 99 | find MyDoc in collection order desc 100 | ).unwrap().collect::<Result<Vec<_>, _>>().unwrap(); 101 | 102 | // Remove documents 103 | let n_affected = query!( 104 | remove from collection where title == "Other title" 105 | ).unwrap(); 106 | } 107 | ``` 108 | -------------------------------------------------------------------------------- /ledb/src/document.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | convert::TryInto, 3 | ops::{Deref, DerefMut}, 4 | }; 5 | 6 | use serde::{de::DeserializeOwned, Serialize}; 7 | pub use serde_cbor::Value; 8 | 9 | use super::{Document, Primary, Result, ResultWrap}; 10 | 11 | /// Raw document with id representation 12 | #[derive(Debug, Clone, PartialEq)] 13 | pub struct RawDocument(Option<Primary>, Value); 14 | 15 | impl RawDocument { 16 | /// Create document using raw data 17 | #[inline] 18 | pub fn new(doc: Value) -> Self { 19 | RawDocument(None, doc) 20 | } 21 | 22 | /// Add id to document 23 | #[inline] 24 | pub fn with_id(mut self, id: Primary) -> Self { 25 | self.0 = Some(id); 26 | self 27 | } 28 | 29 | /// Remove id from document 30 | #[inline] 31 | pub fn without_id(mut self) -> Self { 32 | self.0 = None; 33 | self 34 | } 35 | 36 | /// Checks when document has primary key/identifier 37 | #[inline] 38 | pub fn has_id(&self) -> bool { 39 | self.0.is_some() 40 | } 41 | 42 | /// Get the primary key/identifier of document 43 | #[inline] 44 | pub fn get_id(&self) -> &Option<Primary> { 45 | &self.0 46 | } 47 | 48 | /// Require the primary key/identifier of document 49 | #[inline] 50 | pub fn req_id(&self) -> Result<Primary> { 51 | self.get_id() 52 | .ok_or_else(|| "Missing document id") 53 | .wrap_err() 54 | } 55 | 56 | /// Unwrap document value 57 | #[inline] 58 | pub fn into_inner(self) -> Value { 59 | self.1 60 | } 61 
| 62 | /// Convert document to binary representation 63 | /// 64 | /// At this moment we use [CBOR](https://cbor.io/) for effectively store documents into DB backend. 65 | /// Since the internal representation does not contain primary identifier, it adds on reading documents from DB. 66 | /// 67 | pub fn to_bin(&self) -> Result<Vec<u8>> { 68 | serde_cbor::to_vec(&self.1).wrap_err() 69 | } 70 | 71 | /// Restore document from binary representation 72 | /// 73 | /// At this moment we use [CBOR](https://cbor.io/) for effectively store documents into DB backend. 74 | /// Since the internal representation does not contain primary identifier, it adds on reading documents from DB. 75 | /// 76 | pub fn from_bin(raw: &[u8]) -> Result<Self> { 77 | serde_cbor::from_slice(raw).map(Self::new).wrap_err() 78 | } 79 | 80 | /// Convert typed document to raw representation 81 | /// 82 | /// Typically the application deals with typed documents which represented by specific structures. 83 | /// The database backend processes generic document representation which is CBOR Value. 84 | pub fn from_doc<T>(doc: &T) -> Result<Self> 85 | where 86 | T: Serialize + Document + Sized, 87 | { 88 | let mut raw = to_value(doc)?; 89 | 90 | let id = if let Value::Map(ref mut obj) = &mut raw { 91 | // split primary field value 92 | obj.remove(&Value::Text(T::primary_field().as_ref().into())) 93 | } else { 94 | return Err("Document must be represented as an object").wrap_err(); 95 | }; 96 | 97 | let id = match id { 98 | None => None, 99 | Some(Value::Null) => None, 100 | Some(Value::Integer(id)) => Some(id as u32), 101 | _ => return Err("Document primary must be an integer").wrap_err(), 102 | }; 103 | 104 | Ok(RawDocument(id, raw)) 105 | } 106 | 107 | /// Restore typed document from raw representation 108 | /// 109 | /// Typically the application deals with typed documents which represented by specific structures. 110 | /// The database backend processes generic document representation which is CBOR Value. 
111 | pub fn into_doc<T>(self) -> Result<T> 112 | where 113 | T: DeserializeOwned + Document, 114 | { 115 | let RawDocument(id, mut raw) = self; 116 | if let Value::Map(ref mut obj) = &mut raw { 117 | if let Some(id) = &id { 118 | obj.insert( 119 | Value::Text(T::primary_field().as_ref().into()), 120 | Value::Integer(u64::from(*id).try_into().unwrap()), 121 | ); 122 | } 123 | } else { 124 | return Err("Document must be represented as an object").wrap_err(); 125 | } 126 | 127 | serde_cbor::value::from_value(raw).wrap_err() 128 | } 129 | } 130 | 131 | impl Deref for RawDocument { 132 | type Target = Value; 133 | 134 | fn deref(&self) -> &Self::Target { 135 | &self.1 136 | } 137 | } 138 | 139 | impl DerefMut for RawDocument { 140 | fn deref_mut(&mut self) -> &mut Value { 141 | &mut self.1 142 | } 143 | } 144 | 145 | #[inline] 146 | pub fn to_value<T: Serialize>(value: T) -> Result<Value> { 147 | serde_cbor::value::to_value(value).wrap_err() 148 | } 149 | 150 | #[cfg(test)] 151 | mod test { 152 | use super::super::{Document, Identifier, Primary, RawDocument}; 153 | use serde::{Deserialize, Serialize}; 154 | 155 | #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] 156 | pub struct User { 157 | id: Option<Primary>, 158 | name: String, 159 | email: Option<String>, 160 | } 161 | 162 | impl Document for User { 163 | fn primary_field() -> Identifier { 164 | "id".into() 165 | } 166 | } 167 | 168 | #[test] 169 | fn raw_doc() { 170 | let src = User { 171 | id: None, 172 | name: "elen".into(), 173 | email: None, 174 | }; 175 | let raw = RawDocument::from_doc(&src).unwrap(); 176 | 177 | let res = raw.clone().into_doc::<User>().unwrap(); 178 | assert_eq!(res, src); 179 | assert_eq!(RawDocument::from_doc(&res).unwrap(), raw); 180 | } 181 | 182 | #[test] 183 | fn gen_doc() { 184 | let src = User { 185 | id: None, 186 | name: "elen".into(), 187 | email: None, 188 | }; 189 | let bin = RawDocument::from_doc(&src).unwrap().to_bin().unwrap(); 190 | 191 | let raw = RawDocument::from_bin(&bin).unwrap(); 192 | let res = 
RawDocument::from_doc(&src).unwrap(); 193 | let doc = res.clone().into_doc::<User>().unwrap(); 194 | 195 | assert_eq!(res, raw); 196 | assert_eq!(doc, src); 197 | assert_eq!(res.to_bin().unwrap(), raw.to_bin().unwrap()); 198 | } 199 | 200 | #[test] 201 | #[ignore] 202 | fn duplicate_id() { 203 | let src = User { 204 | id: Some(1), 205 | name: "ivan".into(), 206 | email: None, 207 | }; 208 | let res = RawDocument::from_doc(&src).unwrap().into_doc::<User>(); 209 | 210 | assert!(res.is_err()); 211 | } 212 | } 213 | -------------------------------------------------------------------------------- /ledb/src/enumerate.rs: -------------------------------------------------------------------------------- 1 | use std::sync::atomic::{AtomicUsize, Ordering}; 2 | 3 | pub(crate) type Serial = usize; 4 | 5 | pub(crate) trait Enumerable { 6 | fn enumerate(&mut self, new_serial: Serial); 7 | } 8 | 9 | /// Serial generator 10 | pub(crate) struct SerialGenerator { 11 | serial: AtomicUsize, 12 | } 13 | 14 | impl SerialGenerator { 15 | pub(crate) fn new() -> Self { 16 | Self { 17 | serial: AtomicUsize::new(0), 18 | } 19 | } 20 | 21 | pub(crate) fn set(&self, value: Serial) { 22 | self.serial.store(value, Ordering::SeqCst); 23 | } 24 | 25 | pub(crate) fn gen(&self) -> Serial { 26 | self.serial.fetch_add(1, Ordering::SeqCst) 27 | } 28 | 29 | pub(crate) fn enumerate<E: Enumerable>(&self, mut data: E) -> E { 30 | data.enumerate(self.gen()); 31 | data 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /ledb/src/error.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | fmt::{self, Display}, 3 | io::Error as IoError, 4 | result::Result as StdResult, 5 | str::Utf8Error, 6 | sync::PoisonError, 7 | error::Error as StdError, 8 | }; 9 | 10 | use lmdb::error::Error as DbError; 11 | use ron::Error as RonError; 12 | use serde_cbor::error::Error as CborError; 13 | 14 | /// Database error type 15 | #[derive(Debug)] 16 | pub enum 
Error { 17 | DocError(String), 18 | DbError(DbError), 19 | StrError(Utf8Error), 20 | DataError(CborError), 21 | StorageError(String), 22 | IoError(IoError), 23 | SyncError(String), 24 | } 25 | 26 | impl Display for Error { 27 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 28 | use Error::*; 29 | match self { 30 | DocError(s) => write!(f, "Document error: {}", s), 31 | DbError(e) => write!(f, "Database error: {}", e), 32 | StrError(e) => write!(f, "String error: {}", e), 33 | DataError(e) => write!(f, "Data coding error: {}", e), 34 | StorageError(s) => write!(f, "Storage error: {}", s), 35 | IoError(e) => write!(f, "I/O Error: {}", e), 36 | SyncError(s) => write!(f, "Sync error: {}", s), 37 | } 38 | } 39 | } 40 | 41 | impl StdError for Error { 42 | fn source(&self) -> Option<&(dyn StdError + 'static)> { 43 | use Error::*; 44 | match self { 45 | DocError(_) => None, 46 | DbError(e) => Some(e), 47 | StrError(e) => Some(e), 48 | DataError(e) => Some(e), 49 | StorageError(_) => None, 50 | IoError(e) => Some(e), 51 | SyncError(_) => None, 52 | } 53 | } 54 | } 55 | 56 | impl Into<String> for Error { 57 | fn into(self) -> String { 58 | self.to_string() 59 | } 60 | } 61 | 62 | /// Database result type 63 | pub type Result<T> = StdResult<T, Error>; 64 | 65 | impl From<CborError> for Error { 66 | fn from(e: CborError) -> Self { 67 | Error::DataError(e) 68 | } 69 | } 70 | 71 | impl From<RonError> for Error { 72 | fn from(e: RonError) -> Self { 73 | Error::StorageError(format!("{}", e)) 74 | } 75 | } 76 | 77 | impl From<DbError> for Error { 78 | fn from(e: DbError) -> Self { 79 | Error::DbError(e) 80 | } 81 | } 82 | 83 | impl From<IoError> for Error { 84 | fn from(e: IoError) -> Self { 85 | Error::IoError(e) 86 | } 87 | } 88 | 89 | impl<T> From<PoisonError<T>> for Error 90 | where 91 | PoisonError<T>: Display, 92 | { 93 | fn from(e: PoisonError<T>) -> Self { 94 | Error::SyncError(format!("{}", e)) 95 | } 96 | } 97 | 98 | impl From<Utf8Error> for Error { 99 | fn from(e: Utf8Error) -> Self { 100 | Error::StrError(e) 101 | } 102 | } 103 | 104 | impl From<String> for 
Error { 105 | fn from(e: String) -> Self { 106 | Error::DocError(e) 107 | } 108 | } 109 | 110 | impl<'a> From<&'a str> for Error { 111 | fn from(e: &'a str) -> Self { 112 | Error::DocError(e.into()) 113 | } 114 | } 115 | 116 | /// The helper for converting results with different error types into generic result 117 | pub trait ResultWrap<T> { 118 | fn wrap_err(self) -> Result<T>; 119 | } 120 | 121 | impl<T, E> ResultWrap<T> for StdResult<T, E> 122 | where 123 | Error: From<E>, 124 | { 125 | /// Convert result 126 | fn wrap_err(self) -> Result<T> { 127 | self.map_err(Error::from) 128 | } 129 | } 130 | -------------------------------------------------------------------------------- /ledb/src/filter.rs: -------------------------------------------------------------------------------- 1 | use std::iter::once; 2 | 3 | use serde::{Serialize, Deserialize}; 4 | use lmdb::{ReadTransaction}; 5 | 6 | use super::{Identifier, Result, KeyData, Selection, Collection}; 7 | 8 | /// Comparison operator of filter 9 | #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] 10 | pub enum Comp { 11 | /// Equal 12 | #[serde(rename = "$eq")] 13 | Eq(KeyData), 14 | /// In set (equal one of) 15 | #[serde(rename = "$in")] 16 | In(Vec<KeyData>), 17 | /// Less than 18 | #[serde(rename = "$lt")] 19 | Lt(KeyData), 20 | /// Less than or equal 21 | #[serde(rename = "$le")] 22 | Le(KeyData), 23 | /// Greater than 24 | #[serde(rename = "$gt")] 25 | Gt(KeyData), 26 | /// Greater than or equal 27 | #[serde(rename = "$ge")] 28 | Ge(KeyData), 29 | /// Between (in range) 30 | #[serde(rename = "$bw")] 31 | Bw(KeyData, bool, KeyData, bool), 32 | /// Field exists (not null) 33 | #[serde(rename = "$has")] 34 | Has, 35 | } 36 | 37 | /// Condition operator of filter 38 | #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] 39 | pub enum Cond { 40 | /// Not (sub-condition is false) 41 | #[serde(rename = "$not")] 42 | Not(Box<Filter>), 43 | /// And (all of sub-conditions is true) 44 | #[serde(rename = "$and")] 45 | And(Vec<Filter>), 46 | /// Or 
(any of sub-conditions is true) 47 | #[serde(rename = "$or")] 48 | Or(Vec), 49 | } 50 | 51 | /// Filter operator 52 | #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] 53 | #[serde(untagged)] 54 | pub enum Filter { 55 | /// Condition operator 56 | Cond(Cond), 57 | /// Comparison operator 58 | #[serde(with = "comp")] 59 | Comp(Identifier, Comp), 60 | } 61 | 62 | impl Filter { 63 | pub fn cond(cond: Cond) -> Self { 64 | Filter::Cond(cond) 65 | } 66 | 67 | pub fn comp>(field: F, comp: Comp) -> Self { 68 | Filter::Comp(field.into(), comp) 69 | } 70 | 71 | pub(crate) fn apply(&self, txn: &ReadTransaction<'static>, coll: &Collection) -> Result { 72 | match self { 73 | Filter::Cond(cond) => { 74 | use self::Cond::*; 75 | Ok(match cond { 76 | Not(filter) => !filter.apply(txn, coll)?, 77 | And(filters) => { 78 | let mut res = !Selection::default(); // universe 79 | for filter in filters { 80 | res = res & filter.apply(txn, coll)?; 81 | } 82 | res 83 | }, 84 | Or(filters) => { 85 | let mut res = Selection::default(); // empty 86 | for filter in filters { 87 | res = res | filter.apply(txn, coll)?; 88 | } 89 | res 90 | }, 91 | }) 92 | }, 93 | Filter::Comp(path, comp) => { 94 | let index = coll.req_index(path)?; 95 | let access = txn.access(); 96 | use self::Comp::*; 97 | Ok(match comp { 98 | Eq(val) => Selection::new(index.query_set(&txn, &access, once(val))?, false), 99 | In(vals) => Selection::new(index.query_set(&txn, &access, vals.iter())?, false), 100 | Gt(val) => Selection::new(index.query_range(&txn, &access, Some((val, false)), None)?, false), 101 | Ge(val) => Selection::new(index.query_range(&txn, &access, Some((val, true)), None)?, false), 102 | Lt(val) => Selection::new(index.query_range(&txn, &access, None, Some((val, false)))?, false), 103 | Le(val) => Selection::new(index.query_range(&txn, &access, None, Some((val, true)))?, false), 104 | Bw(val1, inc1, val2, inc2) => Selection::new(index.query_range(&txn, &access, Some((val1, *inc1)), Some((val2, 
*inc2)))?, false), 105 | Has => Selection::new(index.query_range(&txn, &access, None, None)?, false), 106 | }) 107 | }, 108 | } 109 | } 110 | } 111 | 112 | /// The kind ot order 113 | #[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] 114 | pub enum OrderKind { 115 | /// Ascending ordering 116 | #[serde(rename="$asc")] 117 | Asc, 118 | /// Descending ordering 119 | #[serde(rename="$desc")] 120 | Desc, 121 | } 122 | 123 | impl Default for OrderKind { 124 | fn default() -> Self { OrderKind::Asc } 125 | } 126 | 127 | /// Ordering operator 128 | #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] 129 | #[serde(untagged)] 130 | pub enum Order { 131 | /// Order by primary key/identifier of document 132 | /// 133 | /// This is default ordering 134 | /// 135 | Primary(OrderKind), 136 | 137 | /// Order by specified indexed field 138 | #[serde(with = "order")] 139 | Field(Identifier, OrderKind), 140 | } 141 | 142 | impl Default for Order { 143 | fn default() -> Self { Order::Primary(OrderKind::default()) } 144 | } 145 | 146 | impl Order { 147 | pub fn primary(kind: OrderKind) -> Self { 148 | Order::Primary(kind) 149 | } 150 | 151 | pub fn field>(field: F, kind: OrderKind) -> Self { 152 | Order::Field(field.into(), kind) 153 | } 154 | } 155 | 156 | mod comp { 157 | use super::{Identifier, Comp}; 158 | use std::collections::HashMap; 159 | use serde::{Serializer, Deserializer, Deserialize, de::{self}, ser::{SerializeMap}}; 160 | 161 | pub fn serialize(field: &Identifier, op: &Comp, serializer: S) -> Result { 162 | let mut map = serializer.serialize_map(Some(1))?; 163 | map.serialize_entry(&field, &op)?; 164 | map.end() 165 | } 166 | 167 | pub fn deserialize<'de, D: Deserializer<'de>>(deserializer: D) -> Result<(Identifier, Comp), D::Error> { 168 | let map: HashMap = HashMap::deserialize(deserializer)?; 169 | let mut it = map.into_iter(); 170 | match (it.next(), it.next()) { 171 | (Some((field, op)), None) => Ok((field.into(), op)), 172 | _ => 
Err(de::Error::custom("Not a comp op")) 173 | } 174 | } 175 | } 176 | 177 | mod order { 178 | use super::{Identifier, OrderKind}; 179 | use std::collections::HashMap; 180 | use serde::{Serializer, Deserializer, Deserialize, de::{self}, ser::{SerializeMap}}; 181 | 182 | #[allow(clippy::trivially_copy_pass_by_ref)] 183 | pub fn serialize(field: &Identifier, op: &OrderKind, serializer: S) -> Result { 184 | let mut map = serializer.serialize_map(Some(1))?; 185 | map.serialize_entry(&field, &op)?; 186 | map.end() 187 | } 188 | 189 | pub fn deserialize<'de, D: Deserializer<'de>>(deserializer: D) -> Result<(Identifier, OrderKind), D::Error> { 190 | let map: HashMap = HashMap::deserialize(deserializer)?; 191 | let mut it = map.into_iter(); 192 | match (it.next(), it.next()) { 193 | (Some((field, op)), None) => Ok((field.into(), op)), 194 | _ => Err(de::Error::custom("Not an order kind")) 195 | } 196 | } 197 | } 198 | 199 | #[cfg(test)] 200 | mod test { 201 | use super::{Filter, Comp, Cond, KeyData, Order, OrderKind}; 202 | use serde_json::{from_str, to_string, Value, json}; 203 | 204 | #[test] 205 | fn parse_comp_eq() { 206 | test_parse!(Filter, json!({ "field": { "$eq": 0 } }), 207 | Filter::Comp("field".into(), 208 | Comp::Eq(KeyData::Int(0)) 209 | )); 210 | test_parse!(Filter, json!({ "name": { "$eq": "vlada" } }), 211 | Filter::Comp("name".into(), 212 | Comp::Eq(KeyData::String("vlada".into())) 213 | )); 214 | } 215 | 216 | #[test] 217 | fn build_comp_eq() { 218 | test_build!(Filter::Comp("field".into(), 219 | Comp::Eq(KeyData::Int(0))), 220 | json!({ "field": { "$eq": 0 } })); 221 | test_build!(Filter::Comp("name".into(), 222 | Comp::Eq(KeyData::String("vlada".into()))), 223 | json!({ "name": { "$eq": "vlada" } })); 224 | } 225 | 226 | #[test] 227 | fn parse_cond_not() { 228 | test_parse!(Filter, json!({ "$not": { "a":{ "$gt": 9 } } }), 229 | Filter::Cond(Cond::Not( 230 | Box::new(Filter::Comp("a".into(), Comp::Gt(KeyData::Int(9)))), 231 | ))); 232 | } 233 | 234 | 
#[test] 235 | fn build_cond_not() { 236 | test_build!(Filter::Cond(Cond::Not( 237 | Box::new(Filter::Comp("a".into(), Comp::Gt(KeyData::Int(9)))) 238 | )), json!({ "$not": { "a": { "$gt": 9 } } })); 239 | } 240 | 241 | #[test] 242 | fn parse_cond_and() { 243 | test_parse!(Filter, json!({ "$and": [ { "a": { "$eq": 11 } }, { "b": { "$lt": -1 } } ] }), 244 | Filter::Cond(Cond::And(vec![ 245 | Filter::Comp("a".into(), Comp::Eq(KeyData::Int(11))), 246 | Filter::Comp("b".into(), Comp::Lt(KeyData::Int(-1))), 247 | ]))); 248 | } 249 | 250 | #[test] 251 | fn build_cond_and() { 252 | test_build!(Filter::Cond(Cond::And(vec![ 253 | Filter::Comp("a".into(), Comp::Eq(KeyData::Int(11))), 254 | Filter::Comp("b".into(), Comp::Lt(KeyData::Int(-1))), 255 | ])), json!({ "$and": [ { "a": { "$eq": 11 } }, { "b": { "$lt": -1 } } ] })); 256 | } 257 | 258 | #[test] 259 | fn parse_cond_or() { 260 | test_parse!(Filter, json!({ "$or": [ { "a": { "$eq": 11 } }, { "b": { "$lt": -1 } } ] }), 261 | Filter::Cond(Cond::Or(vec![ 262 | Filter::Comp("a".into(), Comp::Eq(KeyData::Int(11))), 263 | Filter::Comp("b".into(), Comp::Lt(KeyData::Int(-1))), 264 | ]))); 265 | } 266 | 267 | #[test] 268 | fn build_cond_or() { 269 | test_build!(Filter::Cond(Cond::Or(vec![ 270 | Filter::Comp("a".into(), Comp::Eq(KeyData::Int(11))), 271 | Filter::Comp("b".into(), Comp::Lt(KeyData::Int(-1))), 272 | ])), json!({ "$or": [ { "a": { "$eq": 11 } }, { "b": { "$lt": -1 } } ] })); 273 | } 274 | 275 | #[test] 276 | fn parse_order_primary() { 277 | test_parse!(Order, json!("$asc"), 278 | Order::Primary(OrderKind::Asc)); 279 | test_parse!(Order, json!("$desc"), 280 | Order::Primary(OrderKind::Desc)); 281 | } 282 | 283 | #[test] 284 | fn build_order_primary() { 285 | test_build!(Order::Primary(OrderKind::Asc), 286 | json!("$asc")); 287 | test_build!(Order::Primary(OrderKind::Desc), 288 | json!("$desc")); 289 | } 290 | 291 | #[test] 292 | fn parse_order_field() { 293 | test_parse!(Order, json!({ "name": "$asc" }), 294 | 
Order::Field("name".into(), OrderKind::Asc)); 295 | test_parse!(Order, json!({ "time": "$desc" }), 296 | Order::Field("time".into(), OrderKind::Desc)); 297 | } 298 | 299 | #[test] 300 | fn build_order_field() { 301 | test_build!(Order::Field("name".into(), OrderKind::Asc), 302 | json!({ "name": "$asc" })); 303 | test_build!(Order::Field("time".into(), OrderKind::Desc), 304 | json!({ "time": "$desc" })); 305 | } 306 | } 307 | -------------------------------------------------------------------------------- /ledb/src/float.rs: -------------------------------------------------------------------------------- 1 | use lmdb::traits::{LmdbOrdKeyIfUnaligned, LmdbRawIfUnaligned}; 2 | use ordered_float::OrderedFloat; 3 | 4 | #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] 5 | #[repr(transparent)] 6 | pub struct F64(pub OrderedFloat); 7 | 8 | unsafe impl LmdbRawIfUnaligned for F64 { 9 | fn reported_type() -> String { 10 | f64::reported_type() 11 | } 12 | } 13 | 14 | unsafe impl LmdbOrdKeyIfUnaligned for F64 {} 15 | -------------------------------------------------------------------------------- /ledb/src/pool.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | collections::HashMap, 3 | path::{Path, PathBuf}, 4 | sync::{Arc, Once, RwLock, Weak}, 5 | }; 6 | 7 | use super::{Result, ResultWrap, StorageData}; 8 | 9 | type Storages = Arc>>>; 10 | 11 | static mut STORAGES: Option = None; 12 | static INITIALIZE_STORAGES: Once = Once::new(); 13 | 14 | #[inline] 15 | fn init_storages() { 16 | INITIALIZE_STORAGES.call_once(|| unsafe { 17 | STORAGES = Some(Arc::new(RwLock::new(HashMap::new()))); 18 | }); 19 | } 20 | 21 | fn get_storages() -> Storages { 22 | init_storages(); 23 | 24 | if let Some(storages) = unsafe { &STORAGES } { 25 | storages.clone() 26 | } else { 27 | unreachable!(); 28 | } 29 | } 30 | 31 | pub(crate) struct Pool; 32 | 33 | impl Pool { 34 | #[inline] 35 | pub(crate) fn get>(path: P) -> Result>> { 36 | let path = 
path.as_ref(); 37 | let storages = get_storages(); 38 | let map = storages.read().wrap_err()?; 39 | Ok(map.get(path).and_then(|env| env.upgrade())) 40 | } 41 | 42 | #[inline] 43 | pub(crate) fn put(path: PathBuf, storage: &Arc) -> Result<()> { 44 | let storages = get_storages(); 45 | let mut map = storages.write().wrap_err()?; 46 | map.insert(path, Arc::downgrade(storage)); 47 | Ok(()) 48 | } 49 | 50 | #[inline] 51 | pub(crate) fn del>(path: P) -> Result<()> { 52 | let path = path.as_ref(); 53 | let storages = get_storages(); 54 | let mut map = storages.write().wrap_err()?; 55 | map.remove(path); 56 | Ok(()) 57 | } 58 | 59 | #[inline] 60 | pub(crate) fn lst() -> Result> { 61 | let storages = get_storages(); 62 | let map = storages.read().wrap_err()?; 63 | Ok(map.keys().cloned().collect()) 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /ledb/src/selection.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | collections::HashSet, 3 | ops::{Not, BitAnd, BitOr}, 4 | }; 5 | 6 | use super::{Primary, Result}; 7 | 8 | #[derive(Debug, Clone, Default, PartialEq, Eq)] 9 | pub struct Selection { 10 | pub(crate) ids: HashSet, 11 | pub(crate) inv: bool 12 | } 13 | 14 | impl Selection { 15 | pub fn new(ids: HashSet, inv: bool) -> Self { 16 | Selection { ids, inv } 17 | } 18 | 19 | #[allow(clippy::trivially_copy_pass_by_ref)] 20 | pub fn has(&self, id: &Primary) -> bool { 21 | self.inv ^ self.ids.contains(id) 22 | } 23 | 24 | pub fn filter>>(self, iter: I) -> impl Iterator> { 25 | iter.filter(move |res| if let Ok(id) = res { 26 | self.has(id) 27 | } else { 28 | true 29 | }) 30 | } 31 | } 32 | 33 | impl> From for Selection { 34 | fn from(v: T) -> Self { 35 | Selection::new(v.as_ref().iter().cloned().collect(), false) 36 | } 37 | } 38 | 39 | impl Not for Selection { 40 | type Output = Self; 41 | 42 | fn not(self) -> Self::Output { 43 | let Selection { ids, inv } = self; 44 | 
Selection { ids, inv: !inv } 45 | } 46 | } 47 | 48 | impl BitAnd for Selection { 49 | type Output = Self; 50 | 51 | fn bitand(self, other: Self) -> Self::Output { 52 | let (ids, inv) = match (self.inv, self.ids.len(), other.inv, other.ids.len()) { 53 | // a & b 54 | (false, _, false, _) => (self.ids.intersection(&other.ids).cloned().collect(), false), 55 | // a & universe == a 56 | (false, _, true, 0) => (self.ids, false), 57 | // a & !b 58 | (false, n, true, m) if n < m => (self.ids.difference(&other.ids).cloned().collect(), false), 59 | // a & !b == !(b | !a) 60 | (false, _, true, _) => (other.ids.difference(&self.ids).cloned().collect(), true), 61 | // universe & b == b 62 | (true, 0, false, _) => (other.ids, false), 63 | // !a & b == b & !a 64 | (true, n, false, m) if m < n => (other.ids.difference(&self.ids).cloned().collect(), false), 65 | // !a & b == !(a | !b) 66 | (true, _, false, _) => (self.ids.difference(&other.ids).cloned().collect(), true), 67 | // !a | !b 68 | (true, _, true, _) => (self.ids.union(&other.ids).cloned().collect(), true), 69 | }; 70 | 71 | Selection::new(ids, inv) 72 | } 73 | } 74 | 75 | impl BitOr for Selection { 76 | type Output = Self; 77 | 78 | fn bitor(self, other: Self) -> Self::Output { 79 | // a | b <=> !(!a & !b) 80 | !(!self & !other) 81 | } 82 | } 83 | 84 | #[cfg(test)] 85 | mod test { 86 | use super::Selection; 87 | 88 | #[test] 89 | fn not_inv_and_empty() { 90 | assert_eq!(Selection::from(&[1, 2, 3, 7, 9]) & 91 | Selection::default(), 92 | Selection::default()); 93 | } 94 | 95 | #[test] 96 | fn not_inv_and_universe() { 97 | assert_eq!(Selection::from(&[1, 2, 3, 7, 9]) & 98 | !Selection::default(), 99 | Selection::from(&[1, 2, 3, 7, 9])); 100 | } 101 | 102 | #[test] 103 | fn not_inv_and_not_inv() { 104 | assert_eq!(Selection::from(&[1, 2, 3, 7, 9]) & 105 | Selection::from(&[2, 7, 5, 0, 4, 1]), 106 | Selection::from(&[1, 2, 7])); 107 | } 108 | 109 | #[test] 110 | fn not_inv_and_inv() { 111 | assert_eq!(Selection::from(&[1, 2, 
3, 7, 9]) & 112 | !Selection::from(&[2, 7, 5, 0, 4, 1]), 113 | Selection::from(&[3, 9])); 114 | } 115 | 116 | #[test] 117 | fn inv_and_not_inv() { 118 | assert_eq!(Selection::from(&[2, 7, 5, 0, 4, 1]) & 119 | !Selection::from(&[1, 2, 3, 7, 9]), 120 | !Selection::from(&[9, 3])); 121 | } 122 | 123 | #[test] 124 | fn inv_and_inv() { 125 | assert_eq!(!Selection::from(&[1, 2, 3, 7, 9]) & 126 | !Selection::from(&[2, 7, 5, 0, 4, 1]), 127 | !Selection::from(&[0, 1, 2, 3, 4, 5, 7, 9])); 128 | } 129 | 130 | #[test] 131 | fn not_inv_or_empty() { 132 | assert_eq!(Selection::from(&[1, 2, 3, 7, 9]) | 133 | Selection::default(), 134 | Selection::from(&[1, 2, 3, 7, 9])); 135 | } 136 | 137 | #[test] 138 | fn not_inv_or_universe() { 139 | assert_eq!(Selection::from(&[1, 2, 3, 7, 9]) | 140 | !Selection::default(), 141 | !Selection::default()); 142 | } 143 | 144 | #[test] 145 | fn not_inv_or_not_inv() { 146 | assert_eq!(Selection::from(&[1, 2, 3, 7, 9]) | 147 | Selection::from(&[2, 7, 5, 0, 4, 1]), 148 | Selection::from(&[0, 1, 2, 3, 4, 5, 7, 9])); 149 | } 150 | 151 | #[test] 152 | fn not_inv_or_inv() { 153 | assert_eq!(Selection::from(&[1, 2, 3, 7, 9]) | 154 | !Selection::from(&[2, 7, 5, 0, 4, 1]), 155 | Selection::from(&[3, 9])); 156 | } 157 | 158 | #[test] 159 | fn inv_or_not_inv() { 160 | assert_eq!(!Selection::from(&[2, 7, 5, 0, 4, 1]) | 161 | Selection::from(&[1, 2, 3, 7, 9]), 162 | Selection::from(&[3, 9])); 163 | } 164 | 165 | #[test] 166 | fn inv_or_inv() { 167 | assert_eq!(!Selection::from(&[1, 2, 3, 7, 9]) | 168 | !Selection::from(&[2, 7, 5, 0, 4, 1]), 169 | !Selection::from(&[1, 2, 7])); 170 | } 171 | } 172 | -------------------------------------------------------------------------------- /ledb/src/storage.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | collections::HashMap, 3 | env::current_dir, 4 | fs::create_dir_all, 5 | ops::Deref, 6 | path::{Path, PathBuf}, 7 | sync::{Arc, RwLock}, 8 | }; 9 | 10 | use 
dirs::home_dir; 11 | use dunce::canonicalize; 12 | use lmdb::{ 13 | self, open as OpenFlag, open::Flags as OpenFlags, Cursor, CursorIter, Database, 14 | DatabaseOptions, EnvBuilder, Environment, MaybeOwned, ReadTransaction, 15 | }; 16 | use ron::de::from_str as from_db_name; 17 | use serde::{Deserialize, Serialize}; 18 | use supercow::{ext::ConstDeref, NonSyncSupercow, Supercow}; 19 | 20 | use super::{ 21 | Collection, CollectionDef, Enumerable, IndexDef, Pool, Result, ResultWrap, Serial, 22 | SerialGenerator, 23 | }; 24 | 25 | #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] 26 | pub(crate) enum DatabaseDef { 27 | #[serde(rename = "c")] 28 | Collection(CollectionDef), 29 | #[serde(rename = "i")] 30 | Index(IndexDef), 31 | } 32 | 33 | /// Storage stats data 34 | #[derive(Debug, Clone, Serialize, Deserialize)] 35 | pub struct Stats { 36 | pub page_size: u32, 37 | pub btree_depth: u32, 38 | pub branch_pages: usize, 39 | pub leaf_pages: usize, 40 | pub overflow_pages: usize, 41 | pub data_entries: usize, 42 | } 43 | 44 | impl From for Stats { 45 | fn from( 46 | lmdb::Stat { 47 | psize, 48 | depth, 49 | branch_pages, 50 | leaf_pages, 51 | overflow_pages, 52 | entries, 53 | }: lmdb::Stat, 54 | ) -> Self { 55 | Self { 56 | page_size: psize, 57 | btree_depth: depth, 58 | branch_pages, 59 | leaf_pages, 60 | overflow_pages, 61 | data_entries: entries, 62 | } 63 | } 64 | } 65 | 66 | /// Storage info data 67 | #[derive(Debug, Clone, Serialize, Deserialize)] 68 | pub struct Info { 69 | pub map_size: usize, 70 | pub last_page: usize, 71 | pub last_transaction: usize, 72 | pub max_readers: u32, 73 | pub num_readers: u32, 74 | } 75 | 76 | impl From for Info { 77 | fn from( 78 | lmdb::EnvInfo { 79 | mapsize, 80 | last_pgno, 81 | last_txnid, 82 | maxreaders, 83 | numreaders, 84 | .. 
85 | }: lmdb::EnvInfo, 86 | ) -> Self { 87 | Self { 88 | map_size: mapsize, 89 | last_page: last_pgno, 90 | last_transaction: last_txnid, 91 | max_readers: maxreaders, 92 | num_readers: numreaders, 93 | } 94 | } 95 | } 96 | 97 | /// Database options 98 | #[derive(Debug, Clone, Serialize, Deserialize, Default)] 99 | pub struct Options { 100 | // options 101 | #[serde(default)] 102 | map_size: Option, 103 | #[serde(default)] 104 | max_readers: Option, 105 | #[serde(default)] 106 | max_dbs: Option, 107 | // flags 108 | #[serde(default)] 109 | map_async: Option, 110 | #[serde(default)] 111 | no_lock: Option, 112 | #[serde(default)] 113 | no_mem_init: Option, 114 | #[serde(default)] 115 | no_meta_sync: Option, 116 | #[serde(default)] 117 | no_read_ahead: Option, 118 | #[serde(default)] 119 | no_sub_dir: Option, 120 | #[serde(default)] 121 | no_sync: Option, 122 | #[serde(default)] 123 | no_tls: Option, 124 | #[serde(default)] 125 | read_only: Option, 126 | #[serde(default)] 127 | write_map: Option, 128 | } 129 | 130 | impl Options { 131 | fn env_builder(&self) -> Result { 132 | let mut bld = EnvBuilder::new()?; 133 | 134 | bld.set_mapsize(self.map_size.unwrap_or(16 << 20)) 135 | .wrap_err()?; 136 | bld.set_maxreaders(self.max_readers.unwrap_or(126)) 137 | .wrap_err()?; 138 | bld.set_maxdbs(self.max_dbs.unwrap_or(128)).wrap_err()?; 139 | 140 | Ok(bld) 141 | } 142 | 143 | fn open_flags(&self) -> OpenFlags { 144 | self.fill_flags(None) 145 | } 146 | 147 | fn config_env(&self, env: &Environment) -> Result<()> { 148 | if let Some(val) = self.map_size { 149 | unsafe { 150 | env.set_mapsize(val).wrap_err()?; 151 | } 152 | } 153 | 154 | unsafe { 155 | env.set_flags(self.fill_flags(Some(true)), true) 156 | .wrap_err()?; 157 | env.set_flags(self.fill_flags(Some(false)), false) 158 | .wrap_err()?; 159 | } 160 | 161 | Ok(()) 162 | } 163 | 164 | fn fill_flags(&self, onoff: Option) -> OpenFlags { 165 | let mut flags = OpenFlags::empty(); 166 | 167 | if let Some(flag) = self.map_async 
{ 168 | if onoff.map(|onoff| onoff == flag).unwrap_or(true) { 169 | flags.set(OpenFlag::MAPASYNC, flag); 170 | } 171 | } 172 | if let Some(flag) = self.no_lock { 173 | if onoff.map(|onoff| onoff == flag).unwrap_or(true) { 174 | flags.set(OpenFlag::NOLOCK, flag); 175 | } 176 | } 177 | if let Some(flag) = self.no_mem_init { 178 | if onoff.map(|onoff| onoff == flag).unwrap_or(true) { 179 | flags.set(OpenFlag::NOMEMINIT, flag); 180 | } 181 | } 182 | if let Some(flag) = self.no_meta_sync { 183 | if onoff.map(|onoff| onoff == flag).unwrap_or(true) { 184 | flags.set(OpenFlag::NOMETASYNC, flag); 185 | } 186 | } 187 | if let Some(flag) = self.no_read_ahead { 188 | if onoff.map(|onoff| onoff == flag).unwrap_or(true) { 189 | flags.set(OpenFlag::NORDAHEAD, flag); 190 | } 191 | } 192 | if let Some(flag) = self.no_sub_dir { 193 | if onoff.map(|onoff| onoff == flag).unwrap_or(true) { 194 | flags.set(OpenFlag::NOSUBDIR, flag); 195 | } 196 | } 197 | if let Some(flag) = self.no_sync { 198 | if onoff.map(|onoff| onoff == flag).unwrap_or(true) { 199 | flags.set(OpenFlag::NOSYNC, flag); 200 | } 201 | } 202 | if let Some(flag) = self.no_tls { 203 | if onoff.map(|onoff| onoff == flag).unwrap_or(true) { 204 | flags.set(OpenFlag::NOTLS, flag); 205 | } 206 | } 207 | if let Some(flag) = self.read_only { 208 | if onoff.map(|onoff| onoff == flag).unwrap_or(true) { 209 | flags.set(OpenFlag::RDONLY, flag); 210 | } 211 | } 212 | if let Some(flag) = self.write_map { 213 | if onoff.map(|onoff| onoff == flag).unwrap_or(true) { 214 | flags.set(OpenFlag::WRITEMAP, flag); 215 | } 216 | } 217 | 218 | flags 219 | } 220 | } 221 | 222 | pub(crate) struct StorageData { 223 | path: PathBuf, 224 | env: Environment, 225 | gen: SerialGenerator, 226 | collections: RwLock>, 227 | } 228 | 229 | /// Storage of documents 230 | #[derive(Clone)] 231 | pub struct Storage(Arc); 232 | 233 | impl Storage { 234 | /// Open documents storage using path to the database in filesystem 235 | /// 236 | /// When storage does not 
exist it will be created automatically. 237 | /// 238 | /// On opening storage the existing collections and indexes will be restored automatically. 239 | /// 240 | /// You can open multiple storages using the same path; all of them will share the same storage instance. 241 | /// Also you can clone storage instance, share it and send it to other threads. 242 | /// 243 | pub fn new>(path: P, opts: Options) -> Result { 244 | let path = realpath(path.as_ref())?; 245 | /* Reuse an already-open instance for this path from the process-wide pool, re-applying the options. */ 246 | if let Some(storage) = Pool::get(&path)? { 247 | opts.config_env(&storage.env)?; 248 | Ok(Storage(storage)) 249 | } else { 250 | Self::open(path, opts) 251 | } 252 | } 253 | /* Creates a new environment at `path`, restores collections from it, and registers the instance in the pool. */ 254 | fn open(path: PathBuf, opts: Options) -> Result { 255 | let env = open_env(&path, opts)?; 256 | 257 | let gen = SerialGenerator::new(); 258 | 259 | let collections = RwLock::new(Vec::new()); 260 | 261 | let storage = Storage(Arc::new(StorageData { 262 | path: path.clone(), 263 | env, 264 | gen, 265 | collections, 266 | })); 267 | 268 | storage.load_collections()?; 269 | 270 | Pool::put(path, &storage.0)?; 271 | 272 | Ok(storage) 273 | } 274 | /* Restores collection handles from persisted definitions and seeds the serial generator with the highest serial seen. */ 275 | fn load_collections(&self) -> Result<()> { 276 | let env = &self.0.env; 277 | 278 | let db = Database::open(env, None, &DatabaseOptions::defaults()).wrap_err()?; 279 | 280 | let (last_serial, db_def) = load_databases(&env, &db)?; 281 | 282 | self.0.gen.set(last_serial); 283 | 284 | let mut collections = self.0.collections.write().wrap_err()?; 285 | 286 | *collections = db_def 287 | .into_iter() 288 | .map(|(def, index_defs)| Collection::new(self.clone(), def, index_defs)) 289 | .collect::>>()?; 290 | 291 | Ok(()) 292 | } 293 | /* Assigns the next serial to `data` via the shared generator. */ 294 | pub(crate) fn enumerate(&self, data: E) -> E { 295 | self.0.gen.enumerate(data) 296 | } 297 | 298 | /// Checks if the collection exists 299 | /// 300 | pub fn has_collection>(&self, name: N) -> Result { 301 | let name = name.as_ref(); 302 | let collections = self.0.collections.read().wrap_err()?; 303 | // search alive collection 304 |
Ok(collections 305 | .iter() 306 | .any(|collection| collection.name() == name)) 307 | } 308 | 309 | /// Get collection for documents 310 | /// 311 | /// *Note*: The collection will be created automatically when it does not exist. 312 | /// 313 | pub fn collection>(&self, name: N) -> Result { 314 | let name = name.as_ref(); 315 | /* Fast path under the read lock; the block scope drops the read guard before taking the write lock below. */ 316 | { 317 | let collections = self.0.collections.read().wrap_err()?; 318 | // search alive collection 319 | if let Some(collection) = collections 320 | .iter() 321 | .find(|collection| collection.name() == name) 322 | { 323 | return Ok(collection.clone()); 324 | } 325 | } 326 | 327 | // create new collection 328 | let collection = Collection::new( 329 | self.clone(), 330 | self.enumerate(CollectionDef::new(name)), 331 | Vec::new(), 332 | )?; 333 | /* NOTE(review): between dropping the read lock and taking the write lock another thread could insert the same name — looks like a benign duplicate-entry window; verify. */ 334 | let mut collections = self.0.collections.write().wrap_err()?; 335 | collections.push(collection.clone()); 336 | 337 | Ok(collection) 338 | } 339 | /* Removes the named collection and marks its data for deletion; returns true when it existed. */ 340 | pub fn drop_collection>(&self, name: N) -> Result { 341 | let name = name.as_ref(); 342 | 343 | let found_pos = { 344 | let collections = self.0.collections.read().wrap_err()?; 345 | collections 346 | .iter() 347 | .position(|collection| collection.name() == name) 348 | }; 349 | 350 | Ok(if let Some(pos) = found_pos { 351 | let mut collections = self.0.collections.write().wrap_err()?; 352 | let collection = collections.remove(pos); 353 | collection.to_delete()?; 354 | true 355 | } else { 356 | false 357 | }) 358 | } 359 | /* Lists the names of all live collections. */ 360 | pub fn get_collections(&self) -> Result> { 361 | let collections = self.0.collections.read().wrap_err()?; 362 | Ok(collections 363 | .iter() 364 | .map(|collection| collection.name().into()) 365 | .collect()) 366 | } 367 | 368 | pub fn get_stats(&self) -> Result { 369 | self.0.env.stat().map(Stats::from).wrap_err() 370 | } 371 | 372 | pub fn get_info(&self) -> Result { 373 | self.0.env.info().map(Info::from).wrap_err() 374 | } 375 | 376 | /// Get opened storages (the misspelled `openned` name is public API and kept as-is) 377 | pub fn openned() -> Result> { 378 |
Pool::lst() 379 | } 380 | } 381 | /* Dropping a handle unregisters this path from the pool; failure is only logged since drop cannot return an error. */ 382 | impl Drop for Storage { 383 | fn drop(&mut self) { 384 | if let Err(e) = Pool::del(&self.0.path) { 385 | eprintln!("Error when dropping storage: {}", e); 386 | } 387 | } 388 | } 389 | 390 | impl Deref for Storage { 391 | type Target = Environment; 392 | 393 | #[inline] 394 | fn deref(&self) -> &Self::Target { 395 | &self.0.env 396 | } 397 | } 398 | 399 | unsafe impl ConstDeref for Storage { 400 | type Target = Environment; 401 | 402 | #[inline] 403 | fn const_deref(&self) -> &Self::Target { 404 | &self.0.env 405 | } 406 | } 407 | /* Lets a Storage be passed anywhere a Supercow over the environment is expected (shared ownership). */ 408 | impl<'env> Into> for Storage { 409 | fn into(self) -> Supercow<'env, Environment> { 410 | Supercow::shared(self) 411 | } 412 | } 413 | 414 | impl<'env> Into> for Storage { 415 | fn into(self) -> NonSyncSupercow<'env, Environment> { 416 | Supercow::shared(self) 417 | } 418 | } 419 | 420 | /// The list of collection and index definitions 421 | type Definitions = Vec<(CollectionDef, Vec)>; 422 | /* Scans the root (unnamed) LMDB database, decoding each entry name into a collection or index definition; returns the highest serial seen plus definitions grouped per collection name. */ 423 | fn load_databases(env: &Environment, db: &Database) -> Result<(Serial, Definitions)> { 424 | let txn = ReadTransaction::new(env).wrap_err()?; 425 | let cursor = txn.cursor(db).wrap_err()?; 426 | let access = txn.access(); 427 | let mut defs: HashMap)> = HashMap::new(); 428 | let mut last_serial: Serial = 0; 429 | 430 | for res in CursorIter::new( 431 | MaybeOwned::Owned(cursor), 432 | &access, 433 | |c, a| c.first(a), 434 | Cursor::next::, 435 | ) 436 | .wrap_err()?
437 | .map(|res| { 438 | res.wrap_err() 439 | .and_then(|(key, _val)| from_db_name(key).wrap_err()) 440 | }) { 441 | match res { 442 | Ok(DatabaseDef::Collection(def)) => { 443 | last_serial = usize::max(last_serial, def.0); /* A collection def overwrites any placeholder created when its indexes were seen first. */ 444 | let entry = defs 445 | .entry(def.1.clone()) 446 | .or_insert_with(|| (def.clone(), Vec::new())); 447 | entry.0 = def; 448 | } 449 | Ok(DatabaseDef::Index(def)) => { 450 | last_serial = usize::max(last_serial, def.0); /* Index seen before its collection: insert a placeholder collection def keyed by the index's collection name. */ 451 | defs.entry(def.1.clone()) 452 | .or_insert_with(|| (CollectionDef::new(&def.1), Vec::new())) 453 | .1 454 | .push(def); 455 | } 456 | Err(e) => return Err(e), 457 | } 458 | } 459 | 460 | Ok(( 461 | last_serial, 462 | defs.into_iter().map(|(_key, val)| val).collect(), 463 | )) 464 | } 465 | /* Builds and opens the LMDB environment at `path`, creating the directory tree first; 0o600 is the unix file mode. */ 466 | fn open_env(path: &Path, opts: Options) -> Result { 467 | let path = path.to_str().ok_or("Invalid db path").wrap_err()?; 468 | 469 | let bld = opts.env_builder()?; 470 | let flags = opts.open_flags(); 471 | 472 | create_dir_all(&path).wrap_err()?; 473 | 474 | unsafe { bld.open(path, flags, 0o600) }.wrap_err() 475 | } 476 | /* Expands a leading `~` to the home directory, resolves relative paths against the current directory, then canonicalizes. */ 477 | fn realpath(path: &Path) -> Result { 478 | let path = if path.has_root() { 479 | path.to_path_buf() 480 | } else if let Ok(path) = path.strip_prefix("~") { 481 | home_dir() 482 | .ok_or_else(|| "Unable to determine home directory") 483 | .wrap_err()?
.as_path() 485 | .join(path) 486 | } else { 487 | current_dir().wrap_err()?.as_path().join(path) 488 | }; 489 | safe_canonicalize(path.as_path()) 490 | } 491 | /* canonicalize() errors on not-yet-existing paths; recursively canonicalize the nearest existing ancestor and re-append the remainder. NOTE(review): any canonicalize error takes this fallback, not just 'missing path' — verify that is intended. */ 492 | fn safe_canonicalize(path: &Path) -> Result { 493 | match canonicalize(path) { 494 | Ok(canonical) => Ok(canonical), 495 | Err(error) => { 496 | if let Some(parent) = path.parent() { 497 | let child = path.strip_prefix(parent).unwrap(); 498 | safe_canonicalize(parent).map(|canonical_parent| canonical_parent.join(child)) 499 | } else { 500 | Err(error).wrap_err() 501 | } 502 | } 503 | } 504 | } 505 | -------------------------------------------------------------------------------- /ledb/src/test.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | fs::remove_dir_all, 3 | path::Path, 4 | }; 5 | 6 | use super::{Options, Result, Storage}; 7 | /* Test helper: builds a value from inline JSON and unwraps. */ 8 | macro_rules! json_val { 9 | ($($json:tt)+) => { 10 | from_value(serde_json::json!($($json)+)).unwrap() 11 | }; 12 | } 13 | /* Test helper: asserts that serializing $json_val and parsing it back as $val_type yields $rust_val. */ 14 | macro_rules! test_parse { 15 | ($val_type:ty, $json_val:expr, $rust_val:expr) => { 16 | assert_eq!( 17 | from_str::<$val_type>(&to_string(&$json_val).unwrap()).unwrap(), 18 | $rust_val 19 | ); 20 | }; 21 | } 22 | 23 | macro_rules!
test_build { 24 | ($rust_val:expr, $json_val:expr) => { 25 | assert_eq!( 26 | from_str::(&to_string(&$rust_val).unwrap()).unwrap(), 27 | from_str::(&to_string(&$json_val).unwrap()).unwrap() 28 | ); 29 | }; 30 | } 31 | 32 | static DB_DIR: &'static str = "test_db"; 33 | 34 | pub fn test_db(id: &'static str) -> Result { 35 | let path = Path::new(DB_DIR).join(Path::new(id)); 36 | 37 | let _ = remove_dir_all(&path); 38 | 39 | Storage::new(&path, Options::default()) 40 | } 41 | -------------------------------------------------------------------------------- /ledb/src/value.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | borrow::Cow, 3 | str::from_utf8, 4 | }; 5 | 6 | use byteorder::{ByteOrder, NativeEndian}; 7 | use ordered_float::OrderedFloat; 8 | use serde::{Serialize, Deserialize}; 9 | 10 | use super::{KeyType, Result, ResultWrap, Value}; 11 | 12 | /// The data of key 13 | #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] 14 | #[serde(untagged)] 15 | pub enum KeyData { 16 | Int(i64), 17 | #[serde(with = "float")] 18 | Float(OrderedFloat), 19 | String(String), 20 | Binary(Vec), 21 | Bool(bool), 22 | } 23 | 24 | mod float { 25 | use super::OrderedFloat; 26 | use serde::{Deserialize, Deserializer, Serializer}; 27 | 28 | #[allow(clippy::trivially_copy_pass_by_ref)] 29 | pub fn serialize( 30 | OrderedFloat(val): &OrderedFloat, 31 | serializer: S, 32 | ) -> Result { 33 | serializer.serialize_f64(*val) 34 | } 35 | 36 | pub fn deserialize<'de, D: Deserializer<'de>>( 37 | deserializer: D, 38 | ) -> Result, D::Error> { 39 | f64::deserialize(deserializer).map(OrderedFloat) 40 | } 41 | } 42 | 43 | impl KeyData { 44 | /// Converts binary representation into key data 45 | pub fn from_raw(typ: KeyType, raw: &[u8]) -> Result { 46 | use self::KeyData::*; 47 | Ok(match typ { 48 | KeyType::Int => { 49 | if raw.len() != 8 { 50 | return Err("Int key must be 8 bytes length".into()); 51 | } 52 | 
Int(NativeEndian::read_i64(raw)) 53 | } 54 | KeyType::Float => { 55 | if raw.len() != 8 { 56 | return Err("Float key must be 8 bytes length".into()); 57 | } 58 | Float(OrderedFloat(NativeEndian::read_f64(raw))) 59 | } 60 | KeyType::String => String(from_utf8(raw).wrap_err()?.into()), 61 | KeyType::Binary => Binary(Vec::from(raw)), 62 | KeyType::Bool => { 63 | if raw.len() != 1 { 64 | return Err("Bool key must be 1 byte length".into()); 65 | } 66 | Bool(raw[0] != 0) 67 | } 68 | }) 69 | } 70 | 71 | /// Converts key data into binary representation 72 | /* SAFETY: Int/Float/Bool reinterpret the value's in-memory (native-endian) bytes via pointer casts; sizes match the 8/8/1-byte layouts checked in from_raw. Empty String/Binary become a single NUL byte — presumably because zero-length keys are rejected by the store; verify against LMDB. */ 72 | pub fn as_raw(&self) -> &[u8] { 73 | use self::KeyData::*; 74 | match self { 75 | Int(val) => unsafe { &*(val as *const i64 as *const [u8; 8]) }, 76 | Float(val) => unsafe { &*(val as *const ordered_float::OrderedFloat as *const [u8; 8]) }, 77 | String(val) => if val.is_empty() { 78 | b"\0" 79 | } else { 80 | val.as_bytes() 81 | }, 82 | Binary(val) => if val.is_empty() { 83 | &[0u8] 84 | } else { 85 | val.as_slice() 86 | }, 87 | Bool(val) => unsafe { &*(val as *const bool as *const [u8; 1]) }, 88 | } 89 | } 90 | 91 | /// Converts generic value into key data; non-keyable CBOR values (arrays, maps, null) yield None. 92 | pub fn from_val(val: &Value) -> Option { 93 | use serde_cbor::Value::*; 94 | Some(match val { 95 | Integer(val) => KeyData::Int(*val as i64), 96 | Float(val) => KeyData::Float(OrderedFloat(*val)), 97 | Bytes(val) => KeyData::Binary(val.clone()), 98 | Text(val) => KeyData::String(val.clone()), 99 | Bool(val) => KeyData::Bool(*val), 100 | _ => return None, 101 | }) 102 | } 103 | 104 | /// Value is empty (only String and Binary can be empty; scalars never are) 105 | pub fn is_empty(&self) -> bool { 106 | use self::KeyData::*; 107 | match self { 108 | String(val) => val.is_empty(), 109 | Binary(val) => val.is_empty(), 110 | _ => false, 111 | } 112 | } 113 | 114 | /// Simple data type casting: returns self only when the variant already matches `typ`, otherwise None (no conversion) 115 | pub fn as_type(&self, typ: KeyType) -> Option<&KeyData> { 116 | use self::KeyData::*; 117 | Some(match (typ, self) { 118 | (KeyType::Int, Int(..)) 119 | | (KeyType::Float, Float(..)) 120 | | (KeyType::Binary, Binary(..)) 121 |
(KeyType::String, String(..)) 122 | | (KeyType::Bool, Bool(..)) => self, 123 | _ => return None, 124 | }) 125 | } 126 | 127 | /// Convert key data into specified type 128 | /* Checked conversions: matching type borrows, numeric/string conversions allocate; failed string parses yield None. Float→Int uses f64::round (half away from zero). */ 128 | pub fn to_type(&self, typ: KeyType) -> Option> { 129 | use self::KeyData::*; 130 | Some(if let Some(v) = self.as_type(typ) { 131 | Cow::Borrowed(v) 132 | } else { 133 | Cow::Owned(match (typ, self) { 134 | (KeyType::Float, Int(v)) => Float(OrderedFloat(*v as f64)), 135 | (KeyType::Int, Float(v)) => Int(v.round() as i64), 136 | (KeyType::String, Int(v)) => String(v.to_string()), 137 | (KeyType::String, Float(v)) => String(v.to_string()), 138 | (KeyType::String, Bool(v)) => String(v.to_string()), 139 | (KeyType::Int, String(v)) => Int(if let Ok(v) = v.parse() { 140 | v 141 | } else { 142 | return None; 143 | }), 144 | (KeyType::Float, String(v)) => Float(if let Ok(v) = v.parse() { 145 | OrderedFloat(v) 146 | } else { 147 | return None; 148 | }), 149 | (KeyType::Bool, String(v)) => Bool(if let Ok(v) = v.parse() { 150 | v 151 | } else { 152 | return None; 153 | }), 154 | _ => return None, 155 | }) 156 | }) 157 | } 158 | 159 | /// Get the actual type of key data 160 | pub fn get_type(&self) -> KeyType { 161 | use self::KeyData::*; 162 | match self { 163 | Int(..) => KeyType::Int, 164 | Float(..) => KeyType::Float, 165 | String(..) => KeyType::String, 166 | Binary(..) => KeyType::Binary, 167 | Bool(..)
=> KeyType::Bool, 168 | } 169 | } 170 | } 171 | /* Ergonomic conversions: each keyable primitive (and its reference) maps onto the corresponding KeyData variant. */ 172 | impl<'a> From<&'a i64> for KeyData { 173 | fn from(v: &'a i64) -> Self { 174 | KeyData::Int(*v) 175 | } 176 | } 177 | 178 | impl From for KeyData { 179 | fn from(v: i64) -> Self { 180 | KeyData::Int(v) 181 | } 182 | } 183 | 184 | impl<'a> From<&'a f64> for KeyData { 185 | fn from(v: &'a f64) -> Self { 186 | KeyData::Float(OrderedFloat(*v)) 187 | } 188 | } 189 | 190 | impl From for KeyData { 191 | fn from(v: f64) -> Self { 192 | KeyData::Float(OrderedFloat(v)) 193 | } 194 | } 195 | 196 | impl<'a> From<&'a String> for KeyData { 197 | fn from(v: &'a String) -> Self { 198 | KeyData::String(v.clone()) 199 | } 200 | } 201 | 202 | impl From for KeyData { 203 | fn from(v: String) -> Self { 204 | KeyData::String(v) 205 | } 206 | } 207 | 208 | impl<'a> From<&'a str> for KeyData { 209 | fn from(v: &str) -> Self { 210 | KeyData::String(v.into()) 211 | } 212 | } 213 | 214 | impl<'a> From<&'a [u8]> for KeyData { 215 | fn from(v: &[u8]) -> Self { 216 | KeyData::Binary(v.into()) 217 | } 218 | } 219 | 220 | impl<'a> From<&'a Vec> for KeyData { 221 | fn from(v: &'a Vec) -> Self { 222 | KeyData::Binary(v.clone()) 223 | } 224 | } 225 | 226 | impl From> for KeyData { 227 | fn from(v: Vec) -> Self { 228 | KeyData::Binary(v) 229 | } 230 | } 231 | 232 | impl<'a> From<&'a bool> for KeyData { 233 | fn from(v: &'a bool) -> Self { 234 | KeyData::Bool(*v) 235 | } 236 | } 237 | 238 | impl From for KeyData { 239 | fn from(v: bool) -> Self { 240 | KeyData::Bool(v) 241 | } 242 | } 243 | 244 | #[cfg(test)] 245 | mod test { 246 | use super::{KeyData, KeyType}; 247 | 248 | #[test] 249 | fn get_type() { 250 | assert_eq!(KeyData::from(123).get_type(), KeyType::Int); 251 | assert_eq!(KeyData::from(12.3).get_type(), KeyType::Float); 252 | assert_eq!(KeyData::from("abc").get_type(), KeyType::String); 253 | assert_eq!(KeyData::from(vec![1u8, 2, 3]).get_type(), KeyType::Binary); 254 | assert_eq!(KeyData::from(true).get_type(), KeyType::Bool); 255 | } 256
| 257 | #[test] /* as_type never converts: only same-variant requests return Some. */ 258 | fn as_type() { 259 | assert_eq!( 260 | KeyData::from("abc") 261 | .as_type(KeyType::String) 262 | .unwrap() 263 | .get_type(), 264 | KeyType::String 265 | ); 266 | assert_eq!(KeyData::from("abc").as_type(KeyType::Int), None); 267 | assert_eq!( 268 | KeyData::from(123).as_type(KeyType::Int).unwrap().get_type(), 269 | KeyType::Int 270 | ); 271 | assert_eq!(KeyData::from(123).as_type(KeyType::Float), None); 272 | assert_eq!( 273 | KeyData::from(12.3) 274 | .as_type(KeyType::Float) 275 | .unwrap() 276 | .get_type(), 277 | KeyType::Float 278 | ); 279 | assert_eq!(KeyData::from(12.3).as_type(KeyType::Int), None); 280 | assert_eq!( 281 | KeyData::from(true) 282 | .as_type(KeyType::Bool) 283 | .unwrap() 284 | .get_type(), 285 | KeyType::Bool 286 | ); 287 | assert_eq!(KeyData::from(true).as_type(KeyType::String), None); 288 | } 289 | 290 | #[test] /* to_type performs the checked conversions (parse, round, stringify); unparsable strings give None. */ 291 | fn into_type() { 292 | assert_eq!( 293 | KeyData::from("abc") 294 | .to_type(KeyType::String) 295 | .unwrap() 296 | .get_type(), 297 | KeyType::String 298 | ); 299 | assert_eq!(KeyData::from("abc").to_type(KeyType::Int), None); 300 | assert_eq!( 301 | KeyData::from("123") 302 | .to_type(KeyType::Int) 303 | .unwrap() 304 | .into_owned(), 305 | KeyData::from(123) 306 | ); 307 | assert_eq!( 308 | KeyData::from("12.3") 309 | .to_type(KeyType::Float) 310 | .unwrap() 311 | .into_owned(), 312 | KeyData::from(12.3) 313 | ); 314 | assert_eq!(KeyData::from("12.3").to_type(KeyType::Int), None); 315 | assert_eq!( 316 | KeyData::from(123) 317 | .to_type(KeyType::Int) 318 | .unwrap() 319 | .get_type(), 320 | KeyType::Int 321 | ); 322 | assert_eq!( 323 | KeyData::from(123) 324 | .to_type(KeyType::Float) 325 | .unwrap() 326 | .into_owned(), 327 | KeyData::from(123.0) 328 | ); 329 | assert_eq!( 330 | KeyData::from(123) 331 | .to_type(KeyType::String) 332 | .unwrap() 333 | .into_owned(), 334 | KeyData::from("123") 335 | ); 336 | assert_eq!( 337 | KeyData::from(12.3) 338 | .to_type(KeyType::Float) 339 | .unwrap() 340 |
.into_owned(), 341 | KeyData::from(12.3) 342 | ); 343 | assert_eq!( 344 | KeyData::from(12.3) 345 | .to_type(KeyType::Int) 346 | .unwrap() 347 | .into_owned(), 348 | KeyData::from(12) 349 | ); /* 12.5 → 13 because f64::round rounds half away from zero. */ 350 | assert_eq!( 351 | KeyData::from(12.5) 352 | .to_type(KeyType::Int) 353 | .unwrap() 354 | .into_owned(), 355 | KeyData::from(13) 356 | ); 357 | assert_eq!( 358 | KeyData::from(12.3) 359 | .to_type(KeyType::String) 360 | .unwrap() 361 | .into_owned(), 362 | KeyData::from("12.3") 363 | ); 364 | assert_eq!( 365 | KeyData::from(true) 366 | .to_type(KeyType::Bool) 367 | .unwrap() 368 | .get_type(), 369 | KeyType::Bool 370 | ); 371 | assert_eq!( 372 | KeyData::from(true) 373 | .to_type(KeyType::String) 374 | .unwrap() 375 | .into_owned(), 376 | KeyData::from("true") 377 | ); 378 | } 379 | } 380 | --------------------------------------------------------------------------------