├── crates ├── sqlite-vfs │ ├── .dockerignore │ ├── LICENSE │ ├── Cargo.toml │ └── README.md ├── litevfs │ ├── .gitignore │ ├── Cargo.toml │ └── src │ │ ├── lib.rs │ │ ├── ext.rs │ │ ├── sqlite.rs │ │ ├── leaser.rs │ │ ├── http.rs │ │ ├── locks.rs │ │ ├── syncer.rs │ │ ├── lfsc.rs │ │ ├── pager.rs │ │ ├── database.rs │ │ └── vfs.rs ├── emscripten-sys │ ├── Cargo.toml │ └── src │ │ └── lib.rs └── xtask │ ├── Cargo.toml │ └── src │ ├── build_wasm.rs │ ├── main.rs │ └── build_npm.rs ├── npm ├── litevfs │ ├── lib │ │ ├── index.js │ │ └── database.js │ ├── package.json.tmpl │ └── scripts │ │ └── install.js └── package.json.tmpl ├── .gitignore ├── Cargo.toml ├── .cargo └── config.toml ├── .github ├── dependabot.yml └── workflows │ └── push.yml ├── flake.nix ├── README.md ├── flake.lock └── LICENSE /crates/sqlite-vfs/.dockerignore: -------------------------------------------------------------------------------- 1 | target -------------------------------------------------------------------------------- /crates/litevfs/.gitignore: -------------------------------------------------------------------------------- 1 | Cargo.lock 2 | -------------------------------------------------------------------------------- /crates/sqlite-vfs/LICENSE: -------------------------------------------------------------------------------- 1 | MIT OR Apache-2.0 -------------------------------------------------------------------------------- /npm/litevfs/lib/index.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | module.exports = require('./database'); 3 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | **/target 2 | 3 | # Devenv 4 | .devenv* 5 | devenv.local.nix 6 | .pre-commit-config.yaml 7 | -------------------------------------------------------------------------------- /crates/emscripten-sys/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "emscripten-sys" 3 | version = "0.1.0" 4 | 5 | [dependencies] 6 | -------------------------------------------------------------------------------- /npm/package.json.tmpl: -------------------------------------------------------------------------------- 1 | { 2 | "name": "litevfs-{OS}-{ARCH}{ABI}", 3 | "version": "{VERSION}", 4 | "os": ["{OS}"], 5 | "cpu": ["{ARCH}"], 6 | "files": [ 7 | "lib/**" 8 | ] 9 | } 10 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [workspace] 2 | members = [ 3 | "crates/*", 4 | ] 5 | resolver = "2" 6 | 7 | # optimize for small code size 8 | [profile.release] 9 | opt-level = "z" 10 | lto = true 11 | strip = true 12 | -------------------------------------------------------------------------------- /crates/xtask/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "xtask" 3 | version = "0.1.0" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | 8 | [dependencies] 9 | cargo_metadata = "0.18" 10 | clap = "4.4" 11 | duct = "0.13" 12 | current_platform = "0.2" 13 | -------------------------------------------------------------------------------- /.cargo/config.toml: 
-------------------------------------------------------------------------------- 1 | [target.wasm32-unknown-emscripten] 2 | rustflags = ["-Clink-args=-sERROR_ON_UNDEFINED_SYMBOLS=0", "-Clink-args=--no-entry"] 3 | 4 | [target.x86_64-unknown-linux-musl] 5 | rustflags = ["-C", "target-feature=-crt-static"] 6 | 7 | [target.aarch64-unknown-linux-musl] 8 | rustflags = ["-C", "target-feature=-crt-static"] 9 | 10 | [alias] 11 | xtask = "run --package xtask --" 12 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: "cargo" 4 | directory: "/" 5 | labels: 6 | - "dependencies" 7 | - "rust" 8 | schedule: 9 | interval: "daily" 10 | 11 | - package-ecosystem: "github-actions" 12 | directory: "/" 13 | labels: 14 | - "dependencies" 15 | - "actions" 16 | schedule: 17 | interval: "daily" 18 | -------------------------------------------------------------------------------- /npm/litevfs/package.json.tmpl: -------------------------------------------------------------------------------- 1 | { 2 | "name": "litevfs", 3 | "version": "{VERSION}", 4 | "main": "lib/index.js", 5 | "files": [ 6 | "lib/**", 7 | "scripts/**" 8 | ], 9 | "scripts": { 10 | "install": "scripts/install.js" 11 | }, 12 | "dependencies": { 13 | "better-sqlite3": "^8.6.0" 14 | }, 15 | "optionalDependencies": { 16 | "litevfs-linux-x64-gnu": "{VERSION}", 17 | "litevfs-linux-x64-musl": "{VERSION}", 18 | "litevfs-linux-arm64-gnu": "{VERSION}", 19 | "litevfs-linux-arm64-musl": "{VERSION}", 20 | "litevfs-darwin-x64": "{VERSION}", 21 | "litevfs-darwin-arm64": "{VERSION}", 22 | "litevfs-windows-x64": "{VERSION}" 23 | } 24 | } 25 | 26 | -------------------------------------------------------------------------------- /crates/sqlite-vfs/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "sqlite-vfs" 3 | version = "0.2.0" 4 | authors = ["Markus Ast "] 5 | license = "MIT OR Apache-2.0" 6 | edition = "2021" 7 | description = "Build SQLite virtual file systems (VFS) by implementing a simple Rust trait." 8 | repository = "https://github.com/rkusa/sqlite-vfs" 9 | documentation = "https://docs.rs/sqlite-vfs" 10 | keywords = ["sqlite", "vfs"] 11 | 12 | [dependencies] 13 | log = "0.4" 14 | time = "0.3" 15 | 16 | [features] 17 | default = [] 18 | 19 | # Enable delegation to the parent VFS: `xSetSystemCall`, `xGetSystemCall` and `xNextSystemCall` 20 | syscall = [] 21 | 22 | # Enable delegation to the parent VFS: `xDlOpen`, `xDlError`, `xDlSym` and `xDlClose` 23 | loadext = [] 24 | 25 | # Enable fault injection used by the SQLite test suite 26 | faultinj = [] 27 | -------------------------------------------------------------------------------- /npm/litevfs/lib/database.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | const sqlite = require('better-sqlite3'); 3 | const path = require('path'); 4 | 5 | var requireFunc = 6 | typeof __webpack_require__ === 'function' 7 | ?
__non_webpack_require__ 8 | : require; 9 | 10 | const extensionPath = requireFunc.resolve("litevfs/build/litevfs"); 11 | 12 | function Database(filename, options) { 13 | const extdb = sqlite(":memory:"); 14 | extdb.loadExtension(extensionPath, "sqlite3_litevfs_init_default_vfs"); 15 | extdb.close(); 16 | 17 | return new sqlite(filename, options); 18 | } 19 | 20 | sqlite.prototype.acquire_write_lease = function() { 21 | this.pragma('litevfs_acquire_lease'); 22 | }; 23 | sqlite.prototype.release_write_lease = function() { 24 | this.pragma('litevfs_release_lease'); 25 | }; 26 | sqlite.prototype.with_write_lease = function(cb) { 27 | this.acquire_write_lease(); 28 | try { 29 | cb(); 30 | } finally { 31 | this.release_write_lease(); 32 | } 33 | }; 34 | 35 | module.exports = Database; 36 | -------------------------------------------------------------------------------- /flake.nix: -------------------------------------------------------------------------------- 1 | { 2 | description = "litevfs-dev"; 3 | 4 | inputs = { 5 | nixpkgs.url = "github:nixos/nixpkgs"; 6 | flake-utils.url = "github:numtide/flake-utils"; 7 | fenix = { 8 | url = "github:nix-community/fenix"; 9 | inputs.nixpkgs.follows = "nixpkgs"; 10 | }; 11 | }; 12 | 13 | outputs = { 14 | flake-utils, 15 | nixpkgs, 16 | fenix, 17 | ... 18 | }: 19 | flake-utils.lib.eachDefaultSystem (system: let 20 | pkgs = import nixpkgs {inherit system;}; 21 | toolchain = with fenix.packages.${system}; 22 | combine [ 23 | stable.toolchain 24 | targets.wasm32-unknown-emscripten.stable.rust-std 25 | ]; 26 | in { 27 | devShells.default = pkgs.mkShell { 28 | buildInputs = [ 29 | toolchain 30 | pkgs.cargo-nextest 31 | pkgs.rust-bindgen 32 | 33 | # Emscripten target 34 | pkgs.emscripten 35 | pkgs.wabt 36 | 37 | # NPM packaging 38 | pkgs.nodejs 39 | ]; 40 | }; 41 | }); 42 | } 43 | -------------------------------------------------------------------------------- /crates/litevfs/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "litevfs" 3 | version = "0.1.1" 4 | edition = "2021" 5 | 6 | [lib] 7 | crate-type = ["staticlib", "cdylib"] 8 | 9 | [dependencies] 10 | bytesize = "1.2" 11 | caches = "0.2" 12 | crossbeam-channel = "0.5" 13 | env_logger = { version = "0.10", default-features = false } 14 | hex = "0.4" 15 | humantime = "2.1" 16 | log = "0.4" 17 | litetx = "0.1" 18 | rand = "0.8" 19 | read_write_at = "0.1" 20 | serde = { version = "1.0", features = ["derive"] } 21 | serde_json = "1.0" 22 | serde_with = { version = "3.2", features = ["base64", "macros"] } 23 | string-interner = "0.14" 24 | sqlite-vfs = { path = "../sqlite-vfs", features = ["syscall", "loadext"] } 25 | time = { version = "0.3", features = ["std", "macros", "serde", "parsing"] } 26 | thiserror = "1.0" 27 | url = "2.4" 28 | 29 | [target.'cfg(unix)'.dependencies] 30 | libc = "0.2" 31 | 32 | [target.'cfg(windows)'.dependencies] 33 | winapi = { version = "0.3", features = ["std", "fileapi"] } 34 | 35 | [target.'cfg(not(target_os = "emscripten"))'.dependencies] 36 | ureq = { version = "2.8", features = ["json"] } 37 | 38 | [target.'cfg(target_os = "emscripten")'.dependencies] 39 | emscripten-sys = { path = "../emscripten-sys" } 40 | 41 | [dev-dependencies] 42 | serde_test = "1.0" 43 | -------------------------------------------------------------------------------- /crates/sqlite-vfs/README.md: -------------------------------------------------------------------------------- 1 | # `sqlite-vfs` 2 | 3 | Build SQLite virtual file systems 
(VFS) by implementing a simple Rust trait. 4 | 5 | [Documentation](https://docs.rs/sqlite-vfs) | [Example](https://github.com/rkusa/wasm-sqlite/blob/main/wasm/src/vfs.rs) 6 | 7 | This library is built for my own use-case. It doesn't expose everything a SQLite VFS provides (e.g. memory-mapped files). Feel free to propose additions if the current state doesn't work for your use-case. 8 | 9 | ## Status 10 | 11 | This library is still in _prototype_ state and not ready to be used (except for maybe prototypes). While progress will be slow, it is actively worked on. 12 | 13 | - ✅ It passes most of SQLite's TCL test harness. 14 | - ⚠️ CI only runs `full.test` and not `all.test`. 15 | - ⚠️ [Some tests](./test-vfs/patch.sh) are skipped. 16 | - ✅ Successfully runs experiments like [`do-sqlite`](https://github.com/rkusa/do-sqlite). 17 | - ⚠️ It uses `unsafe` Rust, which hasn't been peer-reviewed yet. 18 | - ⚠️ It is not used in any production capacity yet. 19 | 20 | ## Limitations 21 | 22 | - WAL is not supported (but in progress) 23 | - Memory mapping is not supported (`xFetch`/`xUnfetch`) 24 | - Loading extensions is not supported (`xDl*`) 25 | - Tests run only on UNIX right now (due to `std::os::unix` usage in tests) 26 | - Directory sync is not supported 27 | - Sector size is always 1024 28 | - Custom device characteristics are not supported (`xDeviceCharacteristics`) 29 | -------------------------------------------------------------------------------- /crates/litevfs/src/lib.rs: -------------------------------------------------------------------------------- 1 | mod database; 2 | mod ext; 3 | mod http; 4 | mod leaser; 5 | mod lfsc; 6 | mod locks; 7 | mod pager; 8 | mod sqlite; 9 | mod syncer; 10 | mod vfs; 11 | 12 | use litetx as ltx; 13 | use sqlite_vfs::ffi; 14 | use std::{collections::HashMap, fmt}; 15 | 16 | /// A custom SQLite error code to indicate that LFSC no longer has the 17 | /// required state and LiteVFS can't recover from this in the middle of 18 | /// a transaction. 'POS' in hex, which is hopefully large enough to never 19 | /// collide with an upstream error code.
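/// (SQLite forms extended result codes as `primary | (n << 8)` — e.g. `SQLITE_IOERR_READ` is `SQLITE_IOERR | (1 << 8)` — so this constant keeps the `SQLITE_IOERR` primary class while using an `n` far outside the small values SQLite assigns itself.)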
20 | const LITEVFS_IOERR_POS_MISMATCH: i32 = ffi::SQLITE_IOERR | (0x504F53 << 8); 21 | 22 | struct OptionLogger<'a, T>(&'a Option<T>); 23 | 24 | impl<'a, T> fmt::Display for OptionLogger<'a, T> 25 | where 26 | T: fmt::Display, 27 | { 28 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { 29 | if let Some(inner) = self.0 { 30 | inner.fmt(f) 31 | } else { 32 | write!(f, "<none>") 33 | } 34 | } 35 | } 36 | 37 | struct IterLogger<T>(T); 38 | 39 | impl<T, I> fmt::Display for IterLogger<T> 40 | where 41 | T: IntoIterator<Item = I> + Copy, 42 | I: fmt::Display, 43 | { 44 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { 45 | write!(f, "[")?; 46 | for (i, pgno) in self.0.into_iter().enumerate() { 47 | if i > 0 { 48 | write!(f, ", ")?; 49 | } 50 | write!(f, "{}", pgno)?; 51 | } 52 | 53 | write!(f, "]") 54 | } 55 | } 56 | 57 | struct PositionsLogger<'a>(&'a HashMap<String, Option<ltx::Pos>>); 58 | 59 | impl<'a> fmt::Display for PositionsLogger<'a> { 60 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 61 | write!(f, "[")?; 62 | 63 | for (i, (db, pos)) in self.0.iter().enumerate() { 64 | if i > 0 { 65 | write!(f, ", ")?; 66 | } 67 | write!(f, "{}={}", db, OptionLogger(pos))?; 68 | } 69 | 70 | write!(f, "]") 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /crates/xtask/src/build_wasm.rs: -------------------------------------------------------------------------------- 1 | use duct::cmd; 2 | use std::{env, fs}; 3 | 4 | use crate::DynError; 5 | 6 | pub fn build_wasm(version: &str) -> Result<(), DynError> { 7 | let metadata = cargo_metadata::MetadataCommand::new().exec()?; 8 | let sqlite_dir = metadata 9 | .target_directory 10 | .join(format!("sqlite-src-{}", version)); 11 | let wasm_dir = metadata.target_directory.join("sqlite3-wasm"); 12 | let zip_name = metadata 13 | .target_directory 14 | .join(format!("sqlite-src-{}.zip", version)); 15 | 16 | cmd!( 17 | "cargo", 18 | "build", 19 | "--target", 20 | "wasm32-unknown-emscripten", 21 | "--package", 22 | "litevfs", 23 | "--release" 24 | ) 25 | .run()?; 26 | 27 | if !zip_name.exists() { 28 | println!("Downloading SQLite v{}", version); 29 | cmd!( 30 | "curl", 31 | "-L", 32 | "-o", 33 | &zip_name, 34 | format!("https://sqlite.org/2023/sqlite-src-{}.zip", version) 35 | ) 36 | .run()?; 37 | } 38 | 39 | if sqlite_dir.exists() { 40 | fs::remove_dir_all(&sqlite_dir)?; 41 | } 42 | 43 | cmd!("unzip", "-d", &metadata.target_directory, &zip_name).run()?; 44 | 45 | env::set_current_dir(sqlite_dir)?; 46 | 47 | cmd!("./configure", "--enable-all").run()?; 48 | cmd!("make", "sqlite3.c").run()?; 49 | 50 | env::set_current_dir("ext/wasm")?; 51 | 52 | cmd!( 53 | "make", 54 | format!( 55 | "sqlite3_wasm_extra_init.c={}/wasm32-unknown-emscripten/release/liblitevfs.a", 56 | metadata.target_directory 57 | ), 58 | "emcc.flags=-s EXTRA_EXPORTED_RUNTIME_METHODS=['ENV'] -s FETCH", 59 | "release" 60 | ) 61 | .run()?; 62 | 63 | fs::create_dir_all(&wasm_dir)?; 64 | fs::copy("jswasm/sqlite3.js", wasm_dir.join("sqlite3.js"))?; 65 | fs::copy("jswasm/sqlite3.wasm", wasm_dir.join("sqlite3.wasm"))?; 66 | 67 | println!("!!!!!!!!!!
DONE !!!!!!!!!!"); 68 | println!("The artifacts are in {}", wasm_dir); 69 | 70 | Ok(()) 71 | } 72 | -------------------------------------------------------------------------------- /npm/litevfs/scripts/install.js: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | 'use strict'; 4 | const path = require('path'); 5 | const process = require('node:process'); 6 | const fs = require('node:fs'); 7 | 8 | const supportedPlatforms = [ 9 | ["darwin", "x64"], 10 | ["darwin", "arm64"], 11 | ["linux", "x64"], 12 | ["linux", "arm64"], 13 | ["win32", "x64"], 14 | ]; 15 | 16 | function validPlatform(platform, arch) { 17 | return ( 18 | supportedPlatforms.find(([p, a]) => platform === p && arch === a) !== undefined 19 | ); 20 | } 21 | 22 | function extensionPrefix(platform) { 23 | if (platform === "win32") return ""; 24 | return "lib"; 25 | } 26 | 27 | function extensionSuffix(platform) { 28 | if (platform === "win32") return "dll"; 29 | if (platform === "darwin") return "dylib"; 30 | return "so"; 31 | } 32 | 33 | function platformPackageName(platform, arch) { 34 | function isMusl() { 35 | if (!process.report || typeof process.report.getReport !== 'function') { 36 | try { 37 | return fs.readFileSync('/usr/bin/ldd', 'utf8').includes('musl') 38 | } catch (e) { 39 | return true 40 | } 41 | } else { 42 | const { glibcVersionRuntime } = process.report.getReport().header 43 | return !glibcVersionRuntime 44 | } 45 | } 46 | 47 | const os = platform === "win32" ? "windows" : platform; 48 | const abi = platform === "linux" ? (isMusl() ? "-musl" : "-gnu") : ""; 49 | 50 | return `litevfs-${os}-${arch}${abi}`; 51 | } 52 | 53 | var requireFunc = 54 | typeof __webpack_require__ === 'function' 55 | ? __non_webpack_require__ 56 | : require; 57 | 58 | function getLoadablePath() { 59 | if (!validPlatform(process.platform, process.arch)) { 60 | throw new Error( 61 | `Unsupported platform for litevfs: on a ${process.platform}-${process.arch} machine, but not in supported platforms (${supportedPlatforms 62 | .map(([p, a]) => `${p}-${a}`) 63 | .join(",")}). Consult the litevfs NPM package README for details.` 64 | ); 65 | } 66 | 67 | const packageName = platformPackageName(process.platform, process.arch); 68 | const fileName = `${extensionPrefix(process.platform)}litevfs.${extensionSuffix(process.platform)}`; 69 | const loadablePath = requireFunc.resolve(packageName + "/lib/" + fileName); 70 | if (!fs.statSync(loadablePath, { throwIfNoEntry: false })) { 71 | throw new Error( 72 | `Loadable extension for litevfs not found. Was the ${packageName} package installed? Avoid using the --no-optional flag, as the optional dependencies for litevfs are required.` 73 | ); 74 | } 75 | 76 | return loadablePath; 77 | } 78 | 79 | const outPath = path.join(__dirname, "..", "build"); 80 | const extensionPath = getLoadablePath(); 81 | 82 | fs.mkdirSync(outPath, { recursive: true }); 83 | fs.copyFileSync(extensionPath, path.join(outPath, "litevfs")); -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # LiteVFS - LiteFS VFS implementation for serverless environments (WIP) 2 | 3 | LiteVFS is a Virtual Filesystem extension for SQLite that uses [LiteFS Cloud][litefs-cloud] as a backing store.
4 | 5 | ## SQLite CLI 6 | 7 | To test with SQLite CLI: 8 | 9 | 1) Build the extension: 10 | ``` 11 | $ cargo build --release 12 | ``` 13 | 14 | 1) Provide the `LITEFS_CLOUD_TOKEN` env variable and load the extension 15 | 16 | ``` 17 | $ LITEFS_CLOUD_TOKEN=<token> sqlite3 18 | sqlite> .load target/release/liblitevfs.so 19 | ``` 20 | 21 | 1) Open the database 22 | ``` 23 | sqlite> .open file:db1?vfs=litevfs 24 | ``` 25 | 26 | That's it. It should work now. The database is stored in a random directory under `/tmp`. 27 | 28 | To enable debug logging, run the `sqlite3` binary like this: 29 | 30 | ``` 31 | $ RUST_LOG=trace sqlite3 32 | ``` 33 | 34 | The following environment variables are handled by LiteVFS: 35 | 36 | - `LITEFS_CLOUD_TOKEN` - LiteFS Cloud token (mandatory) 37 | - `LITEFS_CLOUD_CLUSTER` - LiteFS Cloud cluster (optional for cluster-scoped tokens, mandatory otherwise) 38 | - `LITEFS_CLOUD_HOST` - LiteFS Cloud host (optional, defaults to https://litefs.fly.io) 39 | - `LITEVFS_CACHE_DIR` - cache directory for databases (optional, random directory under `/tmp` if not specified) 40 | - `LITEVFS_LOG_FILE` - log into the given file instead of stderr 41 | 42 | The same shared library can be loaded from any language using its SQLite bindings. 43 | 44 | ### Modifying the database through LiteVFS 45 | 46 | In order to modify a database, the LiteVFS instance must hold a write lease for the database. A write lease can be obtained 47 | via a pragma statement: 48 | 49 | ``` 50 | sqlite> pragma litevfs_acquire_lease; 51 | sqlite> 52 | sqlite> pragma litevfs_release_lease; 53 | ``` 54 | 55 | Only one LiteVFS instance can hold a write lease for a specific database at a time. 56 | 57 | ### Limitations 58 | 59 | * Databases with `journal_mode=wal` cannot be modified via LiteVFS (but can be read) 60 | * Databases with auto-vacuum cannot be opened via LiteVFS at all 61 | * `VACUUM` is not supported 62 | 63 | ## Building LiteVFS for browsers 64 | 65 | The build process uses the Emscripten target, so the Emscripten SDK needs to be installed and configured on the system. 66 | Refer to the Emscripten docs on how to do this. Alternatively, the `flake.nix` file in this repo includes all the 67 | required dependencies. 68 | 69 | To build, simply run: 70 | 71 | ```sh 72 | $ cargo xtask build-wasm 73 | ``` 74 | 75 | The command will build LiteVFS with Emscripten, download the SQLite3 sources, build them with Emscripten and link them with LiteVFS. 76 | At this point you should have the `target/sqlite3-wasm/sqlite3.{js,wasm}` files. 77 | 78 | Note that since LiteVFS uses Emscripten's synchronous FETCH API, SQLite3 can only be used from a Worker thread, not from the 79 | main browser UI thread.
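## Using the npm package

The `npm/litevfs` package in this repository wraps better-sqlite3 and loads the LiteVFS extension when a database is opened. A minimal sketch, assuming the meta package is installed as `litevfs` (the `with_write_lease` helper comes from `npm/litevfs/lib/database.js`):

```js
const Database = require('litevfs');

// LITEFS_CLOUD_TOKEN must be set in the environment before opening.
const db = new Database('db1');

// Writes require a write lease; the helper acquires the lease, runs the
// callback, and releases the lease even if the callback throws.
db.with_write_lease(() => {
  db.prepare('CREATE TABLE IF NOT EXISTS t (x)').run();
  db.prepare('INSERT INTO t VALUES (?)').run(42);
});
```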
80 | 81 | [litefs-cloud]: https://fly.io/docs/litefs/ -------------------------------------------------------------------------------- /crates/xtask/src/main.rs: -------------------------------------------------------------------------------- 1 | mod build_npm; 2 | mod build_wasm; 3 | 4 | use clap::{Arg, Command}; 5 | use std::{error::Error, path::PathBuf}; 6 | 7 | type DynError = Box<dyn Error>; 8 | 9 | const DEFAULT_SQLITE_VERSION: &str = "3430000"; 10 | 11 | fn main() -> Result<(), DynError> { 12 | let matches = Command::new("xtask") 13 | .subcommand_required(true) 14 | .arg_required_else_help(true) 15 | .subcommand( 16 | Command::new("build-wasm") 17 | .about("Build SQLite3 + LiteVFS WASM distribution") 18 | .arg( 19 | Arg::new("version") 20 | .short('v') 21 | .long("version") 22 | .default_value(DEFAULT_SQLITE_VERSION) 23 | .help("SQLite3 version"), 24 | ), 25 | ) 26 | .subcommand(Command::new("build-npm-meta").about("Build LiteVFS NPM meta package")) 27 | .subcommand( 28 | Command::new("build-npm-binary") 29 | .about("Build LiteVFS binary NPM package") 30 | .arg( 31 | Arg::new("lib") 32 | .short('l') 33 | .long("lib") 34 | .required(true) 35 | .value_parser(clap::builder::ValueParser::path_buf()) 36 | .help("Path to LiteVFS shared library"), 37 | ) 38 | .arg( 39 | Arg::new("cpu") 40 | .short('c') 41 | .long("cpu") 42 | .required(true) 43 | .help("CPU architecture"), 44 | ) 45 | .arg( 46 | Arg::new("os") 47 | .short('o') 48 | .long("os") 49 | .required(true) 50 | .help("Target OS"), 51 | ) 52 | .arg(Arg::new("abi").short('a').long("abi").help("System ABI")), 53 | ) 54 | .get_matches(); 55 | 56 | match matches.subcommand() { 57 | Some(("build-wasm", sub_matches)) => crate::build_wasm::build_wasm( 58 | sub_matches 59 | .get_one::<String>("version") 60 | .expect("`version` is required"), 61 | )?, 62 | Some(("build-npm-meta", _)) => crate::build_npm::build_npm_meta()?, 63 | Some(("build-npm-binary", sub_matches)) => { 64 | crate::build_npm::build_npm_binary( 65 | sub_matches.get_one::<PathBuf>("lib").cloned().unwrap(), 66 | sub_matches.get_one::<String>("cpu").cloned().unwrap(), 67 | sub_matches.get_one::<String>("os").cloned().unwrap(), 68 | sub_matches.get_one::<String>("abi").cloned(), 69 | )?; 70 | } 71 | _ => unreachable!(), 72 | }; 73 | 74 | Ok(()) 75 | } 76 | -------------------------------------------------------------------------------- /flake.lock: -------------------------------------------------------------------------------- 1 | { 2 | "nodes": { 3 | "fenix": { 4 | "inputs": { 5 | "nixpkgs": [ 6 | "nixpkgs" 7 | ], 8 | "rust-analyzer-src": "rust-analyzer-src" 9 | }, 10 | "locked": { 11 | "lastModified": 1712125370, 12 | "narHash": "sha256-hrfohhBMoeG+cZIt5hr+QDo5Y4ZU52pVu9lGh0Efong=", 13 | "owner": "nix-community", 14 | "repo": "fenix", 15 | "rev": "97d6dbee004b24057fa2a6f2e4a5c535259d8aed", 16 | "type": "github" 17 | }, 18 | "original": { 19 | "owner": "nix-community", 20 | "repo": "fenix", 21 | "type": "github" 22 | } 23 | }, 24 | "flake-utils": { 25 | "inputs": { 26 | "systems": "systems" 27 | }, 28 | "locked": { 29 | "lastModified": 1710146030, 30 | "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=", 31 | "owner": "numtide", 32 | "repo": "flake-utils", 33 | "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a", 34 | "type": "github" 35 | }, 36 | "original": { 37 | "owner": "numtide", 38 | "repo": "flake-utils", 39 | "type": "github" 40 | } 41 | }, 42 | "nixpkgs": { 43 | "locked": { 44 | "lastModified": 1712131175, 45 | "narHash": "sha256-NHTMPMafvYn4ZVIAmfNvjJldruAEBnsGmvLLEyeDDxs=", 46 | "owner": "nixos", 47 |
"repo": "nixpkgs", 48 | "rev": "05b38103f1c38374dd0b09643681ee6ed55d74b7", 49 | "type": "github" 50 | }, 51 | "original": { 52 | "owner": "nixos", 53 | "repo": "nixpkgs", 54 | "type": "github" 55 | } 56 | }, 57 | "root": { 58 | "inputs": { 59 | "fenix": "fenix", 60 | "flake-utils": "flake-utils", 61 | "nixpkgs": "nixpkgs" 62 | } 63 | }, 64 | "rust-analyzer-src": { 65 | "flake": false, 66 | "locked": { 67 | "lastModified": 1712067202, 68 | "narHash": "sha256-pisa+RuBVSf6D7YtUymrT9WVKcy1D3FLE1Ty9MIOrFo=", 69 | "owner": "rust-lang", 70 | "repo": "rust-analyzer", 71 | "rev": "c3b8c2a25413e2aa58295d18c12902a624471b74", 72 | "type": "github" 73 | }, 74 | "original": { 75 | "owner": "rust-lang", 76 | "ref": "nightly", 77 | "repo": "rust-analyzer", 78 | "type": "github" 79 | } 80 | }, 81 | "systems": { 82 | "locked": { 83 | "lastModified": 1681028828, 84 | "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", 85 | "owner": "nix-systems", 86 | "repo": "default", 87 | "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", 88 | "type": "github" 89 | }, 90 | "original": { 91 | "owner": "nix-systems", 92 | "repo": "default", 93 | "type": "github" 94 | } 95 | } 96 | }, 97 | "root": "root", 98 | "version": 7 99 | } 100 | -------------------------------------------------------------------------------- /crates/xtask/src/build_npm.rs: -------------------------------------------------------------------------------- 1 | use duct::cmd; 2 | use std::{env, fs, path::PathBuf}; 3 | 4 | use crate::DynError; 5 | 6 | pub fn build_npm_binary( 7 | lib: PathBuf, 8 | cpu: String, 9 | os: String, 10 | abi: Option, 11 | ) -> Result<(), DynError> { 12 | let metadata = cargo_metadata::MetadataCommand::new().exec()?; 13 | let pkg_dir = env::temp_dir().join(if let Some(ref abi) = abi { 14 | format!("litevfs-{}-{}-{}", os, cpu, abi) 15 | } else { 16 | format!("litevfs-{}-{}", os, cpu) 17 | }); 18 | let lib_dir = pkg_dir.join("lib"); 19 | let npm_dir = metadata.target_directory.join("npm"); 20 | 21 | let version = metadata 22 | .packages 23 | .iter() 24 | .find(|p| p.name == "litevfs") 25 | .map(|p| p.version.to_string()) 26 | .ok_or("Can't find LiteVFS version")?; 27 | 28 | fs::create_dir_all(&pkg_dir)?; 29 | fs::create_dir_all(&lib_dir)?; 30 | fs::create_dir_all(&npm_dir)?; 31 | 32 | fs::copy(&lib, lib_dir.join(lib.file_name().unwrap()))?; 33 | let package_json = fs::read_to_string( 34 | metadata 35 | .workspace_root 36 | .join("npm") 37 | .join("package.json.tmpl"), 38 | )?; 39 | let package_json = package_json 40 | .replace("{OS}", &os) 41 | .replace("{ARCH}", &cpu) 42 | .replace("{VERSION}", &version); 43 | let package_json = if let Some(abi) = abi { 44 | package_json.replace("{ABI}", &format!("-{}", abi)) 45 | } else { 46 | package_json.replace("{ABI}", "") 47 | }; 48 | 49 | fs::write(pkg_dir.join("package.json"), package_json)?; 50 | 51 | env::set_current_dir(npm_dir)?; 52 | 53 | cmd!("npm", "pack", pkg_dir).run()?; 54 | 55 | Ok(()) 56 | } 57 | 58 | pub fn build_npm_meta() -> Result<(), DynError> { 59 | let metadata = cargo_metadata::MetadataCommand::new().exec()?; 60 | let pkg_dir = env::temp_dir().join("litevfs-meta"); 61 | let lib_dir = pkg_dir.join("lib"); 62 | let scripts_dir = pkg_dir.join("scripts"); 63 | let npm_dir = metadata.target_directory.join("npm"); 64 | 65 | let version = metadata 66 | .packages 67 | .iter() 68 | .find(|p| p.name == "litevfs") 69 | .map(|p| p.version.to_string()) 70 | .ok_or("Can't find LiteVFS version")?; 71 | 72 | fs::create_dir_all(&pkg_dir)?; 73 | fs::create_dir_all(&lib_dir)?; 
74 | fs::create_dir_all(&scripts_dir)?; 75 | fs::create_dir_all(&npm_dir)?; 76 | 77 | for file in fs::read_dir( 78 | metadata 79 | .workspace_root 80 | .join("npm") 81 | .join("litevfs") 82 | .join("lib"), 83 | )? { 84 | let file = file?; 85 | fs::copy(&file.path(), lib_dir.join(file.file_name()))?; 86 | } 87 | 88 | for file in fs::read_dir( 89 | metadata 90 | .workspace_root 91 | .join("npm") 92 | .join("litevfs") 93 | .join("scripts"), 94 | )? { 95 | let file = file?; 96 | fs::copy(&file.path(), scripts_dir.join(file.file_name()))?; 97 | } 98 | 99 | let package_json = fs::read_to_string( 100 | metadata 101 | .workspace_root 102 | .join("npm") 103 | .join("litevfs") 104 | .join("package.json.tmpl"), 105 | )?; 106 | let package_json = package_json.replace("{VERSION}", &version); 107 | 108 | fs::write(pkg_dir.join("package.json"), package_json)?; 109 | 110 | env::set_current_dir(npm_dir)?; 111 | 112 | cmd!("npm", "pack", pkg_dir).run()?; 113 | 114 | Ok(()) 115 | } 116 | -------------------------------------------------------------------------------- /crates/litevfs/src/ext.rs: -------------------------------------------------------------------------------- 1 | use crate::{lfsc, vfs::LiteVfs}; 2 | use rand::distributions::{Alphanumeric, DistString}; 3 | use sqlite_vfs::{ffi, RegisterError}; 4 | use std::{env, fs, process}; 5 | 6 | fn init_logger() { 7 | let target = if let Ok(filename) = env::var("LITEVFS_LOG_FILE") { 8 | env_logger::Target::Pipe(Box::new( 9 | fs::File::create(filename).expect("can't open log file"), 10 | )) 11 | } else { 12 | env_logger::Target::Stderr 13 | }; 14 | 15 | env_logger::Builder::from_env(env_logger::Env::default()) 16 | .target(target) 17 | .try_init() 18 | .ok(); 19 | } 20 | 21 | fn prepare() -> Result<(lfsc::Client, String), Box<dyn std::error::Error>> { 22 | let client = lfsc::Client::from_env()?; 23 | 24 | let cache_dir = env::var("LITEVFS_CACHE_DIR").unwrap_or(format!( 25 | "/tmp/litevfs-{}-{}", 26 | process::id(), 27 | Alphanumeric.sample_string(&mut rand::thread_rng(), 8) 28 | )); 29 | fs::create_dir_all(&cache_dir)?; 30 | 31 | Ok((client, cache_dir)) 32 | } 33 | 34 | #[no_mangle] 35 | #[cfg(not(target_os = "emscripten"))] 36 | #[allow(non_snake_case)] 37 | pub extern "C" fn sqlite3_litevfs_init( 38 | _db: *mut ffi::sqlite3, 39 | pzErrMsg: *mut *mut std::ffi::c_char, 40 | pApi: *mut ffi::sqlite3_api_routines, 41 | ) -> std::ffi::c_int { 42 | litevfs_init(pzErrMsg, pApi, false) 43 | } 44 | 45 | #[no_mangle] 46 | #[cfg(not(target_os = "emscripten"))] 47 | #[allow(non_snake_case)] 48 | pub extern "C" fn sqlite3_litevfs_init_default_vfs( 49 | _db: *mut ffi::sqlite3, 50 | pzErrMsg: *mut *mut std::ffi::c_char, 51 | pApi: *mut ffi::sqlite3_api_routines, 52 | ) -> std::ffi::c_int { 53 | litevfs_init(pzErrMsg, pApi, true) 54 | } 55 | 56 | #[cfg(not(target_os = "emscripten"))] 57 | #[allow(non_snake_case)] 58 | fn litevfs_init( 59 | pzErrMsg: *mut *mut std::ffi::c_char, 60 | pApi: *mut ffi::sqlite3_api_routines, 61 | as_default: bool, 62 | ) -> std::ffi::c_int { 63 | use std::{ffi::CString, ptr}; 64 | 65 | init_logger(); 66 | 67 | log::info!("registering LiteVFS"); 68 | let (client, cache_dir) = match prepare() { 69 | Ok(ret) => ret, 70 | Err(err) if !pzErrMsg.is_null() => { 71 | let msg = CString::new(err.to_string()).unwrap(); 72 | let msg_slice = msg.to_bytes_with_nul(); 73 | unsafe { 74 | *pzErrMsg = 75 | (*pApi).malloc64.unwrap()(msg_slice.len() as u64) as *mut std::ffi::c_char; 76 | ptr::copy( 77 | msg_slice.as_ptr() as *const std::ffi::c_char, 78 | *pzErrMsg, 79 | msg_slice.len(),
80 | ); 81 | }; 82 | return ffi::SQLITE_ERROR; 83 | } 84 | Err(err) => { 85 | log::warn!("{}", err); 86 | return ffi::SQLITE_ERROR; 87 | } 88 | }; 89 | 90 | let code = match unsafe { sqlite_vfs::DynamicExtension::build(pApi) }.register( 91 | "litevfs", 92 | LiteVfs::new(cache_dir, client), 93 | as_default, 94 | ) { 95 | Ok(_) => ffi::SQLITE_OK_LOAD_PERMANENTLY, 96 | Err(RegisterError::Nul(_)) => ffi::SQLITE_ERROR, 97 | Err(RegisterError::Register(code)) => code, 98 | }; 99 | log::debug!("register(litevfs) -> {}", code); 100 | 101 | code 102 | } 103 | 104 | #[no_mangle] 105 | #[cfg(target_os = "emscripten")] 106 | pub extern "C" fn sqlite3_wasm_extra_init(_unused: *const std::ffi::c_char) -> std::ffi::c_int { 107 | init_logger(); 108 | 109 | log::info!("registering LiteVFS"); 110 | let (client, cache_dir) = match prepare() { 111 | Ok(ret) => ret, 112 | Err(err) => { 113 | log::warn!("{}", err); 114 | return ffi::SQLITE_ERROR; 115 | } 116 | }; 117 | 118 | let code = match sqlite_vfs::LinkedExtension::build().register( 119 | "litevfs", 120 | LiteVfs::new(cache_dir, client), 121 | true, 122 | ) { 123 | Ok(_) => ffi::SQLITE_OK, 124 | Err(RegisterError::Nul(_)) => ffi::SQLITE_ERROR, 125 | Err(RegisterError::Register(code)) => code, 126 | }; 127 | log::debug!("register(litevfs) -> {}", code); 128 | 129 | code 130 | } 131 | -------------------------------------------------------------------------------- /crates/litevfs/src/sqlite.rs: -------------------------------------------------------------------------------- 1 | use std::{collections::BTreeSet, ops}; 2 | 3 | use litetx as ltx; 4 | 5 | pub(crate) const HEADER_SIZE: usize = 100; 6 | pub(crate) const WRITE_VERSION_OFFSET: usize = 18; 7 | pub(crate) const READ_VERSION_OFFSET: usize = 19; 8 | pub(crate) const COMMIT_RANGE: ops::Range<usize> = 28..32; 9 | 10 | pub(crate) fn prefetch_candidates( 11 | data: &[u8], 12 | pgno: ltx::PageNum, 13 | ) -> Option<BTreeSet<ltx::PageNum>> { 14 | let bh = if pgno == ltx::PageNum::ONE { 15 | &data[HEADER_SIZE..] 16 | } else { 17 | data 18 | }; 19 | 20 | let num_cells = u16::from_be_bytes(bh[3..5].try_into().unwrap()); 21 | match bh[0] { 22 | 0x0d if pgno == ltx::PageNum::ONE => Some(master_table(&bh[8..], data, num_cells)), 23 | 0x02 | 0x05 => { 24 | let rightmost_pointer = u32::from_be_bytes(bh[8..12].try_into().unwrap()); 25 | let mut pgnos = interior_table_or_index(&bh[12..], data, num_cells); 26 | if let Ok(pgno) = ltx::PageNum::new(rightmost_pointer) { 27 | pgnos.insert(pgno); 28 | } 29 | 30 | Some(pgnos) 31 | } 32 | _ => None, 33 | } 34 | } 35 | 36 | // Returns the page numbers of the roots of all tables/indices/etc. 37 | fn master_table(pointers: &[u8], data: &[u8], num_cells: u16) -> BTreeSet<ltx::PageNum> { 38 | pointers[..num_cells as usize * 2] 39 | .chunks_exact(2) 40 | .map(|c| u16::from_be_bytes(c.try_into().unwrap()) as usize) 41 | .filter_map(|cell| { 42 | if cell >= data.len() { 43 | return None; 44 | } 45 | let cell = &data[cell..]; 46 | let (length, cell) = read_varint(cell); 47 | let (_rowid, cell) = read_varint(cell); 48 | 49 | // Has overflow page, ignore for now.
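// (A cell whose payload exceeds what fits in the page spills into an overflow page chain; chasing that chain isn't worth it for a prefetch hint, so such cells are simply skipped by the size check below.)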
50 | if length as usize > data.len() - 35 { 51 | return None; 52 | } 53 | 54 | let (hsize, mut header) = read_varint(cell); 55 | let body = &cell[hsize as usize..]; 56 | 57 | // skip type/name/tbl_name 58 | let mut pgno_offset: usize = 0; 59 | for _ in 0..3 { 60 | let (typ, header2) = read_varint(header); 61 | pgno_offset += type_size(typ); 62 | 63 | header = header2; 64 | } 65 | 66 | let (pgno, _) = read_varint(&body[pgno_offset..]); 67 | 68 | ltx::PageNum::new(pgno as u32).ok() 69 | }) 70 | .collect() 71 | } 72 | 73 | // Returns the page numbers of the pages referenced by an interior table or index page. 74 | fn interior_table_or_index(pointers: &[u8], data: &[u8], num_cells: u16) -> BTreeSet<ltx::PageNum> { 75 | pointers[..num_cells as usize * 2] 76 | .chunks_exact(2) 77 | .map(|c| u16::from_be_bytes(c.try_into().unwrap()) as usize) 78 | .filter_map(|cell| { 79 | if cell >= data.len() { 80 | return None; 81 | } 82 | let cell = &data[cell..]; 83 | 84 | let pgno = u32::from_be_bytes(cell[0..4].try_into().unwrap()); 85 | ltx::PageNum::new(pgno).ok() 86 | }) 87 | .collect() 88 | } 89 | 90 | fn read_varint(data: &[u8]) -> (i64, &[u8]) { 91 | let mut n: i64 = 0; 92 | for (i, &b) in data.iter().enumerate() { 93 | if i == 8 { 94 | n = (n << 8) | (b as i64); 95 | return (n, &data[i + 1..]); 96 | } 97 | 98 | n = (n << 7) | ((b as i64) & 0x7f); 99 | if b < 0x80 { 100 | return (n, &data[i + 1..]); 101 | } 102 | } 103 | 104 | unreachable!(); 105 | } 106 | 107 | fn type_size(typ: i64) -> usize { 108 | match typ { 109 | // NULL, 0 or 1 110 | 0 | 8 | 9 => 0, 111 | // 8-bit int 112 | 1 => 1, 113 | // 16-bit int 114 | 2 => 2, 115 | // 24-bit int 116 | 3 => 3, 117 | // 32-bit int 118 | 4 => 4, 119 | // 48-bit int 120 | 5 => 6, 121 | // 64-bit int 122 | 6 => 8, 123 | // float 124 | 7 => 8, 125 | // internal, should not be present in valid DBs 126 | 10 | 11 => unreachable!(), 127 | n if n % 2 == 0 => ((n - 12) / 2) as usize, 128 | n => ((n - 13) / 2) as usize, 129 | } 130 | } 131 | -------------------------------------------------------------------------------- /crates/litevfs/src/leaser.rs: -------------------------------------------------------------------------------- 1 | #[cfg(not(target_os = "emscripten"))] 2 | pub(crate) use native::Leaser; 3 | 4 | #[cfg(target_os = "emscripten")] 5 | pub(crate) use emscripten::Leaser; 6 | 7 | #[cfg(not(target_os = "emscripten"))] 8 | mod native { 9 | use crate::lfsc; 10 | use std::{ 11 | collections::HashMap, 12 | io, 13 | sync::{Arc, Mutex}, 14 | thread, 15 | }; 16 | 17 | pub(crate) struct Leaser { 18 | client: Arc<lfsc::Client>, 19 | leases: Mutex<HashMap<String, lfsc::Lease>>, 20 | 21 | duration: std::time::Duration, 22 | notifier: crossbeam_channel::Sender<()>, 23 | } 24 | 25 | impl Leaser { 26 | pub(crate) fn new(client: Arc<lfsc::Client>, duration: std::time::Duration) -> Arc<Leaser> { 27 | let (tx, rx) = crossbeam_channel::unbounded(); 28 | let leaser = Arc::new(Leaser { 29 | client, 30 | leases: Mutex::new(HashMap::new()), 31 | duration, 32 | notifier: tx, 33 | }); 34 | 35 | thread::spawn({ 36 | let leaser = Arc::clone(&leaser); 37 | 38 | move || leaser.run(rx) 39 | }); 40 | 41 | leaser 42 | } 43 | 44 | pub(crate) fn acquire_lease(&self, db: &str) -> io::Result<()> { 45 | let lease = match self 46 | .client 47 | .acquire_lease(db, lfsc::LeaseOp::Acquire(self.duration)) 48 | { 49 | Ok(lease) => { 50 | log::debug!("[leaser] acquire_lease: db = {}: {}", db, lease); 51 | lease 52 | } 53 | Err(err) => { 54 | log::warn!("[leaser] acquire_lease: db = {}: {}", db, err); 55 | return Err(err.into()); 56 | } 57 | }; 58 |
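// Record the new lease and wake the background refresher so it can recompute its schedule.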
59 | self.leases.lock().unwrap().insert(db.into(), lease); 60 | self.notify(); 61 | 62 | Ok(()) 63 | } 64 | 65 | pub(crate) fn release_lease(&self, db: &str) -> io::Result<()> { 66 | if let Some(lease) = self.leases.lock().unwrap().remove(db) { 67 | match self.client.release_lease(db, lease) { 68 | Ok(()) => { 69 | log::debug!("[leaser] release_lease: db = {}", db); 70 | } 71 | Err(err) => { 72 | log::warn!("[leaser] release_lease: db = {}: {}", db, err); 73 | return Err(err.into()); 74 | } 75 | }; 76 | self.notify(); 77 | } 78 | 79 | Ok(()) 80 | } 81 | 82 | pub(crate) fn get_lease(&self, db: &str) -> io::Result<String> { 83 | self.leases 84 | .lock() 85 | .unwrap() 86 | .get(db) 87 | .map(|lease| lease.id.clone()) 88 | .ok_or_else(|| io::Error::new(io::ErrorKind::PermissionDenied, "lease not found")) 89 | } 90 | 91 | fn notify(&self) { 92 | self.notifier.send(()).unwrap(); 93 | } 94 | 95 | fn run(&self, rx: crossbeam_channel::Receiver<()>) { 96 | use crossbeam_channel::{after, select}; 97 | use time::OffsetDateTime; 98 | 99 | let min_period = self.duration / 3; 100 | 101 | loop { 102 | // TODO: we probably won't have a lot of leases, but might need to optimize later 103 | let first = { 104 | let leases = self.leases.lock().unwrap(); 105 | leases 106 | .iter() 107 | .min_by_key(|(_, lease)| lease.expires_at) 108 | .map(|(db, lease)| (db.clone(), lease.clone())) 109 | }; 110 | let (db, lease) = if let Some((db, lease)) = first { 111 | (db, lease) 112 | } else { 113 | // No active leases, wait to get notified 114 | rx.recv().unwrap(); 115 | continue; 116 | }; 117 | 118 | let until_expires = lease.expires_at - OffsetDateTime::now_utc(); 119 | let wait_for = 120 | if until_expires.is_negative() || until_expires.unsigned_abs() < min_period { 121 | std::time::Duration::from_micros(1) 122 | } else { 123 | min_period 124 | }; 125 | 126 | select! { 127 | recv(rx) -> _ => continue, 128 | recv(after(wait_for)) -> _ => { 129 | // Check if we are still holding the lease 130 | if !self.leases.lock().unwrap().contains_key(&db) { 131 | continue 132 | } 133 | 134 | // This can potentially reacquire a released lease, but since it won't be 135 | // in the map anymore, it will expire by itself.
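// On a successful refresh the stored lease is swapped in place below; on failure it is dropped, but only if it is still the lease that was being refreshed.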
136 | log::debug!("[leaser] refreshing lease: db = {}, lease = {}", db, lease); 137 | match self.client.acquire_lease(&db, lfsc::LeaseOp::Refresh(&lease.id, self.duration)) { 138 | Ok(lease) => { 139 | self.leases.lock().unwrap().entry(db).and_modify(|old_lease| *old_lease = lease); 140 | }, 141 | Err(err) => { 142 | log::warn!("[leaser] failed to refresh lease: db = {}, lease = {}: {}", db, lease, err); 143 | // It's possible a new one has been acquired 144 | let mut leases = self.leases.lock().unwrap(); 145 | match leases.get(&db) { 146 | Some(l) if l.id == lease.id => { 147 | leases.remove(&db); 148 | }, 149 | _ => (), 150 | }; 151 | }, 152 | }; 153 | }, 154 | }; 155 | } 156 | } 157 | } 158 | 159 | 160 | #[cfg(target_os = "emscripten")] 161 | mod emscripten { 162 | use crate::lfsc; 163 | use std::{io, sync::Arc}; 164 | 165 | pub(crate) struct Leaser; 166 | 167 | impl Leaser { 168 | pub(crate) fn new( 169 | _client: Arc<lfsc::Client>, 170 | _duration: std::time::Duration, 171 | ) -> Arc<Leaser> { 172 | Arc::new(Leaser) 173 | } 174 | 175 | pub(crate) fn acquire_lease(&self, _db: &str) -> io::Result<()> { 176 | Err(io::Error::new( 177 | io::ErrorKind::Unsupported, 178 | "lease management is not supported", 179 | )) 180 | } 181 | 182 | pub(crate) fn release_lease(&self, _db: &str) -> io::Result<()> { 183 | Err(io::Error::new( 184 | io::ErrorKind::Unsupported, 185 | "lease management is not supported", 186 | )) 187 | } 188 | 189 | pub(crate) fn get_lease(&self, _db: &str) -> io::Result<String> { 190 | Err(io::Error::new( 191 | io::ErrorKind::Unsupported, 192 | "lease management is not supported", 193 | )) 194 | } 195 | } 196 | } 197 | -------------------------------------------------------------------------------- /.github/workflows/push.yml: -------------------------------------------------------------------------------- 1 | name: "Push" 2 | on: 3 | release: 4 | types: 5 | - created 6 | push: 7 | branches: 8 | - main 9 | pull_request: 10 | types: 11 | - opened 12 | - synchronize 13 | - reopened 14 | 15 | env: 16 | VERSION: "${{ github.event_name == 'release' && github.event.release.name || github.sha }}" 17 | 18 | jobs: 19 | lint: 20 | name: "Lint" 21 | runs-on: ubuntu-latest 22 | steps: 23 | - uses: actions/checkout@v4 24 | - uses: dtolnay/rust-toolchain@stable 25 | with: 26 | components: clippy, rustfmt 27 | - uses: Swatinem/rust-cache@v2 28 | 29 | - name: cargo fmt 30 | run: cargo fmt --all --check 31 | - name: cargo clippy 32 | run: cargo clippy --all-features -- --deny warnings 33 | - name: cargo check 34 | run: cargo check 35 | 36 | test: 37 | name: "Unit Tests" 38 | runs-on: "ubuntu-latest" 39 | steps: 40 | - uses: actions/checkout@v4 41 | - uses: dtolnay/rust-toolchain@stable 42 | with: 43 | components: clippy, rustfmt 44 | - uses: Swatinem/rust-cache@v2 45 | 46 | - name: Run unit tests 47 | run: cargo test --lib 48 | 49 | build: 50 | name: "Build" 51 | strategy: 52 | matrix: 53 | include: 54 | - os: "ubuntu-latest" 55 | target: "x86_64-unknown-linux-gnu" 56 | soname: "liblitevfs.so" 57 | - os: "ubuntu-latest" 58 | target: "x86_64-unknown-linux-musl" 59 | soname: "liblitevfs.so" 60 | - os: "ubuntu-latest" 61 | target: "aarch64-unknown-linux-gnu" 62 | soname: "liblitevfs.so" 63 | - os: "ubuntu-latest" 64 | target: "aarch64-unknown-linux-musl" 65 | soname: "liblitevfs.so" 66 | - os: "macos-latest" 67 | target: "x86_64-apple-darwin" 68 | soname: "liblitevfs.dylib" 69 | - os: "macos-latest" 70 | target: "aarch64-apple-darwin" 71 | soname: "liblitevfs.dylib" 72 | - os: "windows-latest" 73 | target:
"x86_64-pc-windows-msvc" 74 | soname: "litevfs.dll" 75 | runs-on: ${{ matrix.os }} 76 | steps: 77 | - uses: actions/checkout@v4 78 | - id: release 79 | uses: bruceadams/get-release@v1.3.2 80 | if: github.event_name == 'release' 81 | env: 82 | GITHUB_TOKEN: ${{ github.token }} 83 | - uses: dtolnay/rust-toolchain@stable 84 | with: 85 | targets: ${{ matrix.target }} 86 | components: clippy, rustfmt 87 | 88 | - uses: Swatinem/rust-cache@v2 89 | with: 90 | key: ${{ matrix.target }} 91 | 92 | - name: Install cross 93 | if: matrix.os == 'ubuntu-latest' 94 | uses: taiki-e/install-action@v2 95 | with: 96 | tool: cross 97 | 98 | - name: Build (cross) 99 | if: matrix.os == 'ubuntu-latest' 100 | run: cross build --package litevfs --release --target ${{ matrix.target }} 101 | 102 | - name: Build 103 | if: matrix.os != 'ubuntu-latest' 104 | run: cargo build --package litevfs --release --target ${{ matrix.target }} 105 | 106 | - name: Package 107 | run: | 108 | cd target/${{ matrix.target }}/release 109 | tar -czvf litevfs-${{ env.VERSION }}-${{ matrix.target }}.tar.gz ${{ matrix.soname }} 110 | 111 | - name: Upload binary artifact 112 | uses: actions/upload-artifact@v3 113 | with: 114 | name: litevfs-${{ env.VERSION }}-${{ matrix.target }} 115 | path: target/${{ matrix.target }}/release/${{ matrix.soname }} 116 | if-no-files-found: error 117 | 118 | - name: Upload release 119 | uses: actions/upload-release-asset@v1.0.2 120 | if: github.event_name == 'release' 121 | env: 122 | GITHUB_TOKEN: ${{ github.token }} 123 | with: 124 | upload_url: ${{ steps.release.outputs.upload_url }} 125 | asset_path: target/${{ matrix.target }}/release/litevfs-${{ env.VERSION }}-${{ matrix.target }}.tar.gz 126 | asset_name: litevfs-${{ env.VERSION }}-${{ matrix.target }}.tar.gz 127 | asset_content_type: application/gzip 128 | 129 | build-wasm: 130 | name: "Build WASM" 131 | runs-on: "ubuntu-latest" 132 | env: 133 | EM_VERSION: 3.1.42 134 | EM_CACHE_FOLDER: 'emsdk-cache' 135 | 136 | steps: 137 | - uses: actions/checkout@v4 138 | - id: release 139 | uses: bruceadams/get-release@v1.3.2 140 | if: github.event_name == 'release' 141 | env: 142 | GITHUB_TOKEN: ${{ github.token }} 143 | - uses: dtolnay/rust-toolchain@stable 144 | with: 145 | targets: wasm32-unknown-emscripten 146 | components: clippy, rustfmt 147 | - uses: mymindstorm/setup-emsdk@v12 148 | with: 149 | version: ${{ env.EM_VERSION }} 150 | actions-cache-folder: ${{env.EM_CACHE_FOLDER}} 151 | - name: Install WABT 152 | run: sudo apt-get install -y wabt 153 | - uses: Swatinem/rust-cache@v2 154 | - name: Setup cache 155 | uses: actions/cache@v3 156 | with: 157 | path: ${{ env.EM_CACHE_FOLDER }} 158 | key: ${{ env.EM_VERSION }}-${{ runner.os }} 159 | 160 | - name: Build 161 | run: | 162 | cargo xtask build-wasm 163 | cd target 164 | tar -czvf sqlite3-wasm-${{ env.VERSION }}.tar.gz sqlite3-wasm 165 | 166 | - name: Upload binary artifact 167 | uses: actions/upload-artifact@v3 168 | with: 169 | name: sqlite3-wasm-${{ env.VERSION }} 170 | path: target/sqlite3-wasm 171 | if-no-files-found: error 172 | 173 | - name: Upload release 174 | uses: actions/upload-release-asset@v1.0.2 175 | if: github.event_name == 'release' 176 | env: 177 | GITHUB_TOKEN: ${{ github.token }} 178 | with: 179 | upload_url: ${{ steps.release.outputs.upload_url }} 180 | asset_path: target/sqlite3-wasm-${{ env.VERSION }}.tar.gz 181 | asset_name: sqlite3-wasm-${{ env.VERSION }}.tar.gz 182 | asset_content_type: application/gzip 183 | 184 | build-npm: 185 | name: "Build NPM packages" 186 | runs-on: 
"ubuntu-latest" 187 | needs: "build" 188 | 189 | steps: 190 | - uses: actions/checkout@v4 191 | - uses: dtolnay/rust-toolchain@stable 192 | with: 193 | components: clippy, rustfmt 194 | - uses: Swatinem/rust-cache@v2 195 | - uses: actions/setup-node@v3 196 | with: 197 | node-version: 18 198 | registry-url: 'https://registry.npmjs.org' 199 | - uses: actions/download-artifact@v3 200 | with: 201 | path: /tmp/litevfs 202 | - name: "Build NPM packages" 203 | run: | 204 | cargo xtask build-npm-meta 205 | cargo xtask build-npm-binary --lib /tmp/litevfs/litevfs-${{ env.VERSION }}-x86_64-unknown-linux-gnu/liblitevfs.so --cpu x64 --os linux --abi gnu 206 | cargo xtask build-npm-binary --lib /tmp/litevfs/litevfs-${{ env.VERSION }}-x86_64-unknown-linux-musl/liblitevfs.so --cpu x64 --os linux --abi musl 207 | cargo xtask build-npm-binary --lib /tmp/litevfs/litevfs-${{ env.VERSION }}-aarch64-unknown-linux-gnu/liblitevfs.so --cpu arm64 --os linux --abi gnu 208 | cargo xtask build-npm-binary --lib /tmp/litevfs/litevfs-${{ env.VERSION }}-aarch64-unknown-linux-musl/liblitevfs.so --cpu arm64 --os linux --abi musl 209 | cargo xtask build-npm-binary --lib /tmp/litevfs/litevfs-${{ env.VERSION }}-x86_64-apple-darwin/liblitevfs.dylib --cpu x64 --os darwin 210 | cargo xtask build-npm-binary --lib /tmp/litevfs/litevfs-${{ env.VERSION }}-aarch64-apple-darwin/liblitevfs.dylib --cpu arm64 --os darwin 211 | cargo xtask build-npm-binary --lib /tmp/litevfs/litevfs-${{ env.VERSION }}-x86_64-pc-windows-msvc/litevfs.dll --cpu x64 --os windows 212 | - name: Upload binary artifact 213 | uses: actions/upload-artifact@v3 214 | with: 215 | name: npm-packages-${{ env.VERSION }} 216 | path: target/npm 217 | if-no-files-found: error 218 | 219 | - name: Publish NPM packages 220 | if: github.event_name == 'release' 221 | env: 222 | NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} 223 | run: | 224 | cd target/npm 225 | for p in litevfs-*.tgz; do 226 | npm publish $p 227 | done -------------------------------------------------------------------------------- /crates/litevfs/src/http.rs: -------------------------------------------------------------------------------- 1 | #[cfg(not(target_os = "emscripten"))] 2 | pub(crate) use native::{Client, Request, Response}; 3 | 4 | #[cfg(target_os = "emscripten")] 5 | pub(crate) use emscripten::{Client, Request, Response}; 6 | 7 | pub(crate) enum Error { 8 | Status(u16, Box), 9 | Transport(String), 10 | } 11 | 12 | #[cfg(not(target_os = "emscripten"))] 13 | mod native { 14 | use serde::{de::DeserializeOwned, Serialize}; 15 | use std::io::Read; 16 | use url::Url; 17 | 18 | pub(crate) struct Client(ureq::Agent); 19 | pub(crate) struct Request(ureq::Request); 20 | pub(crate) struct Response(ureq::Response); 21 | 22 | fn map_err(e: ureq::Error) -> super::Error { 23 | match e { 24 | ureq::Error::Status(code, resp) => super::Error::Status(code, Box::new(Response(resp))), 25 | ureq::Error::Transport(err) => super::Error::Transport(err.to_string()), 26 | } 27 | } 28 | 29 | impl Client { 30 | pub(crate) fn new() -> Client { 31 | Client( 32 | ureq::AgentBuilder::new() 33 | .user_agent(&format!("LiteVFS/{}", env!("CARGO_PKG_VERSION"))) 34 | .try_proxy_from_env(true) 35 | .build(), 36 | ) 37 | } 38 | 39 | pub(crate) fn request(&self, method: &str, url: &Url) -> Request { 40 | Request(self.0.request_url(method, url)) 41 | } 42 | } 43 | 44 | impl Request { 45 | pub(crate) fn set(self, header: &str, value: &str) -> Self { 46 | Request(self.0.set(header, value)) 47 | } 48 | 49 | pub(crate) fn call(self) -> Result { 50 | 
50 | self.0.call().map(Response).map_err(map_err) 51 | } 52 | 53 | pub(crate) fn send(self, reader: impl Read) -> Result<Response, super::Error> { 54 | self.0.send(reader).map(Response).map_err(map_err) 55 | } 56 | 57 | pub(crate) fn send_json(self, data: impl Serialize) -> Result<Response, super::Error> { 58 | self.0.send_json(data).map(Response).map_err(map_err) 59 | } 60 | } 61 | 62 | impl Response { 63 | pub(crate) fn header(&self, name: &str) -> Option<&str> { 64 | self.0.header(name) 65 | } 66 | 67 | pub(crate) fn into_reader(self) -> Box<dyn Read + Send + Sync + 'static> { 68 | self.0.into_reader() 69 | } 70 | 71 | pub(crate) fn into_json<T: DeserializeOwned>(self) -> std::io::Result<T> { 72 | self.0.into_json() 73 | } 74 | } 75 | } 76 | 77 | #[cfg(target_os = "emscripten")] 78 | mod emscripten { 79 | use emscripten_sys::{ 80 | emscripten_fetch, emscripten_fetch_attr_init, emscripten_fetch_attr_t, 81 | emscripten_fetch_close, emscripten_fetch_get_response_headers, 82 | emscripten_fetch_get_response_headers_length, emscripten_fetch_t, 83 | EMSCRIPTEN_FETCH_LOAD_TO_MEMORY, EMSCRIPTEN_FETCH_REPLACE, EMSCRIPTEN_FETCH_SYNCHRONOUS, 84 | }; 85 | use serde::{de::DeserializeOwned, Serialize}; 86 | use std::{ 87 | ffi::{c_char, CString}, 88 | io::{self, Read}, 89 | mem::MaybeUninit, 90 | ptr, slice, 91 | }; 92 | use url::Url; 93 | 94 | struct Header(Vec<u8>, usize); 95 | 96 | impl Header { 97 | fn name(&self) -> &str { 98 | let bytes = &self.0[0..self.1 - 1]; 99 | std::str::from_utf8(bytes).expect("Legal chars in header name") 100 | } 101 | 102 | fn value(&self) -> Option<&str> { 103 | let bytes = &self.0[self.1..]; 104 | std::str::from_utf8(bytes).map(|s| s.trim()).ok() 105 | } 106 | } 107 | 108 | pub(crate) struct Client; 109 | 110 | pub(crate) struct Request { 111 | method: String, 112 | url: CString, 113 | headers: Vec<Header>, 114 | } 115 | 116 | pub(crate) struct Response { 117 | fetch: *mut emscripten_fetch_t, 118 | body: &'static [u8], 119 | headers: Vec<Header>, 120 | } 121 | 122 | impl Client { 123 | pub(crate) fn new() -> Client { 124 | Client 125 | } 126 | 127 | pub(crate) fn request(&self, method: &str, url: &Url) -> Request { 128 | Request { 129 | method: method.into(), 130 | url: CString::new(url.as_str()).unwrap(), 131 | headers: Vec::new(), 132 | } 133 | } 134 | } 135 | 136 | impl Request { 137 | pub(crate) fn set(mut self, header: &str, value: &str) -> Self { 138 | let combined = format!("{}\0{}\0", header, value); 139 | self.headers.retain(|h| h.name() != header); 140 | self.headers.push(Header(combined.into(), header.len() + 1)); 141 | self 142 | } 143 | 144 | pub(crate) fn call(self) -> Result<Response, super::Error> { 145 | let headers = self.headers(); 146 | let mut req = self.fetch_attr(&headers); 147 | 148 | let resp = unsafe { 149 | emscripten_fetch(&mut req as *mut emscripten_fetch_attr_t, self.url.as_ptr()) 150 | }; 151 | 152 | self.response(resp) 153 | } 154 | 155 | pub(crate) fn send(self, mut reader: impl Read) -> Result<Response, super::Error> { 156 | let mut body = Vec::new(); 157 | reader 158 | .read_to_end(&mut body) 159 | .map_err(|e| super::Error::Transport(e.to_string()))?; 160 | 161 | self.do_send(&body) 162 | } 163 | 164 | pub(crate) fn send_json(mut self, data: impl Serialize) -> Result<Response, super::Error> { 165 | if self.header("Content-Type").is_none() { 166 | self = self.set("Content-Type", "application/json"); 167 | } 168 | 169 | let json_bytes = serde_json::to_vec(&data) 170 | .expect("Failed to serialize data passed to send_json into JSON"); 171 | 172 | self.do_send(&json_bytes) 173 | } 174 | 175 | pub(crate) fn header(&self, header: &str) -> Option<&str> { 176 | self.headers 177 | .iter() 178 | .find(|h| h.name().eq_ignore_ascii_case(header)) 179 | .and_then(|h| h.value()) 180 | } 181 | 182 | fn do_send(self, body: &[u8]) -> Result<Response, super::Error> { 183 | let headers = self.headers(); 184 | let mut req = self.fetch_attr(&headers); 185 | req.requestData = body.as_ptr() as *const i8; 186 | req.requestDataSize = body.len(); 187 | 188 | let resp = unsafe { 189 | emscripten_fetch(&mut req as *mut emscripten_fetch_attr_t, self.url.as_ptr()) 190 | }; 191 | 192 | self.response(resp) 193 | } 194 | 195 | fn headers(&self) -> Vec<*const c_char> { 196 | let mut headers = Vec::with_capacity(self.headers.len() * 2 + 1); 197 | for h in &self.headers { 198 | headers.push(h.0.as_ptr() as *const i8); 199 | unsafe { headers.push(h.0.as_ptr().add(h.1) as *const i8) }; 200 | } 201 | headers.push(ptr::null()); 202 | 203 | headers 204 | } 205 | 206 | fn fetch_attr(&self, headers: &[*const c_char]) -> emscripten_fetch_attr_t { 207 | let mut attr = unsafe { 208 | let mut attr = MaybeUninit::uninit(); 209 | emscripten_fetch_attr_init(attr.as_mut_ptr()); 210 | let mut attr = attr.assume_init(); 211 | attr.requestMethod[..self.method.len()] 212 | .copy_from_slice(&*(self.method.as_bytes() as *const _ as *const [i8])); 213 | 214 | attr 215 | }; 216 | 217 | attr.attributes = EMSCRIPTEN_FETCH_LOAD_TO_MEMORY 218 | | EMSCRIPTEN_FETCH_SYNCHRONOUS 219 | | EMSCRIPTEN_FETCH_REPLACE; 220 | attr.requestHeaders = headers.as_ptr(); 221 | 222 | attr 223 | } 224 | 225 | fn response(&self, resp: *mut emscripten_fetch_t) -> Result<Response, super::Error> { 226 | let headers = unsafe { 227 | let len = emscripten_fetch_get_response_headers_length(resp) + 1; 228 | let mut headers = Vec::<u8>::with_capacity(len); 229 | emscripten_fetch_get_response_headers(resp, headers.as_mut_ptr() as *mut i8, len); 230 | headers.set_len(len); 231 | 232 | headers 233 | .split(|b| b == &(b'\n')) 234 | .filter_map(|line| { 235 | let idx = line.iter().position(|b| b == &(b':'))?; 236 |
| 237 | let mut header = Vec::with_capacity(line.len() + 1); 238 | header.extend_from_slice(&line[0..idx]); 239 | header.push(0); 240 | header.extend_from_slice(&line[idx + 1..]); 241 | header.push(0); 242 | 243 | Some(Header(header, idx + 1)) 244 | }) 245 | .collect() 246 | }; 247 | 248 | let (status, resp) = unsafe { 249 | let status = (*resp).status; 250 | let resp = Response { 251 | fetch: resp, 252 | body: slice::from_raw_parts( 253 | (*resp).data as *const u8, 254 | (*resp).numBytes as usize, 255 | ), 256 | headers, 257 | }; 258 | 259 | (status, resp) 260 | }; 261 | 262 | match status { 263 | 200..=299 => Ok(resp), 264 | status => Err(super::Error::Status(status, Box::new(resp))), 265 | } 266 | } 267 | } 268 | 269 | impl Response { 270 | pub(crate) fn header(&self, header: &str) -> Option<&str> { 271 | self.headers 272 | .iter() 273 | .find(|h| h.name().eq_ignore_ascii_case(header)) 274 | .and_then(|h| h.value()) 275 | } 276 | 277 | pub(crate) fn into_reader(self) -> Box<dyn Read> { 278 | Box::new(self) 279 | } 280 | 281 | pub(crate) fn into_json<T: serde::de::DeserializeOwned>(self) -> io::Result<T> { 282 | let reader = self.into_reader(); 283 | serde_json::from_reader(reader).map_err(|e| { 284 | io::Error::new( 285 | io::ErrorKind::InvalidData, 286 | format!("Failed to read JSON: {}", e), 287 | ) 288 | }) 289 | } 290 | } 291 | 292 | impl Read for Response { 293 | fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> { 294 | self.body.read(buf) 295 | } 296 | } 297 | 298 | impl Drop for Response { 299 | fn drop(&mut self) { 300 | unsafe { 301 | emscripten_fetch_close(self.fetch); 302 | } 303 | } 304 | } 305 | } 306 | -------------------------------------------------------------------------------- /crates/litevfs/src/locks.rs: -------------------------------------------------------------------------------- 1 | use std::sync::{Arc, Mutex}; 2 | 3 | use sqlite_vfs::LockKind; 4 | 5 | pub(crate) struct VfsLock { 6 | inner: Arc<Mutex<InnerVfsLock>>, 7 | } 8 | 9 | // VfsLock implements an in-memory SQLite lock shared between multiple connections.
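// SQLite's locking protocol moves each connection along a ladder of states:
// NONE -> SHARED -> RESERVED -> PENDING -> EXCLUSIVE. Many connections may hold
// SHARED at once, at most one may hold RESERVED or higher at a time, and PENDING
// is only ever entered implicitly, while a would-be writer waits for the
// remaining readers to drain. The state machine below mirrors the semantics
// described at: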
10 | // https://www.sqlite.org/lockingv3.html 11 | impl VfsLock { 12 | pub(crate) fn new() -> Self { 13 | Self { 14 | inner: Arc::new(Mutex::new(InnerVfsLock::new())), 15 | } 16 | } 17 | 18 | pub(crate) fn conn_lock(&self) -> ConnLock { 19 | ConnLock::new(Arc::clone(&self.inner)) 20 | } 21 | 22 | #[cfg(test)] 23 | fn readers(&self) -> usize { 24 | self.inner.lock().unwrap().readers() 25 | } 26 | 27 | #[cfg(test)] 28 | fn has_writer(&self) -> bool { 29 | self.inner.lock().unwrap().has_writer() 30 | } 31 | } 32 | 33 | struct InnerVfsLock { 34 | readers: usize, 35 | writer: Option<bool>, // Some(false) = RESERVED, Some(true) = PENDING/EXCLUSIVE 36 | } 37 | 38 | impl InnerVfsLock { 39 | fn new() -> Self { 40 | Self { 41 | readers: 0, 42 | writer: None, 43 | } 44 | } 45 | 46 | fn transition(&mut self, from: LockKind, to: LockKind) -> LockKind { 47 | if from == to { 48 | return from; 49 | } 50 | 51 | match to { 52 | LockKind::None => { 53 | if from == LockKind::Shared { 54 | // Connection is a reader 55 | assert!(self.readers >= 1); 56 | self.readers -= 1; 57 | } else if from > LockKind::Shared { 58 | // Connection has at least RESERVED lock, only one is possible at a time 59 | self.writer = None; 60 | } 61 | LockKind::None 62 | } 63 | 64 | LockKind::Shared => { 65 | // PENDING lock is active, can't promote from NONE to SHARED 66 | if self.writer == Some(true) && from < LockKind::Shared { 67 | return from; 68 | } 69 | 70 | self.readers += 1; 71 | // Downgrade from a write lock 72 | if from > LockKind::Shared { 73 | self.writer = None 74 | } 75 | LockKind::Shared 76 | } 77 | 78 | LockKind::Reserved => { 79 | // If there is already a writer, or the connection is not in read mode, deny 80 | if self.writer.is_some() || from != LockKind::Shared { 81 | return from; 82 | } 83 | 84 | assert!(self.readers >= 1); 85 | self.readers -= 1; 86 | self.writer = Some(false); 87 | LockKind::Reserved 88 | } 89 | 90 | // Never requested explicitly 91 | LockKind::Pending => from, 92 | 93 | LockKind::Exclusive => { 94 | // Another connection is already a writer 95 | if self.writer.is_some() && from < LockKind::Reserved { 96 | return from; 97 | } 98 | 99 | if from == LockKind::Shared { 100 | self.readers -= 1; 101 | } 102 | 103 | self.writer = Some(true); 104 | if self.readers > 0 { 105 | LockKind::Pending 106 | } else { 107 | LockKind::Exclusive 108 | } 109 | } 110 | } 111 | } 112 | 113 | #[cfg(test)] 114 | fn readers(&self) -> usize { 115 | self.readers 116 | } 117 | 118 | fn has_writer(&self) -> bool { 119 | self.writer.is_some() 120 | } 121 | } 122 | 123 | /// ConnLock tracks individual connection lock state.
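///
/// A minimal sketch of the expected call pattern (illustrative only; it mirrors
/// the `exclusive_if_no_readers` test below):
///
/// ```ignore
/// let vfs_lock = VfsLock::new();
/// let mut conn = vfs_lock.conn_lock();
/// assert!(conn.acquire(LockKind::Shared));    // begin reading
/// assert!(conn.acquire(LockKind::Reserved));  // announce intent to write
/// assert!(conn.acquire(LockKind::Exclusive)); // write; succeeds with no other readers
/// assert!(conn.acquire(LockKind::None));      // release the lock entirely
/// ```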
124 | pub(crate) struct ConnLock { 125 | vfs_lock: Arc<Mutex<InnerVfsLock>>, 126 | state: LockKind, 127 | } 128 | 129 | impl ConnLock { 130 | fn new(vfs_lock: Arc<Mutex<InnerVfsLock>>) -> ConnLock { 131 | ConnLock { 132 | vfs_lock, 133 | state: LockKind::None, 134 | } 135 | } 136 | 137 | pub(crate) fn acquire(&mut self, to: LockKind) -> bool { 138 | self.state = self.vfs_lock.lock().unwrap().transition(self.state, to); 139 | self.state == to 140 | } 141 | 142 | pub(crate) fn state(&self) -> LockKind { 143 | self.state 144 | } 145 | 146 | pub(crate) fn reserved(&self) -> bool { 147 | self.state >= LockKind::Shared || self.vfs_lock.lock().unwrap().has_writer() 148 | } 149 | } 150 | 151 | impl Drop for ConnLock { 152 | fn drop(&mut self) { 153 | self.acquire(LockKind::None); 154 | } 155 | } 156 | 157 | #[cfg(test)] 158 | mod tests { 159 | use super::VfsLock; 160 | use sqlite_vfs::LockKind; 161 | 162 | #[test] 163 | fn multiple_readers() { 164 | let vfs_lock = VfsLock::new(); 165 | 166 | let mut conn1_lock = vfs_lock.conn_lock(); 167 | let mut conn2_lock = vfs_lock.conn_lock(); 168 | 169 | assert!(conn1_lock.acquire(LockKind::Shared)); 170 | assert!(conn2_lock.acquire(LockKind::Shared)); 171 | assert_eq!(2, vfs_lock.readers()); 172 | assert_eq!(LockKind::Shared, conn1_lock.state()); 173 | assert_eq!(LockKind::Shared, conn2_lock.state()); 174 | } 175 | 176 | #[test] 177 | fn reader_while_reserved() { 178 | let vfs_lock = VfsLock::new(); 179 | 180 | let mut conn1_lock = vfs_lock.conn_lock(); 181 | let mut conn2_lock = vfs_lock.conn_lock(); 182 | 183 | assert!(conn1_lock.acquire(LockKind::Shared)); 184 | assert!(conn1_lock.acquire(LockKind::Reserved)); 185 | assert!(conn2_lock.acquire(LockKind::Shared)); 186 | assert_eq!(1, vfs_lock.readers()); 187 | assert!(vfs_lock.has_writer()); 188 | assert_eq!(LockKind::Reserved, conn1_lock.state()); 189 | assert_eq!(LockKind::Shared, conn2_lock.state()); 190 | } 191 | 192 | #[test] 193 | fn only_one_reserved() { 194 | let vfs_lock = VfsLock::new(); 195 | 196 | let mut conn1_lock = (&vfs_lock).conn_lock(); 197 | let mut conn2_lock = (&vfs_lock).conn_lock(); 198 | 199 | assert!(conn1_lock.acquire(LockKind::Shared)); 200 | assert!(conn1_lock.acquire(LockKind::Reserved)); 201 | assert!(conn2_lock.acquire(LockKind::Shared)); 202 | assert!(!conn2_lock.acquire(LockKind::Reserved)); 203 | assert_eq!(1, vfs_lock.readers()); 204 | assert!(vfs_lock.has_writer()); 205 | assert_eq!(LockKind::Reserved, conn1_lock.state()); 206 | assert_eq!(LockKind::Shared, conn2_lock.state()); 207 | } 208 | 209 | #[test] 210 | fn pending_if_readers() { 211 | let vfs_lock = VfsLock::new(); 212 | 213 | let mut conn1_lock = (&vfs_lock).conn_lock(); 214 | let mut conn2_lock = (&vfs_lock).conn_lock(); 215 | 216 | assert!(conn1_lock.acquire(LockKind::Shared)); 217 | assert!(conn2_lock.acquire(LockKind::Shared)); 218 | assert!(conn1_lock.acquire(LockKind::Reserved)); 219 | assert!(!conn1_lock.acquire(LockKind::Exclusive)); 220 | assert_eq!(1, vfs_lock.readers()); 221 | assert!(vfs_lock.has_writer()); 222 | assert_eq!(LockKind::Pending, conn1_lock.state()); 223 | assert_eq!(LockKind::Shared, conn2_lock.state()); 224 | } 225 | 226 | #[test] 227 | fn exclusive_if_no_readers() { 228 | let vfs_lock = VfsLock::new(); 229 | 230 | let mut conn1_lock = (&vfs_lock).conn_lock(); 231 | 232 | assert!(conn1_lock.acquire(LockKind::Shared)); 233 | assert!(conn1_lock.acquire(LockKind::Reserved)); 234 | assert!(conn1_lock.acquire(LockKind::Exclusive)); // The SHARED -> RESERVED promotion gave up the reader slot, so no readers remain. 235 | assert_eq!(0, vfs_lock.readers()); 236 | assert!(vfs_lock.has_writer()); 237 |
assert_eq!(LockKind::Exclusive, conn1_lock.state()); 238 | } 239 | 240 | #[test] 241 | fn pending_to_exclusive() { 242 | let vfs_lock = VfsLock::new(); 243 | 244 | let mut conn1_lock = (&vfs_lock).conn_lock(); 245 | let mut conn2_lock = (&vfs_lock).conn_lock(); 246 | 247 | assert!(conn1_lock.acquire(LockKind::Shared)); 248 | assert!(conn2_lock.acquire(LockKind::Shared)); 249 | assert!(conn1_lock.acquire(LockKind::Reserved)); 250 | assert!(!conn1_lock.acquire(LockKind::Exclusive)); 251 | assert_eq!(1, vfs_lock.readers()); 252 | assert!(vfs_lock.has_writer()); 253 | assert_eq!(LockKind::Pending, conn1_lock.state()); 254 | assert_eq!(LockKind::Shared, conn2_lock.state()); 255 | 256 | assert!(conn2_lock.acquire(LockKind::None)); 257 | assert!(conn1_lock.acquire(LockKind::Exclusive)); 258 | assert_eq!(0, vfs_lock.readers()); 259 | assert!(vfs_lock.has_writer()); 260 | assert_eq!(LockKind::Exclusive, conn1_lock.state()); 261 | assert_eq!(LockKind::None, conn2_lock.state()); 262 | } 263 | 264 | #[test] 265 | fn no_new_readers_while_pending() { 266 | let vfs_lock = VfsLock::new(); 267 | 268 | let mut conn1_lock = (&vfs_lock).conn_lock(); 269 | let mut conn2_lock = (&vfs_lock).conn_lock(); 270 | 271 | assert!(conn1_lock.acquire(LockKind::Shared)); 272 | assert!(conn2_lock.acquire(LockKind::Shared)); 273 | assert!(conn1_lock.acquire(LockKind::Reserved)); 274 | assert!(!conn1_lock.acquire(LockKind::Exclusive)); 275 | 276 | let mut conn3_lock = (&vfs_lock).conn_lock(); 277 | 278 | assert!(!conn3_lock.acquire(LockKind::Shared)); 279 | assert_eq!(LockKind::None, conn3_lock.state()); 280 | } 281 | 282 | #[test] 283 | fn no_new_readers_while_exclusive() { 284 | let vfs_lock = VfsLock::new(); 285 | 286 | let mut conn1_lock = (&vfs_lock).conn_lock(); 287 | 288 | assert!(conn1_lock.acquire(LockKind::Shared)); 289 | assert!(conn1_lock.acquire(LockKind::Reserved)); 290 | assert!(conn1_lock.acquire(LockKind::Exclusive)); 291 | 292 | let mut conn2_lock = (&vfs_lock).conn_lock(); 293 | 294 | assert!(!conn2_lock.acquire(LockKind::Shared)); 295 | assert_eq!(LockKind::None, conn2_lock.state()); 296 | } 297 | 298 | #[test] 299 | fn exclusive_from_shared() { 300 | let vfs_lock = VfsLock::new(); 301 | 302 | let mut conn1_lock = (&vfs_lock).conn_lock(); 303 | 304 | assert!(conn1_lock.acquire(LockKind::Shared)); 305 | assert!(conn1_lock.acquire(LockKind::Exclusive)); 306 | assert_eq!(0, vfs_lock.readers()); 307 | assert!(vfs_lock.has_writer()); 308 | assert_eq!(LockKind::Exclusive, conn1_lock.state()); 309 | } 310 | 311 | #[test] 312 | fn drop_unlocks() { 313 | let vfs_lock = VfsLock::new(); 314 | 315 | { 316 | let mut conn1_lock = (&vfs_lock).conn_lock(); 317 | assert!(conn1_lock.acquire(LockKind::Shared)); 318 | assert_eq!(1, vfs_lock.readers()); 319 | } 320 | assert_eq!(0, vfs_lock.readers()); 321 | 322 | { 323 | let mut conn1_lock = (&vfs_lock).conn_lock(); 324 | assert!(conn1_lock.acquire(LockKind::Shared)); 325 | assert!(conn1_lock.acquire(LockKind::Exclusive)); 326 | assert!(vfs_lock.has_writer()); 327 | } 328 | assert!(!vfs_lock.has_writer()); 329 | } 330 | } 331 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 
8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 
179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /crates/emscripten-sys/src/lib.rs: -------------------------------------------------------------------------------- 1 | #![allow(clippy::all, non_snake_case, non_camel_case_types, dead_code)] 2 | /* automatically generated by rust-bindgen 0.66.1 */ 3 | 4 | pub const EMSCRIPTEN_FETCH_LOAD_TO_MEMORY: u32 = 1; 5 | pub const EMSCRIPTEN_FETCH_STREAM_DATA: u32 = 2; 6 | pub const EMSCRIPTEN_FETCH_PERSIST_FILE: u32 = 4; 7 | pub const EMSCRIPTEN_FETCH_APPEND: u32 = 8; 8 | pub const EMSCRIPTEN_FETCH_REPLACE: u32 = 16; 9 | pub const EMSCRIPTEN_FETCH_NO_DOWNLOAD: u32 = 32; 10 | pub const EMSCRIPTEN_FETCH_SYNCHRONOUS: u32 = 64; 11 | pub const EMSCRIPTEN_FETCH_WAITABLE: u32 = 128; 12 | #[repr(C)] 13 | #[derive(Debug, Copy, Clone)] 14 | pub struct emscripten_fetch_attr_t { 15 | pub requestMethod: [::std::os::raw::c_char; 32usize], 16 | pub userData: *mut ::std::os::raw::c_void, 17 | pub onsuccess: ::std::option::Option, 18 | pub onerror: ::std::option::Option, 19 | pub onprogress: ::std::option::Option, 20 | pub onreadystatechange: 21 | ::std::option::Option, 22 | pub attributes: u32, 23 | pub timeoutMSecs: u32, 24 | pub withCredentials: ::std::os::raw::c_int, 25 | pub destinationPath: *const ::std::os::raw::c_char, 26 | pub userName: *const ::std::os::raw::c_char, 27 | pub password: *const ::std::os::raw::c_char, 28 | pub requestHeaders: *const *const ::std::os::raw::c_char, 29 | pub overriddenMimeType: *const ::std::os::raw::c_char, 30 | pub requestData: *const ::std::os::raw::c_char, 31 | pub requestDataSize: usize, 32 | } 33 | #[test] 34 | fn bindgen_test_layout_emscripten_fetch_attr_t() { 35 | const UNINIT: ::std::mem::MaybeUninit = 36 | ::std::mem::MaybeUninit::uninit(); 37 | let ptr = UNINIT.as_ptr(); 38 | assert_eq!( 39 | ::std::mem::size_of::(), 40 | 144usize, 41 | concat!("Size of: ", stringify!(emscripten_fetch_attr_t)) 42 | ); 43 | assert_eq!( 44 | ::std::mem::align_of::(), 45 | 8usize, 46 | concat!("Alignment of ", stringify!(emscripten_fetch_attr_t)) 47 | ); 48 | assert_eq!( 49 | unsafe { ::std::ptr::addr_of!((*ptr).requestMethod) as usize - ptr as usize }, 50 | 0usize, 51 | concat!( 52 | "Offset of field: ", 53 | stringify!(emscripten_fetch_attr_t), 54 | "::", 55 | stringify!(requestMethod) 56 | ) 57 | ); 58 | assert_eq!( 59 | unsafe { 
::std::ptr::addr_of!((*ptr).userData) as usize - ptr as usize }, 60 | 32usize, 61 | concat!( 62 | "Offset of field: ", 63 | stringify!(emscripten_fetch_attr_t), 64 | "::", 65 | stringify!(userData) 66 | ) 67 | ); 68 | assert_eq!( 69 | unsafe { ::std::ptr::addr_of!((*ptr).onsuccess) as usize - ptr as usize }, 70 | 40usize, 71 | concat!( 72 | "Offset of field: ", 73 | stringify!(emscripten_fetch_attr_t), 74 | "::", 75 | stringify!(onsuccess) 76 | ) 77 | ); 78 | assert_eq!( 79 | unsafe { ::std::ptr::addr_of!((*ptr).onerror) as usize - ptr as usize }, 80 | 48usize, 81 | concat!( 82 | "Offset of field: ", 83 | stringify!(emscripten_fetch_attr_t), 84 | "::", 85 | stringify!(onerror) 86 | ) 87 | ); 88 | assert_eq!( 89 | unsafe { ::std::ptr::addr_of!((*ptr).onprogress) as usize - ptr as usize }, 90 | 56usize, 91 | concat!( 92 | "Offset of field: ", 93 | stringify!(emscripten_fetch_attr_t), 94 | "::", 95 | stringify!(onprogress) 96 | ) 97 | ); 98 | assert_eq!( 99 | unsafe { ::std::ptr::addr_of!((*ptr).onreadystatechange) as usize - ptr as usize }, 100 | 64usize, 101 | concat!( 102 | "Offset of field: ", 103 | stringify!(emscripten_fetch_attr_t), 104 | "::", 105 | stringify!(onreadystatechange) 106 | ) 107 | ); 108 | assert_eq!( 109 | unsafe { ::std::ptr::addr_of!((*ptr).attributes) as usize - ptr as usize }, 110 | 72usize, 111 | concat!( 112 | "Offset of field: ", 113 | stringify!(emscripten_fetch_attr_t), 114 | "::", 115 | stringify!(attributes) 116 | ) 117 | ); 118 | assert_eq!( 119 | unsafe { ::std::ptr::addr_of!((*ptr).timeoutMSecs) as usize - ptr as usize }, 120 | 76usize, 121 | concat!( 122 | "Offset of field: ", 123 | stringify!(emscripten_fetch_attr_t), 124 | "::", 125 | stringify!(timeoutMSecs) 126 | ) 127 | ); 128 | assert_eq!( 129 | unsafe { ::std::ptr::addr_of!((*ptr).withCredentials) as usize - ptr as usize }, 130 | 80usize, 131 | concat!( 132 | "Offset of field: ", 133 | stringify!(emscripten_fetch_attr_t), 134 | "::", 135 | stringify!(withCredentials) 136 | ) 137 | ); 138 | assert_eq!( 139 | unsafe { ::std::ptr::addr_of!((*ptr).destinationPath) as usize - ptr as usize }, 140 | 88usize, 141 | concat!( 142 | "Offset of field: ", 143 | stringify!(emscripten_fetch_attr_t), 144 | "::", 145 | stringify!(destinationPath) 146 | ) 147 | ); 148 | assert_eq!( 149 | unsafe { ::std::ptr::addr_of!((*ptr).userName) as usize - ptr as usize }, 150 | 96usize, 151 | concat!( 152 | "Offset of field: ", 153 | stringify!(emscripten_fetch_attr_t), 154 | "::", 155 | stringify!(userName) 156 | ) 157 | ); 158 | assert_eq!( 159 | unsafe { ::std::ptr::addr_of!((*ptr).password) as usize - ptr as usize }, 160 | 104usize, 161 | concat!( 162 | "Offset of field: ", 163 | stringify!(emscripten_fetch_attr_t), 164 | "::", 165 | stringify!(password) 166 | ) 167 | ); 168 | assert_eq!( 169 | unsafe { ::std::ptr::addr_of!((*ptr).requestHeaders) as usize - ptr as usize }, 170 | 112usize, 171 | concat!( 172 | "Offset of field: ", 173 | stringify!(emscripten_fetch_attr_t), 174 | "::", 175 | stringify!(requestHeaders) 176 | ) 177 | ); 178 | assert_eq!( 179 | unsafe { ::std::ptr::addr_of!((*ptr).overriddenMimeType) as usize - ptr as usize }, 180 | 120usize, 181 | concat!( 182 | "Offset of field: ", 183 | stringify!(emscripten_fetch_attr_t), 184 | "::", 185 | stringify!(overriddenMimeType) 186 | ) 187 | ); 188 | assert_eq!( 189 | unsafe { ::std::ptr::addr_of!((*ptr).requestData) as usize - ptr as usize }, 190 | 128usize, 191 | concat!( 192 | "Offset of field: ", 193 | stringify!(emscripten_fetch_attr_t), 194 | "::", 195 | 
stringify!(requestData) 196 | ) 197 | ); 198 | assert_eq!( 199 | unsafe { ::std::ptr::addr_of!((*ptr).requestDataSize) as usize - ptr as usize }, 200 | 136usize, 201 | concat!( 202 | "Offset of field: ", 203 | stringify!(emscripten_fetch_attr_t), 204 | "::", 205 | stringify!(requestDataSize) 206 | ) 207 | ); 208 | } 209 | #[repr(C)] 210 | #[derive(Debug, Copy, Clone)] 211 | pub struct emscripten_fetch_t { 212 | pub id: u32, 213 | pub userData: *mut ::std::os::raw::c_void, 214 | pub url: *const ::std::os::raw::c_char, 215 | pub data: *const ::std::os::raw::c_char, 216 | pub numBytes: u64, 217 | pub dataOffset: u64, 218 | pub totalBytes: u64, 219 | pub readyState: ::std::os::raw::c_ushort, 220 | pub status: ::std::os::raw::c_ushort, 221 | pub statusText: [::std::os::raw::c_char; 64usize], 222 | pub __proxyState: u32, 223 | pub __attributes: emscripten_fetch_attr_t, 224 | } 225 | #[test] 226 | fn bindgen_test_layout_emscripten_fetch_t() { 227 | const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); 228 | let ptr = UNINIT.as_ptr(); 229 | assert_eq!( 230 | ::std::mem::size_of::(), 231 | 272usize, 232 | concat!("Size of: ", stringify!(emscripten_fetch_t)) 233 | ); 234 | assert_eq!( 235 | ::std::mem::align_of::(), 236 | 8usize, 237 | concat!("Alignment of ", stringify!(emscripten_fetch_t)) 238 | ); 239 | assert_eq!( 240 | unsafe { ::std::ptr::addr_of!((*ptr).id) as usize - ptr as usize }, 241 | 0usize, 242 | concat!( 243 | "Offset of field: ", 244 | stringify!(emscripten_fetch_t), 245 | "::", 246 | stringify!(id) 247 | ) 248 | ); 249 | assert_eq!( 250 | unsafe { ::std::ptr::addr_of!((*ptr).userData) as usize - ptr as usize }, 251 | 8usize, 252 | concat!( 253 | "Offset of field: ", 254 | stringify!(emscripten_fetch_t), 255 | "::", 256 | stringify!(userData) 257 | ) 258 | ); 259 | assert_eq!( 260 | unsafe { ::std::ptr::addr_of!((*ptr).url) as usize - ptr as usize }, 261 | 16usize, 262 | concat!( 263 | "Offset of field: ", 264 | stringify!(emscripten_fetch_t), 265 | "::", 266 | stringify!(url) 267 | ) 268 | ); 269 | assert_eq!( 270 | unsafe { ::std::ptr::addr_of!((*ptr).data) as usize - ptr as usize }, 271 | 24usize, 272 | concat!( 273 | "Offset of field: ", 274 | stringify!(emscripten_fetch_t), 275 | "::", 276 | stringify!(data) 277 | ) 278 | ); 279 | assert_eq!( 280 | unsafe { ::std::ptr::addr_of!((*ptr).numBytes) as usize - ptr as usize }, 281 | 32usize, 282 | concat!( 283 | "Offset of field: ", 284 | stringify!(emscripten_fetch_t), 285 | "::", 286 | stringify!(numBytes) 287 | ) 288 | ); 289 | assert_eq!( 290 | unsafe { ::std::ptr::addr_of!((*ptr).dataOffset) as usize - ptr as usize }, 291 | 40usize, 292 | concat!( 293 | "Offset of field: ", 294 | stringify!(emscripten_fetch_t), 295 | "::", 296 | stringify!(dataOffset) 297 | ) 298 | ); 299 | assert_eq!( 300 | unsafe { ::std::ptr::addr_of!((*ptr).totalBytes) as usize - ptr as usize }, 301 | 48usize, 302 | concat!( 303 | "Offset of field: ", 304 | stringify!(emscripten_fetch_t), 305 | "::", 306 | stringify!(totalBytes) 307 | ) 308 | ); 309 | assert_eq!( 310 | unsafe { ::std::ptr::addr_of!((*ptr).readyState) as usize - ptr as usize }, 311 | 56usize, 312 | concat!( 313 | "Offset of field: ", 314 | stringify!(emscripten_fetch_t), 315 | "::", 316 | stringify!(readyState) 317 | ) 318 | ); 319 | assert_eq!( 320 | unsafe { ::std::ptr::addr_of!((*ptr).status) as usize - ptr as usize }, 321 | 58usize, 322 | concat!( 323 | "Offset of field: ", 324 | stringify!(emscripten_fetch_t), 325 | "::", 326 | stringify!(status) 327 | ) 328 | ); 329 
| assert_eq!( 330 | unsafe { ::std::ptr::addr_of!((*ptr).statusText) as usize - ptr as usize }, 331 | 60usize, 332 | concat!( 333 | "Offset of field: ", 334 | stringify!(emscripten_fetch_t), 335 | "::", 336 | stringify!(statusText) 337 | ) 338 | ); 339 | assert_eq!( 340 | unsafe { ::std::ptr::addr_of!((*ptr).__proxyState) as usize - ptr as usize }, 341 | 124usize, 342 | concat!( 343 | "Offset of field: ", 344 | stringify!(emscripten_fetch_t), 345 | "::", 346 | stringify!(__proxyState) 347 | ) 348 | ); 349 | assert_eq!( 350 | unsafe { ::std::ptr::addr_of!((*ptr).__attributes) as usize - ptr as usize }, 351 | 128usize, 352 | concat!( 353 | "Offset of field: ", 354 | stringify!(emscripten_fetch_t), 355 | "::", 356 | stringify!(__attributes) 357 | ) 358 | ); 359 | } 360 | extern "C" { 361 | pub fn emscripten_fetch_attr_init(fetch_attr: *mut emscripten_fetch_attr_t); 362 | } 363 | extern "C" { 364 | pub fn emscripten_fetch( 365 | fetch_attr: *mut emscripten_fetch_attr_t, 366 | url: *const ::std::os::raw::c_char, 367 | ) -> *mut emscripten_fetch_t; 368 | } 369 | extern "C" { 370 | pub fn emscripten_fetch_wait( 371 | fetch: *mut emscripten_fetch_t, 372 | timeoutMSecs: f64, 373 | ) -> ::std::os::raw::c_int; 374 | } 375 | extern "C" { 376 | pub fn emscripten_fetch_close(fetch: *mut emscripten_fetch_t) -> ::std::os::raw::c_int; 377 | } 378 | extern "C" { 379 | pub fn emscripten_fetch_get_response_headers_length(fetch: *mut emscripten_fetch_t) -> usize; 380 | } 381 | extern "C" { 382 | pub fn emscripten_fetch_get_response_headers( 383 | fetch: *mut emscripten_fetch_t, 384 | dst: *mut ::std::os::raw::c_char, 385 | dstSizeBytes: usize, 386 | ) -> usize; 387 | } 388 | extern "C" { 389 | pub fn emscripten_fetch_unpack_response_headers( 390 | headersString: *const ::std::os::raw::c_char, 391 | ) -> *mut *mut ::std::os::raw::c_char; 392 | } 393 | extern "C" { 394 | pub fn emscripten_fetch_free_unpacked_response_headers( 395 | unpackedHeaders: *mut *mut ::std::os::raw::c_char, 396 | ); 397 | } 398 | -------------------------------------------------------------------------------- /crates/litevfs/src/syncer.rs: -------------------------------------------------------------------------------- 1 | use crate::lfsc; 2 | use litetx as ltx; 3 | use std::collections::BTreeSet; 4 | 5 | #[derive(Debug)] 6 | pub(crate) enum Changes { 7 | All, 8 | Pages(BTreeSet), 9 | } 10 | 11 | impl From for Option { 12 | fn from(c: lfsc::Changes) -> Self { 13 | match c { 14 | lfsc::Changes::All(_) => Some(Changes::All), 15 | lfsc::Changes::Pages(_, None) => None, 16 | lfsc::Changes::Pages(_, Some(pages)) => { 17 | Some(Changes::Pages(BTreeSet::from_iter(pages))) 18 | } 19 | } 20 | } 21 | } 22 | 23 | #[cfg(not(target_os = "emscripten"))] 24 | pub(crate) use native::Syncer; 25 | 26 | #[cfg(target_os = "emscripten")] 27 | pub(crate) use emscripten::Syncer; 28 | 29 | #[cfg(not(target_os = "emscripten"))] 30 | mod native { 31 | use crate::{lfsc, PositionsLogger}; 32 | use litetx as ltx; 33 | use std::{ 34 | collections::HashMap, 35 | io, 36 | sync::{Arc, Condvar, Mutex}, 37 | thread, time, 38 | }; 39 | use string_interner::{DefaultSymbol, StringInterner}; 40 | 41 | pub(crate) struct Syncer { 42 | client: Arc, 43 | notifier: crossbeam_channel::Sender<()>, 44 | period: time::Duration, 45 | 46 | interner: Mutex, 47 | dbs: Mutex>, 48 | cvar: Condvar, 49 | } 50 | 51 | struct Db { 52 | position: Option, 53 | changes: Option, 54 | conns: u32, 55 | 56 | last_sync: time::SystemTime, 57 | period: time::Duration, 58 | } 59 | 60 | impl Db { 61 | fn 
sync_period(&self) -> Option { 62 | if self.period.is_zero() { 63 | return None; 64 | } 65 | 66 | Some(self.period) 67 | } 68 | 69 | fn next_sync(&self) -> Option { 70 | self.last_sync.checked_add(self.sync_period()?) 71 | } 72 | 73 | fn needs_sync(&self, now: &time::SystemTime) -> bool { 74 | if let Some(ref ns) = self.next_sync() { 75 | return ns <= now; 76 | } 77 | 78 | false 79 | } 80 | } 81 | 82 | impl Syncer { 83 | pub(crate) fn new(client: Arc, period: time::Duration) -> Arc { 84 | let (tx, rx) = crossbeam_channel::unbounded(); 85 | let syncer = Arc::new(Syncer { 86 | client, 87 | notifier: tx, 88 | period, 89 | interner: Mutex::new(StringInterner::new()), 90 | dbs: Mutex::new(HashMap::new()), 91 | cvar: Condvar::new(), 92 | }); 93 | 94 | thread::spawn({ 95 | let syncer = Arc::clone(&syncer); 96 | 97 | move || syncer.run(rx) 98 | }); 99 | 100 | syncer 101 | } 102 | 103 | fn sym(&self, db: &str) -> DefaultSymbol { 104 | self.interner.lock().unwrap().get_or_intern(db) 105 | } 106 | 107 | pub(crate) fn open_conn(&self, db: &str, pos: Option) { 108 | let sym = self.sym(db); 109 | 110 | self.dbs 111 | .lock() 112 | .unwrap() 113 | .entry(sym) 114 | .and_modify(|db| db.conns += 1) 115 | .or_insert(Db { 116 | position: pos, 117 | changes: None, 118 | conns: 1, 119 | last_sync: time::SystemTime::now(), 120 | period: self.period, 121 | }); 122 | 123 | self.notify(); 124 | } 125 | 126 | pub(crate) fn close_conn(&self, db: &str) { 127 | let sym = self.sym(db); 128 | 129 | let mut dbs = self.dbs.lock().unwrap(); 130 | let remove = { 131 | let db = dbs.get_mut(&sym).unwrap(); 132 | 133 | assert!(db.conns > 0); 134 | db.conns -= 1; 135 | db.conns == 0 136 | }; 137 | 138 | if remove { 139 | dbs.remove(&sym); 140 | } 141 | 142 | self.notify(); 143 | } 144 | 145 | pub(crate) fn needs_sync(&self, db: &str, pos: Option) -> bool { 146 | let sym = self.sym(db); 147 | 148 | let dbs = self.dbs.lock().unwrap(); 149 | let db = dbs.get(&sym).unwrap(); 150 | if db.position.is_some() && db.position != pos { 151 | return true; 152 | } 153 | 154 | db.needs_sync(&time::SystemTime::now()) 155 | } 156 | 157 | pub(crate) fn get_changes( 158 | &self, 159 | db: &str, 160 | _pos: Option, 161 | ) -> io::Result<(Option, Option)> { 162 | let sym = self.sym(db); 163 | 164 | let mut dbs = self.dbs.lock().unwrap(); 165 | while dbs.get(&sym).unwrap().needs_sync(&time::SystemTime::now()) { 166 | self.notify(); 167 | dbs = self.cvar.wait(dbs).unwrap(); 168 | } 169 | 170 | let db = dbs.get_mut(&sym).unwrap(); 171 | 172 | Ok((db.position, db.changes.take())) 173 | } 174 | 175 | pub(crate) fn put_changes(&self, db: &str, prev_changes: super::Changes) { 176 | let sym = self.sym(db); 177 | 178 | let mut dbs = self.dbs.lock().unwrap(); 179 | let db = dbs.get_mut(&sym).unwrap(); 180 | 181 | db.changes = merge_changes(Some(prev_changes), db.changes.take()) 182 | } 183 | 184 | pub(crate) fn set_pos(&self, db: &str, pos: Option) { 185 | let pos = if let Some(pos) = pos { pos } else { return }; 186 | 187 | let sym = self.sym(db); 188 | 189 | let mut dbs = self.dbs.lock().unwrap(); 190 | let db = dbs.get_mut(&sym).unwrap(); 191 | 192 | if matches!(db.position, Some(rp) if rp.txid > pos.txid) { 193 | return; 194 | } 195 | 196 | db.position = Some(pos); 197 | db.last_sync = time::SystemTime::now(); 198 | db.changes.take(); 199 | } 200 | 201 | pub(crate) fn sync_one(&self, db: &str, deep: bool) -> io::Result<()> { 202 | let sym = self.sym(db); 203 | if !deep { 204 | return self.sync(&[sym]); 205 | } 206 | 207 | let pos = 
self.dbs.lock().unwrap().get(&sym).unwrap().position; 208 | 209 | let changes = self.client.sync_db(db, pos)?; 210 | 211 | self.dbs.lock().unwrap().entry(sym).and_modify(|db| { 212 | let local_txid = db.position.map(|p| p.txid.into_inner()).unwrap_or(0); 213 | let remote_txid = changes.pos().map(|p| p.txid.into_inner()).unwrap_or(0); 214 | 215 | if remote_txid >= local_txid { 216 | db.position = changes.pos(); 217 | db.changes = merge_changes(changes.into(), db.changes.take()); 218 | db.last_sync = time::SystemTime::now(); 219 | } 220 | }); 221 | 222 | Ok(()) 223 | } 224 | 225 | pub(crate) fn sync_period(&self, db: &str) -> time::Duration { 226 | let sym = self.sym(db); 227 | 228 | self.dbs.lock().unwrap().get(&sym).unwrap().period 229 | } 230 | 231 | pub(crate) fn set_sync_period(&self, db: &str, period: time::Duration) { 232 | let sym = self.sym(db); 233 | 234 | self.dbs.lock().unwrap().get_mut(&sym).unwrap().period = period; 235 | 236 | self.notify(); 237 | } 238 | 239 | fn sync(&self, db_syms: &[DefaultSymbol]) -> io::Result<()> { 240 | let old_positions = { 241 | let interner = self.interner.lock().unwrap(); 242 | let dbs = self.dbs.lock().unwrap(); 243 | 244 | db_syms 245 | .iter() 246 | .filter_map(|&k| { 247 | Some(( 248 | interner.resolve(k).unwrap().to_owned(), 249 | dbs.get(&k)?.position, 250 | )) 251 | }) 252 | .collect() 253 | }; 254 | 255 | log::debug!( 256 | "[syncer] sync: positions = {}", 257 | PositionsLogger(&old_positions) 258 | ); 259 | let mut changes = self.client.sync(&old_positions)?; 260 | 261 | let interner = self.interner.lock().unwrap(); 262 | let mut dbs = self.dbs.lock().unwrap(); 263 | let now = time::SystemTime::now(); 264 | for (&k, db) in dbs.iter_mut() { 265 | let name = interner.resolve(k).unwrap(); 266 | let (new_pos, changes) = if let Some(changes) = changes.remove(name) { 267 | (changes.pos(), changes.into()) 268 | } else { 269 | (None, None) 270 | }; 271 | 272 | db.changes = merge_changes( 273 | if old_positions.get(name) == Some(&db.position) { 274 | changes 275 | } else { 276 | None 277 | }, 278 | db.changes.take(), 279 | ); 280 | db.position = new_pos; 281 | db.last_sync = now; 282 | } 283 | 284 | self.cvar.notify_all(); 285 | 286 | Ok(()) 287 | } 288 | 289 | fn notify(&self) { 290 | self.notifier.send(()).unwrap(); 291 | } 292 | 293 | fn run(&self, rx: crossbeam_channel::Receiver<()>) { 294 | use crossbeam_channel::{after, never, select}; 295 | 296 | loop { 297 | let (min_sync_period, last_sync) = { 298 | let dbs = self.dbs.lock().unwrap(); 299 | 300 | ( 301 | dbs.values().filter_map(|db| db.sync_period()).min(), 302 | dbs.values().map(|db| db.last_sync).max(), 303 | ) 304 | }; 305 | 306 | let since_last_sync = if let Some(last_sync) = last_sync { 307 | time::SystemTime::now() 308 | .duration_since(last_sync) 309 | .unwrap_or_default() 310 | } else { 311 | time::Duration::ZERO 312 | }; 313 | let next_sync = 314 | min_sync_period.map(|p| p.checked_sub(since_last_sync).unwrap_or_default()); 315 | 316 | let waiter = if let Some(next_sync) = next_sync { 317 | log::debug!("[syncer]: next sync in {}ms", next_sync.as_millis()); 318 | after(next_sync) 319 | } else { 320 | never() 321 | }; 322 | 323 | select! 
{ 324 | recv(rx) -> _ => (), 325 | recv(waiter) -> _ => (), 326 | }; 327 | 328 | let now = time::SystemTime::now(); 329 | let dbs = self 330 | .dbs 331 | .lock() 332 | .unwrap() 333 | .iter() 334 | .filter_map(|(&k, db)| if db.needs_sync(&now) { Some(k) } else { None }) 335 | .collect::>(); 336 | if !dbs.is_empty() { 337 | if let Err(err) = self.sync(&dbs) { 338 | log::warn!("[syncer] run: sync failed: {}", err); 339 | } 340 | } 341 | } 342 | } 343 | } 344 | 345 | fn merge_changes( 346 | c1: Option, 347 | c2: Option, 348 | ) -> Option { 349 | match (c1, c2) { 350 | (c1, None) => c1, 351 | (None, c2) => c2, 352 | (Some(super::Changes::All), _) | (_, Some(super::Changes::All)) => { 353 | Some(super::Changes::All) 354 | } 355 | (Some(super::Changes::Pages(p1)), Some(super::Changes::Pages(p2))) => { 356 | Some(super::Changes::Pages(&p1 | &p2)) 357 | } 358 | } 359 | } 360 | } 361 | 362 | #[cfg(target_os = "emscripten")] 363 | mod emscripten { 364 | use crate::lfsc; 365 | use litetx as ltx; 366 | use std::{ 367 | collections::HashMap, 368 | io, 369 | sync::{Arc, Mutex}, 370 | time, 371 | }; 372 | 373 | pub(crate) struct Syncer { 374 | client: Arc, 375 | period: time::Duration, 376 | 377 | dbs: Mutex>, 378 | } 379 | 380 | struct Db { 381 | last_sync: time::SystemTime, 382 | period: time::Duration, 383 | } 384 | 385 | impl Db { 386 | fn sync_period(&self) -> Option { 387 | if self.period.is_zero() { 388 | return None; 389 | } 390 | 391 | Some(self.period) 392 | } 393 | 394 | fn next_sync(&self) -> Option { 395 | self.last_sync.checked_add(self.sync_period()?) 396 | } 397 | 398 | fn needs_sync(&self, now: &time::SystemTime) -> bool { 399 | if let Some(ref ns) = self.next_sync() { 400 | return ns <= now; 401 | } 402 | 403 | false 404 | } 405 | } 406 | 407 | impl Syncer { 408 | pub(crate) fn new(client: Arc, period: time::Duration) -> Arc { 409 | Arc::new(Syncer { 410 | client, 411 | period, 412 | 413 | dbs: Mutex::new(HashMap::new()), 414 | }) 415 | } 416 | 417 | pub(crate) fn open_conn(&self, db: &str, _pos: Option) { 418 | let mut dbs = self.dbs.lock().unwrap(); 419 | 420 | if !dbs.contains_key(db) { 421 | dbs.insert( 422 | db.to_string(), 423 | Db { 424 | last_sync: time::SystemTime::now(), 425 | period: self.period, 426 | }, 427 | ); 428 | } 429 | } 430 | 431 | pub(crate) fn close_conn(&self, _db: &str) {} 432 | 433 | pub(crate) fn needs_sync(&self, db: &str, _pos: Option) -> bool { 434 | let dbs = self.dbs.lock().unwrap(); 435 | 436 | let db = dbs.get(db).unwrap(); 437 | 438 | db.needs_sync(&time::SystemTime::now()) 439 | } 440 | 441 | pub(crate) fn get_changes( 442 | &self, 443 | db: &str, 444 | pos: Option, 445 | ) -> io::Result<(Option, Option)> { 446 | let mut dbs = HashMap::new(); 447 | dbs.insert(db.into(), pos); 448 | let mut changes = self.client.sync(&dbs)?; 449 | let changes = if let Some(changes) = changes.remove(db) { 450 | changes 451 | } else { 452 | return Ok((pos, None)); 453 | }; 454 | 455 | let mut dbs = self.dbs.lock().unwrap(); 456 | dbs.get_mut(db).unwrap().last_sync = time::SystemTime::now(); 457 | 458 | Ok((changes.pos(), changes.into())) 459 | } 460 | 461 | pub(crate) fn put_changes(&self, _db: &str, _prev_changes: super::Changes) {} 462 | 463 | pub(crate) fn set_pos(&self, _db: &str, _pos: Option) {} 464 | 465 | pub(crate) fn sync_one(&self, _db: &str, _deep: bool) -> io::Result<()> { 466 | Ok(()) 467 | } 468 | 469 | pub(crate) fn sync_period(&self, db: &str) -> time::Duration { 470 | self.dbs.lock().unwrap().get(db).unwrap().period 471 | } 472 | 473 | pub(crate) fn 
set_sync_period(&self, db: &str, period: time::Duration) { 474 | self.dbs.lock().unwrap().get_mut(db).unwrap().period = period; 475 | } 476 | } 477 | } 478 | -------------------------------------------------------------------------------- /crates/litevfs/src/lfsc.rs: -------------------------------------------------------------------------------- 1 | use crate::{http, IterLogger, OptionLogger, PositionsLogger}; 2 | use litetx as ltx; 3 | use std::{collections::HashMap, env, fmt, io, sync}; 4 | 5 | /// All possible errors returned by the LFSC client. 6 | #[derive(thiserror::Error, Debug)] 7 | pub(crate) enum Error { 8 | #[error("transport level: {0}")] 9 | Transport(String), 10 | #[error("ltx position mismatch: {0}")] 11 | PosMismatch(ltx::Pos), 12 | #[error("LFSC: {0}")] 13 | Lfsc(LfscError), 14 | #[error("body: {0}")] 15 | Body(#[from] io::Error), 16 | #[error("environment: {0}")] 17 | Env(String), 18 | } 19 | 20 | impl From<Error> for io::Error { 21 | fn from(e: Error) -> Self { 22 | match e { 23 | Error::Transport(e) => io::Error::new(io::ErrorKind::Other, e), 24 | Error::PosMismatch(_) => io::Error::new(io::ErrorKind::InvalidData, e), 25 | Error::Lfsc(e) if e.http_code == 404 => io::Error::new(io::ErrorKind::NotFound, e), 26 | Error::Lfsc(e) if e.http_code == 409 => io::Error::new(io::ErrorKind::AlreadyExists, e), 27 | Error::Lfsc(e) => io::Error::new(io::ErrorKind::Other, e), 28 | Error::Body(e) => e, 29 | Error::Env(s) => io::Error::new(io::ErrorKind::Other, s), 30 | } 31 | } 32 | } 33 | 34 | impl From<serde_json::Error> for Error { 35 | fn from(e: serde_json::Error) -> Self { 36 | Error::Body(io::Error::new(io::ErrorKind::InvalidData, e)) 37 | } 38 | } 39 | 40 | type Result<T> = std::result::Result<T, Error>; 41 | 42 | #[derive(thiserror::Error, Debug)] 43 | pub(crate) struct LfscError { 44 | pub(crate) http_code: u16, 45 | pub(crate) code: String, 46 | pub(crate) error: String, 47 | } 48 | 49 | impl fmt::Display for LfscError { 50 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> std::result::Result<(), fmt::Error> { 51 | write!( 52 | f, 53 | "{} ({}): {}", 54 | match self.http_code { 55 | 400 => "validation", 56 | 401 => "auth", 57 | 404 => "notfound", 58 | 409 => "conflict", 59 | 422 => "unprocessable", 60 | _ => "unknown", 61 | }, 62 | self.code, 63 | self.error 64 | ) 65 | } 66 | } 67 | 68 | #[derive(Debug, serde::Deserialize)] 69 | struct LfscErrorRepr { 70 | code: String, 71 | error: String, 72 | #[serde(default)] 73 | #[serde(with = "option_pos")] 74 | pos: Option<ltx::Pos>, 75 | } 76 | 77 | /// A LiteFS Cloud client. 78 | pub(crate) struct Client { 79 | client: http::Client, 80 | host: url::Url, 81 | token: Option<String>, 82 | cluster: Option<String>, 83 | cluster_id: Option<String>, 84 | instance_id: sync::RwLock<Option<String>>, 85 | } 86 | 87 | /// A single database page fetched from LFSC. 88 | #[serde_with::serde_as] 89 | #[derive(Debug, PartialEq, serde::Deserialize)] 90 | pub(crate) struct Page { 91 | #[serde_as(as = "serde_with::base64::Base64")] 92 | data: Vec<u8>, 93 | #[serde(rename = "pgno")] 94 | number: ltx::PageNum, 95 | } 96 | 97 | impl Page { 98 | /// Get the page number. 99 | pub(crate) fn number(&self) -> ltx::PageNum { 100 | self.number 101 | } 102 | 103 | /// Consume the page and return the underlying buffer.
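///
/// For example (an illustrative sketch; `get_pages` is defined further down in
/// this file, and `page_cache` is a hypothetical destination):
///
/// ```ignore
/// for page in client.get_pages("db.sqlite3", pos, &pgnos)? {
///     let pgno = page.number();
///     let bytes = page.into_inner(); // decoded page contents, without copying
///     page_cache.insert(pgno, bytes);
/// }
/// ```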
104 | pub(crate) fn into_inner(self) -> Vec<u8> { 105 | self.data 106 | } 107 | } 108 | 109 | impl AsRef<[u8]> for Page { 110 | fn as_ref(&self) -> &[u8] { 111 | &self.data 112 | } 113 | } 114 | 115 | #[derive(serde::Deserialize)] 116 | struct DbChanges { 117 | #[serde(with = "option_pos")] 118 | pos: Option<ltx::Pos>, 119 | pgnos: Option<Vec<ltx::PageNum>>, 120 | all: Option<bool>, 121 | } 122 | 123 | /// A set of pages changed since previously known state. 124 | #[derive(Debug)] 125 | pub(crate) enum Changes { 126 | All(Option<ltx::Pos>), 127 | Pages(Option<ltx::Pos>, Option<Vec<ltx::PageNum>>), 128 | } 129 | 130 | impl Changes { 131 | pub(crate) fn pos(&self) -> Option<ltx::Pos> { 132 | match self { 133 | Changes::All(pos) => *pos, 134 | Changes::Pages(pos, _) => *pos, 135 | } 136 | } 137 | } 138 | impl From<DbChanges> for Changes { 139 | fn from(c: DbChanges) -> Changes { 140 | match c.all { 141 | Some(true) => Changes::All(c.pos), 142 | _ => Changes::Pages(c.pos, c.pgnos), 143 | } 144 | } 145 | } 146 | 147 | #[allow(dead_code)] 148 | #[derive(Debug)] 149 | pub(crate) enum LeaseOp<'a> { 150 | Acquire(std::time::Duration), 151 | Refresh(&'a str, std::time::Duration), 152 | } 153 | 154 | impl<'a> fmt::Display for LeaseOp<'a> { 155 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> std::result::Result<(), fmt::Error> { 156 | match self { 157 | LeaseOp::Acquire(dur) => write!(f, "acquire({}ms)", dur.as_millis()), 158 | LeaseOp::Refresh(id, dur) => write!(f, "refresh({}, {}ms)", id, dur.as_millis()), 159 | } 160 | } 161 | } 162 | 163 | #[derive(Debug, Clone, serde::Deserialize, PartialEq)] 164 | pub(crate) struct Lease { 165 | pub(crate) id: String, 166 | #[serde(with = "time::serde::rfc3339")] 167 | pub(crate) expires_at: time::OffsetDateTime, 168 | } 169 | 170 | impl fmt::Display for Lease { 171 | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> std::result::Result<(), fmt::Error> { 172 | write!(f, "{}/{}", self.id, self.expires_at) 173 | } 174 | } 175 | 176 | impl Client { 177 | const CLUSTER_ID_LEN: usize = 20; 178 | const CLUSTER_ID_PREFIX: &'static str = "LFSC"; 179 | 180 | pub(crate) fn builder() -> ClientBuilder { 181 | ClientBuilder::default() 182 | } 183 | 184 | pub(crate) fn from_env() -> Result<Client> { 185 | let builder = Client::builder().token( 186 | &env::var("LITEFS_CLOUD_TOKEN") 187 | .map_err(|_| Error::Env("LITEFS_CLOUD_TOKEN env var is not set".into()))?, 188 | ); 189 | let builder = match env::var("LITEFS_CLOUD_CLUSTER") { 190 | Ok(cluster) => builder.cluster(&cluster), 191 | Err(_) => builder, 192 | }; 193 | let builder = match env::var("LITEFS_CLOUD_HOST") { 194 | Ok(host) => builder.host( 195 | &host 196 | .parse() 197 | .map_err(|e: url::ParseError| Error::Env(e.to_string()))?, 198 | ), 199 | Err(_) => builder, 200 | }; 201 | 202 | let mut client = builder.build(); 203 | 204 | // let info = client.info()?; 205 | // client.set_cluster_id(if let Some(cluster_id) = info.cluster_id { 206 | // cluster_id 207 | // } else { 208 | // Client::generate_cluster_id() 209 | // }); 210 | client.set_cluster_id(Client::generate_cluster_id()); 211 | 212 | log::info!( 213 | "[lfsc] from_env: host = {}, cluster = {}, cluster_id = {}", 214 | client.host, 215 | OptionLogger(&client.cluster), 216 | OptionLogger(&client.cluster_id), 217 | ); 218 | 219 | Ok(client) 220 | } 221 | 222 | pub(crate) fn set_cluster_id(&mut self, id: String) { 223 | self.cluster_id = Some(id) 224 | } 225 | 226 | pub(crate) fn generate_cluster_id() -> String { 227 | use rand::Rng; 228 | 229 | let mut buf = [0; (Client::CLUSTER_ID_LEN - Client::CLUSTER_ID_PREFIX.len()) / 2]; 230 | rand::thread_rng().fill(&mut buf); 231 |
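// 8 random bytes hex-encode to 16 characters; together with the 4-character
// "LFSC" prefix the result is exactly CLUSTER_ID_LEN (20) characters long.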
232 | format!("{}{}", Client::CLUSTER_ID_PREFIX, hex::encode_upper(buf)) 233 | } 234 | 235 | pub(crate) fn pos_map(&self) -> Result>> { 236 | log::debug!("[lfsc] pos_map"); 237 | 238 | match self.pos_map_inner() { 239 | Err(err) => { 240 | log::error!("[lfsc] pos_map: {}", err); 241 | Err(err) 242 | } 243 | x => x, 244 | } 245 | } 246 | 247 | pub(crate) fn write_tx( 248 | &self, 249 | db: &str, 250 | ltx: impl io::Read, 251 | ltx_len: u64, 252 | lease: &str, 253 | ) -> Result<()> { 254 | log::debug!( 255 | "[lfsc] write_tx: db = {}, lease = {}, ltx_len = {}", 256 | db, 257 | lease, 258 | ltx_len 259 | ); 260 | 261 | match self.write_tx_inner(db, ltx, ltx_len, lease) { 262 | Err(err) => { 263 | log::error!( 264 | "[lfsc] write_tx: db = {}, lease = {}, ltx_len = {}: {}", 265 | db, 266 | lease, 267 | ltx_len, 268 | err 269 | ); 270 | Err(err) 271 | } 272 | x => x, 273 | } 274 | } 275 | 276 | pub(crate) fn get_pages( 277 | &self, 278 | db: &str, 279 | pos: ltx::Pos, 280 | pgnos: &[ltx::PageNum], 281 | ) -> Result> { 282 | log::debug!( 283 | "[lfsc] get_pages: db = {}, pos = {}, pgnos = {}", 284 | db, 285 | pos, 286 | IterLogger(pgnos) 287 | ); 288 | 289 | match self.get_pages_inner(db, pos, pgnos) { 290 | Err(err) => { 291 | log::error!( 292 | "[lfsc] get_pages: db = {}, pos = {}, pgnos = {}: {}", 293 | db, 294 | pos, 295 | IterLogger(pgnos), 296 | err 297 | ); 298 | Err(err) 299 | } 300 | x => x, 301 | } 302 | } 303 | 304 | #[allow(dead_code)] 305 | pub(crate) fn info(&self) -> Result { 306 | log::debug!("[lfsc] info"); 307 | 308 | match self.info_inner() { 309 | Err(err) => { 310 | log::error!("[lfsc] info: {}", err); 311 | Err(err) 312 | } 313 | x => x, 314 | } 315 | } 316 | 317 | #[allow(dead_code)] 318 | pub(crate) fn sync_db(&self, db: &str, pos: Option) -> Result { 319 | log::debug!("[lfsc] sync: db = {}, pos = {}", db, OptionLogger(&pos)); 320 | 321 | match self.sync_db_inner(db, pos) { 322 | Err(err) => { 323 | log::error!( 324 | "[lfsc] sync_db: db = {}, pos = {}: {}", 325 | db, 326 | OptionLogger(&pos), 327 | err 328 | ); 329 | Err(err) 330 | } 331 | x => x, 332 | } 333 | } 334 | 335 | #[allow(dead_code)] 336 | pub(crate) fn acquire_lease(&self, db: &str, op: LeaseOp) -> Result { 337 | log::debug!("[lfsc] acquire_lease: db = {}, op = {}", db, op); 338 | 339 | match self.acquire_lease_inner(db, &op) { 340 | Err(err) => { 341 | log::error!("[lfsc] acquire_lease: db = {}, op = {}: {}", db, op, err); 342 | Err(err) 343 | } 344 | x => x, 345 | } 346 | } 347 | 348 | #[allow(dead_code)] 349 | pub(crate) fn release_lease(&self, db: &str, lease: Lease) -> Result<()> { 350 | log::debug!("[lfsc] release_lease: db = {}, lease = {}", db, lease.id); 351 | 352 | match self.release_lease_inner(db, &lease) { 353 | Err(err) => { 354 | log::error!( 355 | "[lfsc] release_lease: db = {}, lease = {}: {}", 356 | db, 357 | lease.id, 358 | err 359 | ); 360 | Err(err) 361 | } 362 | x => x, 363 | } 364 | } 365 | 366 | pub(crate) fn sync( 367 | &self, 368 | positions: &HashMap>, 369 | ) -> Result> { 370 | log::debug!("[lfsc] sync: positions = {}", PositionsLogger(positions)); 371 | 372 | match self.sync_inner(positions) { 373 | Err(err) => { 374 | log::error!( 375 | "[lfsc] sync: positions = {}: {}", 376 | PositionsLogger(positions), 377 | err 378 | ); 379 | Err(err) 380 | } 381 | x => x, 382 | } 383 | } 384 | 385 | fn pos_map_inner(&self) -> Result>> { 386 | let mut u = self.host.clone(); 387 | u.set_path("/pos"); 388 | 389 | #[derive(serde::Deserialize)] 390 | #[serde(transparent)] 391 | struct 
Helper(#[serde(with = "option_pos")] Option); 392 | 393 | Ok(self 394 | .call::>("GET", u)? 395 | .into_iter() 396 | .map(|(k, v)| (k, v.0)) 397 | .collect()) 398 | } 399 | 400 | fn write_tx_inner( 401 | &self, 402 | db: &str, 403 | ltx: impl io::Read, 404 | ltx_len: u64, 405 | lease: &str, 406 | ) -> Result<()> { 407 | let mut u = self.host.clone(); 408 | u.set_path("/db/tx"); 409 | u.query_pairs_mut().append_pair("db", db); 410 | 411 | let req = self 412 | .make_request("POST", u) 413 | .set("Content-Length", <x_len.to_string()) 414 | .set("Lfsc-Lease-Id", lease); 415 | let resp = self.process_response(req.send(ltx))?; 416 | 417 | // consume the body (and ignore any errors) to reuse the connection 418 | io::copy(&mut resp.into_reader(), &mut io::sink()).ok(); 419 | 420 | Ok(()) 421 | } 422 | 423 | fn get_pages_inner( 424 | &self, 425 | db: &str, 426 | pos: ltx::Pos, 427 | pgnos: &[ltx::PageNum], 428 | ) -> Result> { 429 | #[derive(serde::Deserialize)] 430 | struct GetPageResponse { 431 | pages: Vec, 432 | } 433 | 434 | let mut u = self.host.clone(); 435 | u.set_path("/db/page"); 436 | u.query_pairs_mut() 437 | .append_pair("db", db) 438 | .append_pair("pos", &pos.to_string()) 439 | .append_pair( 440 | "pgno", 441 | &pgnos 442 | .iter() 443 | .map(|pgno| pgno.to_string()) 444 | .collect::>() 445 | .join(","), 446 | ); 447 | 448 | Ok(self.call::("GET", u)?.pages) 449 | } 450 | 451 | #[allow(dead_code)] 452 | fn info_inner(&self) -> Result { 453 | let mut u = self.host.clone(); 454 | u.set_path("/info"); 455 | 456 | self.call("GET", u) 457 | } 458 | 459 | fn sync_db_inner(&self, db: &str, pos: Option) -> Result { 460 | let mut u = self.host.clone(); 461 | u.set_path("/db/sync"); 462 | u.query_pairs_mut().append_pair("db", db); 463 | if let Some(pos) = pos { 464 | u.query_pairs_mut().append_pair("pos", &pos.to_string()); 465 | } 466 | 467 | Ok(self.call::("GET", u)?.into()) 468 | } 469 | 470 | #[allow(dead_code)] 471 | fn acquire_lease_inner(&self, db: &str, op: &LeaseOp) -> Result { 472 | let mut u = self.host.clone(); 473 | u.set_path("/lease"); 474 | u.query_pairs_mut().append_pair("db", db); 475 | match op { 476 | LeaseOp::Acquire(duration) => u 477 | .query_pairs_mut() 478 | .append_pair("duration", &duration.as_millis().to_string()), 479 | 480 | LeaseOp::Refresh(lease, duration) => u 481 | .query_pairs_mut() 482 | .append_pair("id", lease) 483 | .append_pair("duration", &duration.as_millis().to_string()), 484 | }; 485 | 486 | self.call::("POST", u) 487 | } 488 | 489 | #[allow(dead_code)] 490 | fn release_lease_inner(&self, db: &str, lease: &Lease) -> Result<()> { 491 | let mut u = self.host.clone(); 492 | u.set_path("/lease"); 493 | u.query_pairs_mut() 494 | .append_pair("db", db) 495 | .append_pair("id", &lease.id); 496 | 497 | let req = self.make_request("DELETE", u); 498 | let resp = self.process_response(req.call())?; 499 | // consume the body (and ignore any errors) to reuse the connection 500 | io::copy(&mut resp.into_reader(), &mut io::sink()).ok(); 501 | 502 | Ok(()) 503 | } 504 | 505 | fn sync_inner( 506 | &self, 507 | positions: &HashMap>, 508 | ) -> Result> { 509 | let mut u = self.host.clone(); 510 | u.set_path("/sync"); 511 | 512 | #[derive(serde::Serialize)] 513 | #[serde(transparent)] 514 | struct Helper(#[serde(with = "option_pos")] Option); 515 | 516 | #[derive(serde::Serialize)] 517 | struct SyncRequest<'a> { 518 | positions: HashMap<&'a str, Helper>, 519 | } 520 | 521 | #[derive(serde::Deserialize)] 522 | struct SyncResponse { 523 | changes: HashMap, 524 | } 525 | 
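// Illustrative wire shapes implied by the serde types above (not an authoritative
// spec): the request body looks like
//   {"positions": {"db.sqlite3": {"txid": "...", "postApplyChecksum": "..."}}}
// where an unknown position is encoded as all-zero txid/checksum by the
// `option_pos` module below, and the response like
//   {"changes": {"db.sqlite3": {"pos": ..., "pgnos": [...], "all": ...}}}.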
526 | let positions: HashMap<&str, Helper> = positions 527 | .iter() 528 | .map(|(k, &v)| (k.as_str(), Helper(v))) 529 | .collect(); 530 | 531 | let req = self.make_request("POST", u); 532 | let resp = self.process_response(req.send_json(SyncRequest { positions }))?; 533 | let resp = resp.into_json::()?; 534 | 535 | Ok(resp 536 | .changes 537 | .into_iter() 538 | .map(|(k, v)| (k, v.into())) 539 | .collect()) 540 | } 541 | 542 | fn call(&self, method: &str, u: url::Url) -> Result 543 | where 544 | R: serde::de::DeserializeOwned, 545 | { 546 | let req = self.make_request(method, u); 547 | let resp = self.process_response(req.call())?; 548 | 549 | Ok(resp.into_json()?) 550 | } 551 | 552 | fn make_request(&self, method: &str, mut u: url::Url) -> http::Request { 553 | if let Some(ref cluster) = self.cluster { 554 | u.query_pairs_mut().append_pair("cluster", cluster); 555 | } 556 | 557 | let mut req = self.client.request(method, &u); 558 | if let Some(ref token) = self.token { 559 | req = req.set("Authorization", token); 560 | } 561 | if let Some(instance_id) = self.instance_id.read().unwrap().as_deref() { 562 | req = req.set("fly-force-instance-id", instance_id); 563 | } 564 | if let Some(ref cluster_id) = self.cluster_id { 565 | req = req.set("Litefs-Cluster-Id", cluster_id) 566 | } 567 | 568 | req 569 | } 570 | 571 | fn process_response( 572 | &self, 573 | resp: std::result::Result, 574 | ) -> Result { 575 | match resp { 576 | Ok(resp) => { 577 | let mut instance_id = self.instance_id.write().unwrap(); 578 | if instance_id.as_deref() != resp.header("Lfsc-Instance-Id") { 579 | *instance_id = resp.header("Lfsc-Instance-Id").map(Into::into); 580 | } 581 | 582 | Ok(resp) 583 | } 584 | Err(http::Error::Transport(err)) => Err(Error::Transport(err)), 585 | Err(http::Error::Status(code, body)) => { 586 | let repr: LfscErrorRepr = body.into_json()?; 587 | match repr.pos { 588 | Some(pos) if repr.code == "EPOSMISMATCH" => Err(Error::PosMismatch(pos)), 589 | _ => Err(Error::Lfsc(LfscError { 590 | http_code: code, 591 | code: repr.code, 592 | error: repr.error, 593 | })), 594 | } 595 | } 596 | } 597 | } 598 | } 599 | 600 | #[allow(dead_code)] 601 | #[derive(serde::Deserialize)] 602 | pub(crate) struct Info { 603 | #[serde(rename = "clusterID")] 604 | pub(crate) cluster_id: Option, 605 | } 606 | 607 | /// A LiteFS Cloud client builder. 
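// A minimal usage sketch for the builder below; the token is a placeholder
// and the cluster name is hypothetical. Omitting `host` makes `build()` fall
// back to the default endpoint, https://litefs.fly.io:
//
//     let client = ClientBuilder::default()
//         .token("<LFSC auth token>")
//         .cluster("my-cluster")
//         .build();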
608 | #[derive(Default)] 609 | pub(crate) struct ClientBuilder { 610 | host: Option, 611 | token: Option, 612 | cluster: Option, 613 | } 614 | 615 | impl ClientBuilder { 616 | pub(crate) fn host(mut self, u: &url::Url) -> Self { 617 | self.host = Some(u.clone()); 618 | self 619 | } 620 | 621 | pub(crate) fn token(mut self, token: &str) -> Self { 622 | self.token = Some(token.to_string()); 623 | self 624 | } 625 | 626 | pub(crate) fn cluster(mut self, cluster: &str) -> Self { 627 | self.cluster = Some(cluster.to_string()); 628 | self 629 | } 630 | 631 | pub(crate) fn build(self) -> Client { 632 | Client { 633 | client: http::Client::new(), 634 | host: self 635 | .host 636 | .unwrap_or(url::Url::parse("https://litefs.fly.io").unwrap()), 637 | token: self.token, 638 | cluster: self.cluster, 639 | cluster_id: None, 640 | instance_id: sync::RwLock::new(None), 641 | } 642 | } 643 | } 644 | 645 | mod option_pos { 646 | use litetx as ltx; 647 | use serde::{ 648 | de::{self, Deserializer}, 649 | ser::{Serialize, SerializeStruct, Serializer}, 650 | Deserialize, 651 | }; 652 | 653 | pub fn serialize(value: &Option, serializer: S) -> Result 654 | where 655 | S: Serializer, 656 | { 657 | match value { 658 | Some(pos) => pos.serialize(serializer), 659 | None => { 660 | let mut state = serializer.serialize_struct("Pos", 2)?; 661 | state.serialize_field("txid", "0000000000000000")?; 662 | state.serialize_field("postApplyChecksum", "0000000000000000")?; 663 | state.end() 664 | } 665 | } 666 | } 667 | 668 | pub fn deserialize<'de, D>(deserializer: D) -> Result, D::Error> 669 | where 670 | D: Deserializer<'de>, 671 | { 672 | #[derive(Deserialize)] 673 | struct Helper { 674 | txid: String, 675 | #[serde(rename = "postApplyChecksum")] 676 | post_apply_checksum: String, 677 | } 678 | 679 | let helper: Helper = Deserialize::deserialize(deserializer)?; 680 | let txid = u64::from_str_radix(&helper.txid, 16).map_err(de::Error::custom)?; 681 | let post_apply_checksum = 682 | u64::from_str_radix(&helper.post_apply_checksum, 16).map_err(de::Error::custom)?; 683 | 684 | match (txid, post_apply_checksum) { 685 | (0, 0) => Ok(None), 686 | (t, p) => Ok(Some(ltx::Pos { 687 | txid: ltx::TXID::new(t).map_err(de::Error::custom)?, 688 | post_apply_checksum: ltx::Checksum::new(p), 689 | })), 690 | } 691 | } 692 | } 693 | 694 | #[cfg(test)] 695 | mod tests { 696 | use super::{Lease, Page}; 697 | use litetx as ltx; 698 | use serde_test::{assert_de_tokens, Token}; 699 | 700 | #[test] 701 | fn page_de() { 702 | let page = Page { 703 | data: vec![1, 2, 3, 4, 5, 6], 704 | number: ltx::PageNum::new(123).unwrap(), 705 | }; 706 | 707 | assert_de_tokens( 708 | &page, 709 | &[ 710 | Token::Struct { 711 | name: "Page", 712 | len: 2, 713 | }, 714 | Token::Str("pgno"), 715 | Token::U32(123), 716 | Token::Str("data"), 717 | Token::BorrowedStr("AQIDBAUG"), 718 | Token::StructEnd, 719 | ], 720 | ); 721 | } 722 | 723 | #[test] 724 | fn lease_de() { 725 | use time::macros::datetime; 726 | 727 | let lease = Lease { 728 | id: "123456789".into(), 729 | expires_at: datetime!(2023-08-29 13:20:55.706550992 +2), 730 | }; 731 | 732 | assert_de_tokens( 733 | &lease, 734 | &[ 735 | Token::Struct { 736 | name: "Lease", 737 | len: 2, 738 | }, 739 | Token::Str("id"), 740 | Token::Str("123456789"), 741 | Token::Str("expires_at"), 742 | Token::Str("2023-08-29T11:20:55.706550992Z"), 743 | Token::StructEnd, 744 | ], 745 | ); 746 | } 747 | } 748 | -------------------------------------------------------------------------------- /crates/litevfs/src/pager.rs: 
-------------------------------------------------------------------------------- 1 | use crate::{lfsc, IterLogger, OptionLogger, LITEVFS_IOERR_POS_MISMATCH}; 2 | use bytesize::ByteSize; 3 | use caches::{Cache, SegmentedCache}; 4 | use litetx::{self as ltx, PageChecksum}; 5 | use read_write_at::ReadAtMut; 6 | use sqlite_vfs::CodeError; 7 | use std::{ 8 | ffi, fs, 9 | io::{self, Read, Write}, 10 | path::{Path, PathBuf}, 11 | sync::{ 12 | atomic::{AtomicU64, AtomicUsize, Ordering}, 13 | Arc, Mutex, 14 | }, 15 | }; 16 | use string_interner::{DefaultSymbol, StringInterner}; 17 | 18 | #[derive(PartialEq, Eq)] 19 | pub(crate) enum PageSource { 20 | Local, 21 | Remote, 22 | } 23 | 24 | /// [Pager] manages SQLite page data. It uses local filesystem to cache 25 | /// the pages and when the pages are absent in the cache, requests them from LFSC. 26 | pub(crate) struct Pager { 27 | root: PathBuf, 28 | client: Arc, 29 | 30 | interner: Mutex, 31 | lru: Mutex>, 32 | 33 | min_available_space: AtomicU64, 34 | max_cached_pages: AtomicUsize, 35 | } 36 | 37 | impl Pager { 38 | pub(crate) fn new>(path: P, client: Arc) -> Pager { 39 | Pager { 40 | root: path.as_ref().to_path_buf(), 41 | client, 42 | 43 | interner: Mutex::new(StringInterner::new()), 44 | // The size is chosen from: 45 | // - 128Mb of space 46 | // - 4k page size 47 | // In reality is doesn't matter as we are gonna check available 48 | // FS space anyway. But we need some predetermined size as 49 | // the cache is not resizable. 50 | lru: Mutex::new(SegmentedCache::new(6500, 26000).unwrap()), 51 | 52 | min_available_space: AtomicU64::new(10 * 1024 * 1024), 53 | max_cached_pages: AtomicUsize::new(0), 54 | } 55 | } 56 | 57 | /// Returns a base path for the given `db`. 58 | pub(crate) fn db_path(&self, db: &str) -> PathBuf { 59 | self.root.join(db) 60 | } 61 | 62 | /// Prepares all the paths for the given `db`. 63 | pub(crate) fn prepare_db(&self, db: &str) -> io::Result<()> { 64 | fs::create_dir_all(self.pages_path(db))?; 65 | fs::create_dir_all(self.tmp_path(db))?; 66 | 67 | Ok(()) 68 | } 69 | 70 | /// Returns a `db` `page` at the given database `pos`. 71 | pub(crate) fn get_page( 72 | &self, 73 | db: &str, 74 | pos: Option, 75 | pgno: ltx::PageNum, 76 | prefetch: Option<&[ltx::PageNum]>, 77 | ) -> io::Result { 78 | log::debug!( 79 | "[pager] get_page: db = {}, pos = {}, pgno = {}, prefetch = {}", 80 | db, 81 | OptionLogger(&pos), 82 | pgno, 83 | IterLogger(if let Some(pgnos) = prefetch { 84 | pgnos 85 | } else { 86 | &[] 87 | }), 88 | ); 89 | 90 | // Request the page either from local cache or from LFSC and convert 91 | // io::ErrorKind::NotFound errors to io::ErrorKind::UnexpectedEof, as 92 | // this is what local IO will return in case we read past the file. 93 | // TODO: we may need to suppress duplicated calls to the same page here. 94 | let r = match self.get_page_inner(db, pos, pgno, prefetch) { 95 | Err(err) if err.kind() == io::ErrorKind::NotFound => { 96 | Err(io::ErrorKind::UnexpectedEof.into()) 97 | } 98 | x => x, 99 | }; 100 | 101 | // Log the error, if any 102 | match r { 103 | Err(err) => { 104 | log::error!( 105 | "[pager] get_page: db = {}, pos = {}, pgno = {}, prefetch = {}: {}", 106 | db, 107 | OptionLogger(&pos), 108 | pgno, 109 | IterLogger(if let Some(pgnos) = prefetch { 110 | pgnos 111 | } else { 112 | &[] 113 | }), 114 | err 115 | ); 116 | Err(err) 117 | } 118 | x => x, 119 | } 120 | } 121 | 122 | /// Copies the page starting at `offset` to the provided buffer. 
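// `get_page_slice` below applies the same error normalization as `get_page`
// above: a page that is missing both locally and in LFSC surfaces as
// io::ErrorKind::UnexpectedEof, matching what local IO returns for a read
// past the end of the file. The mapping in isolation:
//
//     fn normalize_missing<T>(r: io::Result<T>) -> io::Result<T> {
//         match r {
//             Err(err) if err.kind() == io::ErrorKind::NotFound => {
//                 Err(io::ErrorKind::UnexpectedEof.into())
//             }
//             x => x,
//         }
//     }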
123 | #[allow(clippy::too_many_arguments)] 124 | pub(crate) fn get_page_slice( 125 | &self, 126 | db: &str, 127 | pos: Option, 128 | pgno: ltx::PageNum, 129 | buf: &mut [u8], 130 | offset: u64, 131 | local_only: bool, 132 | prefetch: Option>, 133 | ) -> io::Result { 134 | log::debug!( 135 | "[pager] get_page_slice: db = {}, pos = {}, pgno = {}, len = {}, offset = {}, local_only = {}, prefetch = {}", 136 | db, 137 | OptionLogger(&pos), 138 | pgno, 139 | buf.len(), 140 | offset, 141 | local_only, 142 | IterLogger(if let Some(pgnos) = prefetch.as_deref() { pgnos } else { &[] }), 143 | ); 144 | 145 | // Request the page either from local cache or from LFSC and convert 146 | // io::ErrorKind::NotFound errors to io::ErrorKind::UnexpectedEof, as 147 | // this is what local IO will return in case we read past the file. 148 | // TODO: we may need to suppress duplicated calls to the same page here. 149 | let r = match self.get_page_slice_inner( 150 | db, 151 | pos, 152 | pgno, 153 | buf, 154 | offset, 155 | local_only, 156 | prefetch.as_deref(), 157 | ) { 158 | Err(err) if err.kind() == io::ErrorKind::NotFound => { 159 | Err(io::ErrorKind::UnexpectedEof.into()) 160 | } 161 | x => x, 162 | }; 163 | 164 | // Log the error, if any 165 | match r { 166 | Err(err) => { 167 | log::error!( 168 | "[pager] get_page_slice: db = {}, pos = {}, pgno = {}, len = {}, offset = {}, local_only = {}, prefetch = {}: {}", 169 | db, 170 | OptionLogger(&pos), 171 | pgno, 172 | buf.len(), 173 | offset, 174 | local_only, 175 | IterLogger(if let Some(pgnos) = prefetch.as_deref() { pgnos } else { &[] }), 176 | err 177 | ); 178 | Err(err) 179 | } 180 | x => x, 181 | } 182 | } 183 | 184 | /// Writes page into the local cache. The page is not shipped to LFSC until the 185 | /// database is committed. 186 | pub(crate) fn put_page(&self, db: &str, page: PageRef) -> io::Result<()> { 187 | log::debug!("[pager] put_page: db = {}, pgno = {}", db, page.number()); 188 | 189 | match self.put_page_inner(db, page) { 190 | Err(err) => { 191 | log::error!( 192 | "[pager] put_page: db = {}, pgno = {}: {}", 193 | db, 194 | page.number(), 195 | err, 196 | ); 197 | Err(err) 198 | } 199 | x => x, 200 | } 201 | } 202 | 203 | /// Deletes the page from the local cache. It's fine to attempt to delete an non-existing 204 | /// page. 205 | pub(crate) fn del_page(&self, db: &str, pgno: ltx::PageNum) -> io::Result { 206 | log::debug!("[pager] del_page: db = {} , pgno = {}", db, pgno); 207 | 208 | match self.del_page_inner(db, pgno) { 209 | Err(err) => { 210 | log::error!("[pager] del_page: db = {}, pgno = {}: {}", db, pgno, err); 211 | Err(err) 212 | } 213 | x => x, 214 | } 215 | } 216 | 217 | /// Removes all pages past the provided `pgno`. 218 | pub(crate) fn truncate(&self, db: &str, pgno: ltx::PageNum) -> io::Result<()> { 219 | log::debug!("[pager] truncate: db = {}, pgno = {}", db, pgno); 220 | 221 | match self.truncate_inner(db, pgno) { 222 | Err(err) => { 223 | log::error!("[pager] truncate: db = {}, pgno = {}: {}", db, pgno, err); 224 | Err(err) 225 | } 226 | x => x, 227 | } 228 | } 229 | 230 | /// Removes all pages of a database. 
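// Like every public Pager method, `clear` below is a thin log-and-delegate
// wrapper around a `*_inner` worker. A sketch of how the repeated match could
// be factored out (illustrative only; the crate keeps the explicit form so
// each log line can carry method-specific context):
//
//     fn logged<T>(ctx: &str, r: io::Result<T>) -> io::Result<T> {
//         if let Err(ref err) = r {
//             log::error!("[pager] {}: {}", ctx, err);
//         }
//         r
//     }
//
//     // e.g. `logged("clear", self.clear_inner(db))`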
231 | pub(crate) fn clear(&self, db: &str) -> io::Result> { 232 | log::debug!("[pager] clear: db = {}", db); 233 | 234 | match self.clear_inner(db) { 235 | Err(err) => { 236 | log::error!("[pager] clear: db = {}: {}", db, err); 237 | Err(err) 238 | } 239 | x => x, 240 | } 241 | } 242 | 243 | /// Checks if the page is cached locally 244 | pub(crate) fn has_page(&self, db: &str, pgno: ltx::PageNum) -> io::Result { 245 | match self.has_page_inner(db, pgno) { 246 | Err(err) => { 247 | log::error!("[pager] has_page: db = {} pgno = {}: {}", db, pgno, err); 248 | Err(err) 249 | } 250 | x => x, 251 | } 252 | } 253 | 254 | /// Returns the minimum available space that pager is trying to keep on the FS. 255 | pub(crate) fn min_available_space(&self) -> u64 { 256 | self.min_available_space.load(Ordering::Acquire) 257 | } 258 | 259 | /// Sets the minimum available space that pager needs to maintain on the FS. 260 | pub(crate) fn set_min_available_space(&self, maa: u64) { 261 | self.min_available_space.store(maa, Ordering::Release) 262 | } 263 | 264 | /// Returns the maximum number of pages that pager will cache on local FS. 265 | pub(crate) fn max_cached_pages(&self) -> usize { 266 | self.max_cached_pages.load(Ordering::Acquire) 267 | } 268 | 269 | /// Sets the maximum number of pages that pager will cache on local FS. 270 | pub(crate) fn set_max_cached_pages(&self, mcp: usize) { 271 | self.max_cached_pages.store(mcp, Ordering::Release) 272 | } 273 | 274 | fn get_page_inner( 275 | &self, 276 | db: &str, 277 | pos: Option, 278 | pgno: ltx::PageNum, 279 | prefetch: Option<&[ltx::PageNum]>, 280 | ) -> io::Result { 281 | match self.get_page_local(db, pos, pgno) { 282 | Ok(page) => return Ok(page), 283 | Err(err) if err.kind() != io::ErrorKind::NotFound => return Err(err), 284 | _ => (), 285 | }; 286 | 287 | self.get_page_remote(db, pos, pgno, prefetch) 288 | } 289 | 290 | #[allow(clippy::too_many_arguments)] 291 | fn get_page_slice_inner( 292 | &self, 293 | db: &str, 294 | pos: Option, 295 | pgno: ltx::PageNum, 296 | buf: &mut [u8], 297 | offset: u64, 298 | local_only: bool, 299 | prefetch: Option<&[ltx::PageNum]>, 300 | ) -> io::Result { 301 | match self.get_page_slice_local(db, pos, pgno, buf, offset) { 302 | Ok(_) => return Ok(PageSource::Local), 303 | Err(err) if err.kind() != io::ErrorKind::NotFound => return Err(err), 304 | _ => (), 305 | }; 306 | 307 | if local_only { 308 | return Err(io::Error::new( 309 | io::ErrorKind::WouldBlock, 310 | "local_only page not found in cache", 311 | )); 312 | } 313 | 314 | let page = self.get_page_remote(db, pos, pgno, prefetch)?; 315 | let offset = offset as usize; 316 | buf.copy_from_slice(&page.as_ref()[offset..offset + buf.len()]); 317 | 318 | Ok(PageSource::Remote) 319 | } 320 | 321 | fn get_page_local( 322 | &self, 323 | db: &str, 324 | _pos: Option, 325 | pgno: ltx::PageNum, 326 | ) -> io::Result { 327 | let mut file = fs::File::open(self.pages_path(db).join(PathBuf::from(pgno)))?; 328 | let mut buf = Vec::new(); 329 | file.read_to_end(&mut buf)?; 330 | 331 | // Mark the page as recently accessed 332 | self.lru.lock().unwrap().get(&self.cache_key(db, pgno)); 333 | 334 | Ok(Page::new(pgno, buf)) 335 | } 336 | 337 | fn get_page_slice_local( 338 | &self, 339 | db: &str, 340 | _pos: Option, 341 | pgno: ltx::PageNum, 342 | buf: &mut [u8], 343 | offset: u64, 344 | ) -> io::Result<()> { 345 | let mut file = fs::File::open(self.pages_path(db).join(PathBuf::from(pgno)))?; 346 | file.read_exact_at(buf, offset)?; 347 | 348 | // Mark the page as recently accessed 349 | 
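// (a hit via `get` also promotes a probationary entry to the protected
// segment of the SegmentedCache, so repeatedly read pages become harder
// to evict)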
self.lru.lock().unwrap().get(&self.cache_key(db, pgno)); 350 | 351 | Ok(()) 352 | } 353 | 354 | fn get_page_remote( 355 | &self, 356 | db: &str, 357 | pos: Option, 358 | pgno: ltx::PageNum, 359 | prefetch: Option<&[ltx::PageNum]>, 360 | ) -> io::Result { 361 | let pos = if let Some(pos) = pos { 362 | pos 363 | } else { 364 | return Err(io::ErrorKind::NotFound.into()); 365 | }; 366 | 367 | let mut pages = vec![pgno]; 368 | if let Some(pgnos) = prefetch { 369 | pages.extend(pgnos); 370 | } 371 | let pages = match self.client.get_pages(db, pos, &pages) { 372 | Ok(pages) => pages, 373 | Err(lfsc::Error::PosMismatch(x)) => { 374 | log::warn!("get_page_remote: db = {}, pgno = {}, pos mismatch error, requested = {}, got = {}", 375 | db, pgno, pos, x); 376 | // LFSC no longer have the requested pos. At this point we may try to recover 377 | // from this ourselves, or tell the user to retry the transaction. The only 378 | // safe situation when we can recover is when this is the very first read 379 | // of a TX. But, in 99.9% the very first read will hit the cache (page 1), 380 | // so just return a custom error code to the user. The client code can retry 381 | // the transaction automatically after that. 382 | return Err(io::Error::new( 383 | io::ErrorKind::Other, 384 | CodeError::new(LITEVFS_IOERR_POS_MISMATCH), 385 | )); 386 | } 387 | Err(x) => return Err(x.into()), 388 | }; 389 | 390 | let mut requested_page: Option = None; 391 | for page in pages { 392 | log::trace!( 393 | "[pager] get_page_remote: pos = {}, pgno = {}, got = {}", 394 | pos, 395 | pgno, 396 | page.number(), 397 | ); 398 | let page_ref = PageRef { 399 | data: page.as_ref(), 400 | number: page.number(), 401 | }; 402 | self.put_page_inner(db, page_ref)?; 403 | 404 | if page.number() == pgno { 405 | requested_page = Some(Page::new(page.number(), page.into_inner())) 406 | } 407 | } 408 | 409 | requested_page.ok_or(io::ErrorKind::NotFound.into()) 410 | } 411 | 412 | fn put_page_inner(&self, db: &str, page: PageRef) -> io::Result<()> { 413 | let tmp_name = self.tmp_path(db).join(PathBuf::from(page.number())); 414 | let final_name = self.pages_path(db).join(PathBuf::from(page.number())); 415 | 416 | self.reclaim_space()?; 417 | 418 | let mut file = fs::File::create(&tmp_name)?; 419 | file.write_all(page.as_ref())?; 420 | fs::rename(tmp_name, final_name)?; 421 | 422 | self.lru 423 | .lock() 424 | .unwrap() 425 | .put(self.cache_key(db, page.number()), ()); 426 | 427 | Ok(()) 428 | } 429 | 430 | fn del_page_inner(&self, db: &str, pgno: ltx::PageNum) -> io::Result { 431 | let name = self.pages_path(db).join(PathBuf::from(pgno)); 432 | let removed = remove_file(name)?; 433 | 434 | self.lru.lock().unwrap().remove(&self.cache_key(db, pgno)); 435 | 436 | Ok(removed) 437 | } 438 | 439 | fn truncate_inner(&self, db: &str, pgno: ltx::PageNum) -> io::Result<()> { 440 | let fname: ffi::OsString = PathBuf::from(pgno).into(); 441 | 442 | for entry in fs::read_dir(self.pages_path(db))? { 443 | let entry = entry?; 444 | if !entry.file_type()?.is_file() || entry.file_name() <= fname { 445 | continue; 446 | } 447 | 448 | remove_file(entry.path())?; 449 | 450 | let rpgno = ltx::PageNum::try_from(Path::new(&entry.file_name()))?; 451 | self.lru.lock().unwrap().remove(&self.cache_key(db, rpgno)); 452 | } 453 | 454 | Ok(()) 455 | } 456 | 457 | fn clear_inner(&self, db: &str) -> io::Result> { 458 | let mut pgnos = Vec::new(); 459 | 460 | for entry in fs::read_dir(self.pages_path(db))? 
{ 461 | let entry = entry?; 462 | if !entry.file_type()?.is_file() { 463 | continue; 464 | } 465 | 466 | remove_file(entry.path())?; 467 | 468 | let rpgno = ltx::PageNum::try_from(Path::new(&entry.file_name()))?; 469 | self.lru.lock().unwrap().remove(&self.cache_key(db, rpgno)); 470 | 471 | pgnos.push(rpgno); 472 | } 473 | 474 | Ok(pgnos) 475 | } 476 | 477 | fn has_page_inner(&self, db: &str, pgno: ltx::PageNum) -> io::Result { 478 | let page_name = self.pages_path(db).join(PathBuf::from(pgno)); 479 | 480 | page_name.try_exists() 481 | } 482 | 483 | fn pages_path(&self, db: &str) -> PathBuf { 484 | self.db_path(db).join("pages") 485 | } 486 | 487 | fn tmp_path(&self, db: &str) -> PathBuf { 488 | self.db_path(db).join("tmp") 489 | } 490 | 491 | fn cache_key(&self, db: &str, pgno: ltx::PageNum) -> PageCacheKey { 492 | PageCacheKey { 493 | dbsym: self.interner.lock().unwrap().get_or_intern(db), 494 | pgno, 495 | } 496 | } 497 | 498 | fn reclaim_space(&self) -> io::Result<()> { 499 | let max_pages = self.max_cached_pages(); 500 | let min_space = self.min_available_space(); 501 | 502 | loop { 503 | let pages = self.lru.lock().unwrap().len(); 504 | let space = statvfs(&self.root)?.available_space; 505 | 506 | log::trace!( 507 | "[pager] reclaim_space: pages = {}, max_pages = {}, space = {}, min_space = {}", 508 | pages, 509 | max_pages, 510 | ByteSize::b(space).to_string_as(true), 511 | ByteSize::b(min_space).to_string_as(true), 512 | ); 513 | 514 | if pages == 0 || space >= min_space && (pages <= max_pages || max_pages == 0) { 515 | return Ok(()); 516 | } 517 | 518 | self.remove_lru_page()?; 519 | } 520 | } 521 | 522 | fn remove_lru_page(&self) -> io::Result<()> { 523 | let cache_key = { 524 | let mut lru = self.lru.lock().unwrap(); 525 | 526 | if let Some((cache_key, _)) = lru.remove_lru_from_probationary() { 527 | cache_key 528 | } else if let Some((cache_key, _)) = lru.remove_lru_from_protected() { 529 | cache_key 530 | } else { 531 | return Ok(()); 532 | } 533 | }; 534 | 535 | if let Some(db) = self.interner.lock().unwrap().resolve(cache_key.dbsym) { 536 | log::trace!( 537 | "[pager] remove_lru_page: db = {}, pgno = {}", 538 | db, 539 | cache_key.pgno 540 | ); 541 | remove_file(self.pages_path(db).join(PathBuf::from(cache_key.pgno)))?; 542 | } 543 | 544 | Ok(()) 545 | } 546 | } 547 | 548 | /// A struct that owns a single database page. 549 | pub(crate) struct Page { 550 | data: Vec, 551 | number: ltx::PageNum, 552 | } 553 | 554 | impl Page { 555 | /// Return a new [Page] with `number` and the given `data`. 556 | pub(crate) fn new(number: ltx::PageNum, data: Vec) -> Page { 557 | Page { data, number } 558 | } 559 | 560 | /// Returns `page` number. 561 | pub(crate) fn number(&self) -> ltx::PageNum { 562 | self.number 563 | } 564 | 565 | /// Returns `page` checksum. 566 | pub(crate) fn checksum(&self) -> ltx::Checksum { 567 | self.data.page_checksum(self.number()) 568 | } 569 | } 570 | 571 | impl AsRef<[u8]> for Page { 572 | fn as_ref(&self) -> &[u8] { 573 | &self.data 574 | } 575 | } 576 | 577 | /// A struct that borrows a single database page. Cheap to construct and copy. 578 | #[derive(Clone, Copy)] 579 | pub(crate) struct PageRef<'a> { 580 | data: &'a [u8], 581 | number: ltx::PageNum, 582 | } 583 | 584 | impl<'a> PageRef<'a> { 585 | /// Return a new [PageRef] with `number` and the given `data`. 586 | pub(crate) fn new(number: ltx::PageNum, data: &'a [u8]) -> PageRef<'a> { 587 | PageRef { data, number } 588 | } 589 | 590 | /// Returns `page` number. 
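// Cache keys stay small because `cache_key` above interns the database name:
// each LRU entry is a fixed-size (DefaultSymbol, PageNum) pair instead of an
// owned String. The same idea in isolation:
//
//     let mut interner = StringInterner::new();
//     let sym = interner.get_or_intern("db.sqlite3");
//     assert_eq!(interner.get_or_intern("db.sqlite3"), sym); // stable symbol
//     assert_eq!(interner.resolve(sym), Some("db.sqlite3"));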
591 | pub(crate) fn number(&self) -> ltx::PageNum { 592 | self.number 593 | } 594 | } 595 | 596 | impl<'a> AsRef<[u8]> for PageRef<'a> { 597 | fn as_ref(&self) -> &[u8] { 598 | self.data 599 | } 600 | } 601 | 602 | #[derive(PartialEq, Eq, Hash)] 603 | struct PageCacheKey { 604 | dbsym: DefaultSymbol, 605 | pgno: ltx::PageNum, 606 | } 607 | 608 | fn remove_file<P: AsRef<Path>>(file: P) -> io::Result<bool> { 609 | match fs::remove_file(file) { 610 | Err(err) if err.kind() == io::ErrorKind::NotFound => Ok(false), 611 | Err(x) => Err(x), 612 | Ok(()) => Ok(true), 613 | } 614 | } 615 | 616 | struct FsStats { 617 | available_space: u64, 618 | } 619 | 620 | #[cfg(unix)] 621 | #[allow(clippy::unnecessary_cast)] 622 | fn statvfs<P>
(path: P) -> io::Result<FsStats> 623 | where 624 | P: AsRef<Path>, 625 | { 626 | use std::{mem, os::unix::prelude::OsStrExt}; 627 | 628 | let cstr = match ffi::CString::new(path.as_ref().as_os_str().as_bytes()) { 629 | Ok(cstr) => cstr, 630 | Err(..) => { 631 | return Err(io::Error::new( 632 | io::ErrorKind::InvalidInput, 633 | "path contained a null", 634 | )) 635 | } 636 | }; 637 | 638 | unsafe { 639 | let mut stat: libc::statvfs = mem::zeroed(); 640 | if libc::statvfs(cstr.as_ptr() as *const _, &mut stat) != 0 { 641 | Err(io::Error::last_os_error()) 642 | } else { 643 | Ok(FsStats { 644 | available_space: stat.f_frsize as u64 * stat.f_bavail as u64, 645 | }) 646 | } 647 | } 648 | } 649 | 650 | #[cfg(windows)] 651 | fn statvfs<P>
(path: P) -> io::Result<FsStats> 652 | where 653 | P: AsRef<Path>, 654 | { 655 | use std::os::windows::ffi::OsStrExt; 656 | use winapi::{ 657 | shared::minwindef::DWORD, 658 | um::fileapi::{GetDiskFreeSpaceW, GetVolumePathNameW}, 659 | }; 660 | 661 | let root_path: &mut [u16] = &mut [0; 261]; 662 | let path_utf8: Vec<u16> = path 663 | .as_ref() 664 | .as_os_str() 665 | .encode_wide() 666 | .chain(Some(0)) 667 | .collect(); 668 | let available_space = unsafe { 669 | if GetVolumePathNameW( 670 | path_utf8.as_ptr(), 671 | root_path.as_mut_ptr(), 672 | root_path.len() as DWORD, 673 | ) == 0 674 | { 675 | return Err(io::Error::last_os_error()); 676 | } 677 | 678 | let mut sectors_per_cluster = 0; 679 | let mut bytes_per_sector = 0; 680 | let mut number_of_free_clusters = 0; 681 | let mut total_number_of_clusters = 0; 682 | if GetDiskFreeSpaceW( 683 | root_path.as_ptr(), 684 | &mut sectors_per_cluster, 685 | &mut bytes_per_sector, 686 | &mut number_of_free_clusters, 687 | &mut total_number_of_clusters, 688 | ) == 0 689 | { 690 | return Err(io::Error::last_os_error()); 691 | } 692 | 693 | let bytes_per_cluster = sectors_per_cluster as u64 * bytes_per_sector as u64; 694 | bytes_per_cluster * number_of_free_clusters as u64 695 | }; 696 | 697 | Ok(FsStats { available_space }) 698 | } 699 | 700 | #[cfg(test)] 701 | mod tests { 702 | use std::env::temp_dir; 703 | 704 | #[test] 705 | fn statvfs() { 706 | let stats = super::statvfs(temp_dir()).expect("statvfs"); 707 | 708 | assert!(stats.available_space > 0); 709 | } 710 | } 711 | -------------------------------------------------------------------------------- /crates/litevfs/src/database.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | leaser::Leaser, 3 | lfsc, 4 | locks::{ConnLock, VfsLock}, 5 | pager::{PageRef, PageSource, Pager}, 6 | sqlite, 7 | syncer::{Changes, Syncer}, 8 | IterLogger, OptionLogger, 9 | }; 10 | use litetx as ltx; 11 | use sqlite_vfs::OpenAccess; 12 | use std::{ 13 | collections::{BTreeMap, BTreeSet, HashMap}, 14 | fs, 15 | io::{self, Read, Seek, SeekFrom}, 16 | ops, 17 | path::PathBuf, 18 | sync::{Arc, Mutex, RwLock}, 19 | time, 20 | }; 21 | 22 | const DEFAULT_MAX_PREFETCH_PAGES: usize = 32; 23 | pub(crate) const MAX_MAX_PREFETCH_PAGES: usize = 128; 24 | 25 | pub(crate) struct DatabaseManager { 26 | pager: Arc<Pager>, 27 | databases: HashMap<String, Arc<RwLock<Database>>>, 28 | client: Arc<lfsc::Client>, 29 | leaser: Arc<Leaser>, 30 | syncer: Arc<Syncer>, 31 | } 32 | 33 | impl DatabaseManager { 34 | pub(crate) fn new( 35 | pager: Arc<Pager>, 36 | client: Arc<lfsc::Client>, 37 | leaser: Arc<Leaser>, 38 | syncer: Arc<Syncer>, 39 | ) -> DatabaseManager { 40 | DatabaseManager { 41 | pager, 42 | databases: HashMap::new(), 43 | client, 44 | leaser, 45 | syncer, 46 | } 47 | } 48 | 49 | pub(crate) fn get_database( 50 | &mut self, 51 | dbname: &str, 52 | access: OpenAccess, 53 | ) -> io::Result<Arc<RwLock<Database>>> { 54 | let db = if let Some(db) = self.get_database_local_in_mem(dbname, access)? { 55 | db 56 | } else if let Some(db) = self.get_database_local_on_disk(dbname, access)? { 57 | self.databases.insert(dbname.into(), Arc::clone(&db)); 58 | db 59 | } else if let Some(db) = self.get_database_remote(dbname, access)?
{ 60 | self.databases.insert(dbname.into(), Arc::clone(&db)); 61 | db 62 | } else { 63 | return Err(io::Error::new( 64 | io::ErrorKind::NotFound, 65 | "database not found", 66 | )); 67 | }; 68 | 69 | if access != OpenAccess::Read { 70 | if db.read().unwrap().wal { 71 | return Err(io::Error::new( 72 | io::ErrorKind::Unsupported, 73 | "DB in WAL mode can't be opened for RW", 74 | )); 75 | } 76 | if db.read().unwrap().auto_vacuum { 77 | return Err(io::Error::new( 78 | io::ErrorKind::Unsupported, 79 | "DB with auto_vacuum can't be opened for RW", 80 | )); 81 | } 82 | } 83 | 84 | Ok(db) 85 | } 86 | 87 | fn get_database_local_in_mem( 88 | &self, 89 | dbname: &str, 90 | access: OpenAccess, 91 | ) -> io::Result>>> { 92 | let db = self.databases.get(dbname); 93 | 94 | if db.is_some() && access == OpenAccess::CreateNew { 95 | return Err(io::Error::new( 96 | io::ErrorKind::AlreadyExists, 97 | "database already exists", 98 | )); 99 | } 100 | 101 | Ok(db.cloned()) 102 | } 103 | 104 | fn get_database_local_on_disk( 105 | &self, 106 | dbname: &str, 107 | access: OpenAccess, 108 | ) -> io::Result>>> { 109 | let pos = self.pager.db_path(dbname).join("pos"); 110 | if !pos.try_exists()? { 111 | return Ok(None); 112 | } 113 | 114 | if access == OpenAccess::CreateNew { 115 | return Err(io::Error::new( 116 | io::ErrorKind::AlreadyExists, 117 | "database already exists", 118 | )); 119 | } 120 | 121 | let pos = fs::read(pos)?; 122 | let pos = serde_json::from_slice(&pos)?; 123 | log::info!( 124 | "[manager] get_database_local_on_disk: name = {}, access = {:?}, pos = {}", 125 | dbname, 126 | access, 127 | pos 128 | ); 129 | 130 | Ok(Some(Arc::new(RwLock::new(Database::new( 131 | dbname, 132 | Some(pos), 133 | Arc::clone(&self.pager), 134 | Arc::clone(&self.client), 135 | Arc::clone(&self.leaser), 136 | Arc::clone(&self.syncer), 137 | )?)))) 138 | } 139 | 140 | fn get_database_remote( 141 | &self, 142 | dbname: &str, 143 | access: OpenAccess, 144 | ) -> io::Result>>> { 145 | let pos = self.client.pos_map()?.remove(dbname); 146 | 147 | if pos.is_some() && access == OpenAccess::CreateNew { 148 | return Err(io::Error::new( 149 | io::ErrorKind::AlreadyExists, 150 | "database already exists", 151 | )); 152 | }; 153 | 154 | if pos.is_none() && matches!(access, OpenAccess::Read | OpenAccess::Write) { 155 | return Ok(None); 156 | } 157 | 158 | let pos = pos.flatten(); 159 | log::info!( 160 | "[manager] get_database_remote: name = {}, access = {:?}, pos = {}", 161 | dbname, 162 | access, 163 | OptionLogger(&pos) 164 | ); 165 | 166 | Ok(Some(Arc::new(RwLock::new(Database::new( 167 | dbname, 168 | pos, 169 | Arc::clone(&self.pager), 170 | Arc::clone(&self.client), 171 | Arc::clone(&self.leaser), 172 | Arc::clone(&self.syncer), 173 | )?)))) 174 | } 175 | 176 | pub(crate) fn database_exists>(&self, dbname: S) -> io::Result { 177 | if self.databases.contains_key(dbname.as_ref()) 178 | || self.client.pos_map()?.contains_key(dbname.as_ref()) 179 | { 180 | Ok(true) 181 | } else { 182 | Ok(false) 183 | } 184 | } 185 | } 186 | 187 | pub(crate) struct Database { 188 | lock: VfsLock, 189 | pub(crate) name: String, 190 | client: Arc, 191 | pager: Arc, 192 | leaser: Arc, 193 | syncer: Arc, 194 | ltx_path: PathBuf, 195 | pos_path: PathBuf, 196 | pub(crate) journal_path: PathBuf, 197 | pub(crate) page_size: Option, 198 | committed_db_size: Mutex>, 199 | current_db_size: Option, 200 | pub(crate) pos: Option, 201 | dirty_pages: BTreeMap>, 202 | prefetch_pages: Mutex>, 203 | pub(crate) prefetch_limit: usize, 204 | wal: bool, 205 | 
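// The `wal` flag above and the `auto_vacuum` flag below are parsed from
// page 1 when the database is opened; either one restricts the database to
// read-only access under LiteVFS (see the checks in `get_database` above).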
auto_vacuum: bool, 206 | } 207 | 208 | impl Database { 209 | fn new( 210 | name: &str, 211 | pos: Option, 212 | pager: Arc, 213 | client: Arc, 214 | leaser: Arc, 215 | syncer: Arc, 216 | ) -> io::Result { 217 | let ltx_path = pager.db_path(name).join("ltx"); 218 | let pos_path = pager.db_path(name).join("pos"); 219 | let journal_path = pager.db_path(name).join("journal"); 220 | 221 | pager.prepare_db(name)?; 222 | fs::create_dir_all(<x_path)?; 223 | 224 | let (wal, auto_vacuum, page_size, commit) = 225 | match pager.get_page(name, pos, ltx::PageNum::ONE, None) { 226 | Ok(page) => ( 227 | Database::parse_wal(page.as_ref()), 228 | Database::parse_autovacuum(page.as_ref())?, 229 | Some(Database::parse_page_size_database(page.as_ref())?), 230 | Some(Database::parse_commit_database( 231 | page.as_ref(), 232 | sqlite::COMMIT_RANGE, 233 | )?), 234 | ), 235 | Err(err) if err.kind() == io::ErrorKind::UnexpectedEof => { 236 | (false, false, None, None) 237 | } 238 | Err(err) => return Err(err), 239 | }; 240 | 241 | if wal { 242 | log::warn!("[database] db = {}, database in WAL mode", name); 243 | } 244 | if auto_vacuum { 245 | log::warn!("[database] db = {}, database with auto vacuum", name); 246 | } 247 | 248 | Ok(Database { 249 | lock: VfsLock::new(), 250 | name: name.into(), 251 | client, 252 | pager, 253 | leaser, 254 | syncer, 255 | ltx_path, 256 | pos_path, 257 | journal_path, 258 | page_size, 259 | committed_db_size: Mutex::new(commit), 260 | current_db_size: None, 261 | pos, 262 | dirty_pages: BTreeMap::new(), 263 | prefetch_pages: Mutex::new(BTreeSet::new()), 264 | prefetch_limit: DEFAULT_MAX_PREFETCH_PAGES, 265 | wal, 266 | auto_vacuum, 267 | }) 268 | } 269 | 270 | fn parse_wal(page1: &[u8]) -> bool { 271 | let write_version = u8::from_be(page1[sqlite::WRITE_VERSION_OFFSET]); 272 | let read_version = u8::from_be(page1[sqlite::READ_VERSION_OFFSET]); 273 | 274 | write_version == 2 || read_version == 2 275 | } 276 | 277 | fn parse_autovacuum(page1: &[u8]) -> io::Result { 278 | let auto_vacuum = u32::from_be_bytes( 279 | page1[52..56] 280 | .try_into() 281 | .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?, 282 | ); 283 | 284 | Ok(auto_vacuum > 0) 285 | } 286 | 287 | fn parse_page_size_database(page1: &[u8]) -> io::Result { 288 | let page_size = match u16::from_be_bytes( 289 | page1[16..18] 290 | .try_into() 291 | .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?, 292 | ) { 293 | 1 => 65536, 294 | n => n as u32, 295 | }; 296 | 297 | ltx::PageSize::new(page_size).map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) 298 | } 299 | 300 | fn parse_commit_database(page1: &[u8], loc: ops::Range) -> io::Result { 301 | let commit = u32::from_be_bytes( 302 | page1[loc] 303 | .try_into() 304 | .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?, 305 | ); 306 | 307 | ltx::PageNum::new(commit).map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) 308 | } 309 | 310 | pub(crate) fn parse_page_size_journal(hdr: &[u8]) -> io::Result { 311 | let page_size = u32::from_be_bytes( 312 | hdr[24..28] 313 | .try_into() 314 | .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?, 315 | ); 316 | 317 | ltx::PageSize::new(page_size).map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) 318 | } 319 | 320 | pub(crate) fn conn_lock(&self) -> ConnLock { 321 | self.lock.conn_lock() 322 | } 323 | 324 | pub(crate) fn page_size(&self) -> io::Result { 325 | self.page_size 326 | .ok_or(io::Error::new(io::ErrorKind::Other, "page size unknown")) 327 | } 328 | 329 | fn 
ensure_aligned(&self, buf: &[u8], offset: u64) -> io::Result<()> { 330 | let page_size = self.page_size()?.into_inner() as usize; 331 | 332 | // SQLite always writes exactly one page 333 | if offset as usize % page_size != 0 { 334 | return Err(io::Error::new( 335 | io::ErrorKind::InvalidInput, 336 | "offset not page aligned", 337 | )); 338 | } 339 | if buf.len() != page_size { 340 | return Err(io::Error::new( 341 | io::ErrorKind::InvalidInput, 342 | "unexpected buffer size", 343 | )); 344 | }; 345 | 346 | Ok(()) 347 | } 348 | 349 | fn page_num_for(&self, offset: u64) -> io::Result { 350 | let page_size = self.page_size()?; 351 | Ok(ltx::PageNum::new( 352 | (offset / page_size.into_inner() as u64 + 1) as u32, 353 | )?) 354 | } 355 | 356 | pub(crate) fn size(&self) -> io::Result { 357 | let commit = if let Some(commit) = self.current_db_size { 358 | commit 359 | } else if let Some(commit) = *self.committed_db_size.lock().unwrap() { 360 | commit 361 | } else { 362 | return Ok(0); 363 | }; 364 | 365 | Ok(self.page_size()?.into_inner() as u64 * commit.into_inner() as u64) 366 | } 367 | 368 | pub(crate) fn read_at( 369 | &self, 370 | buf: &mut [u8], 371 | offset: u64, 372 | local_only: bool, 373 | ) -> io::Result { 374 | let (number, page_offset) = if offset <= sqlite::HEADER_SIZE as u64 { 375 | (ltx::PageNum::ONE, offset) 376 | } else { 377 | self.ensure_aligned(buf, offset)?; 378 | (self.page_num_for(offset)?, 0) 379 | }; 380 | 381 | let source = self.pager.get_page_slice( 382 | &self.name, 383 | self.pos, 384 | number, 385 | buf, 386 | page_offset, 387 | local_only, 388 | self.prefetch_pages(number), 389 | )?; 390 | 391 | if self.can_prefetch(buf) { 392 | let mut prefetch = self.prefetch_pages.lock().unwrap(); 393 | if let Some(candidates) = sqlite::prefetch_candidates(buf, number).map(|t| { 394 | t.into_iter() 395 | .filter(|&pgno| !self.pager.has_page(&self.name, pgno).unwrap_or(false)) 396 | .collect() 397 | }) { 398 | *prefetch = candidates; 399 | } 400 | } 401 | 402 | let offset = offset as usize; 403 | if offset <= sqlite::WRITE_VERSION_OFFSET 404 | && offset + buf.len() >= sqlite::READ_VERSION_OFFSET 405 | && self.wal 406 | { 407 | buf[sqlite::WRITE_VERSION_OFFSET - offset] = u8::to_be(1); 408 | buf[sqlite::READ_VERSION_OFFSET - offset] = u8::to_be(1); 409 | }; 410 | 411 | if offset <= sqlite::COMMIT_RANGE.start 412 | && offset + buf.len() >= sqlite::COMMIT_RANGE.end 413 | && !self.dirty_pages.contains_key(<x::PageNum::ONE) 414 | { 415 | *self.committed_db_size.lock().unwrap() = Some(Database::parse_commit_database( 416 | buf, 417 | sqlite::COMMIT_RANGE.start - offset..sqlite::COMMIT_RANGE.end - offset, 418 | )?); 419 | } 420 | 421 | Ok(source) 422 | } 423 | 424 | fn can_prefetch(&self, buf: &[u8]) -> bool { 425 | let page_size = if let Ok(ps) = self.page_size() { 426 | ps.into_inner() as usize 427 | } else { 428 | return false; 429 | }; 430 | 431 | buf.len() == page_size 432 | } 433 | 434 | pub(crate) fn write_at(&mut self, buf: &[u8], offset: u64) -> io::Result<()> { 435 | if offset == 0 && buf.len() >= sqlite::HEADER_SIZE { 436 | if self.page_size().is_err() { 437 | self.page_size = Some(Database::parse_page_size_database(buf)?); 438 | } 439 | self.current_db_size = 440 | Some(Database::parse_commit_database(buf, sqlite::COMMIT_RANGE)?); 441 | } 442 | 443 | _ = self.leaser.get_lease(&self.name)?; 444 | if self.wal { 445 | return Err(io::Error::new( 446 | io::ErrorKind::Unsupported, 447 | "writing to DB in WAL mode is unsupported", 448 | )); 449 | } 450 | 451 | 
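// A worked example of the page arithmetic used below: with a 4096-byte page
// size, `page_num_for` maps offset 0 to page 1 (0 / 4096 + 1), offset 4096 to
// page 2, and offset 40960 to page 11; pages are 1-based. `ensure_aligned`
// rejects any offset that is not a multiple of the page size, and the first
// 100 header bytes live on page 1, which is why `read_at` special-cases
// offsets within the SQLite header.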
self.ensure_aligned(buf, offset)?; 452 | let page_num = self.page_num_for(offset)?; 453 | 454 | let orig_checksum = match *self.committed_db_size.lock().unwrap() { 455 | Some(dbsize) if page_num > dbsize => None, 456 | _ => match self.pager.get_page(&self.name, self.pos, page_num, None) { 457 | Ok(page) => Some(page.checksum()), 458 | Err(err) if err.kind() == io::ErrorKind::UnexpectedEof => None, 459 | Err(err) => return Err(err), 460 | }, 461 | }; 462 | 463 | let page = PageRef::new(page_num, buf); 464 | self.pager.put_page(&self.name, page)?; 465 | 466 | if page_num == ltx::PageNum::lock_page(self.page_size()?) { 467 | return Ok(()); 468 | } 469 | 470 | self.dirty_pages 471 | .entry(page.number()) 472 | .or_insert(orig_checksum); 473 | 474 | Ok(()) 475 | } 476 | 477 | pub(crate) fn truncate(&mut self, size: u64) -> io::Result<()> { 478 | let page_size = self.page_size()?.into_inner() as usize; 479 | let size = size as usize; 480 | if size % page_size != 0 { 481 | return Err(io::Error::new( 482 | io::ErrorKind::InvalidInput, 483 | "size not page aligned", 484 | )); 485 | } 486 | 487 | self.pager 488 | .truncate(&self.name, ltx::PageNum::new((size / page_size) as u32)?) 489 | } 490 | 491 | fn is_journal_header_valid(&self) -> io::Result { 492 | const VALID_JOURNAL_HDR: [u8; 8] = [0xd9, 0xd5, 0x05, 0xf9, 0x20, 0xa1, 0x63, 0xd7]; 493 | let mut hdr: [u8; 8] = [0; 8]; 494 | 495 | fs::File::open(&self.journal_path)?.read_exact(&mut hdr)?; 496 | 497 | Ok(hdr == VALID_JOURNAL_HDR) 498 | } 499 | 500 | pub(crate) fn commit_journal(&mut self) -> io::Result<()> { 501 | if !self.is_journal_header_valid()? { 502 | log::info!("[database] rollback: db = {}", self.name); 503 | self.dirty_pages.clear(); 504 | return Ok(()); 505 | }; 506 | 507 | let txid = if let Some(pos) = self.pos { 508 | pos.txid + 1 509 | } else { 510 | ltx::TXID::ONE 511 | }; 512 | 513 | log::debug!( 514 | "[database] commit_journal: db = {}, pos = {}, committed_size = {}, current_size = {}", 515 | self.name, 516 | OptionLogger(&self.pos), 517 | OptionLogger(&self.committed_db_size.lock().unwrap()), 518 | OptionLogger(&self.current_db_size), 519 | ); 520 | 521 | let pos = match self.commit_journal_inner(txid) { 522 | Ok(pos) => pos, 523 | Err(err) => { 524 | log::error!( 525 | "[database] commit_journal: db = {}, pos = {}, committed_size = {}, current_size = {}: {}", 526 | self.name, 527 | OptionLogger(&self.pos), 528 | OptionLogger(&self.committed_db_size.lock().unwrap()), 529 | OptionLogger(&self.current_db_size), 530 | err, 531 | ); 532 | 533 | // Commit failed, remove the dirty pages so they can 534 | // be refetched from LFSC 535 | for &page_num in self.dirty_pages.keys() { 536 | self.pager.del_page(&self.name, page_num)?; 537 | } 538 | self.current_db_size.take(); 539 | self.dirty_pages.clear(); 540 | 541 | return Err(err); 542 | } 543 | }; 544 | 545 | *self.committed_db_size.lock().unwrap() = self.current_db_size; 546 | self.current_db_size.take(); 547 | self.dirty_pages.clear(); 548 | 549 | self.pos = Some(pos); 550 | self.syncer.set_pos(&self.name, self.pos); 551 | 552 | Ok(()) 553 | } 554 | 555 | fn commit_journal_inner(&mut self, txid: ltx::TXID) -> io::Result { 556 | if self.current_db_size < *self.committed_db_size.lock().unwrap() { 557 | log::warn!( 558 | "[database] commit_journal: db = {}: VACUUM is not supported by LiteVFS", 559 | self.name 560 | ); 561 | return Err(io::Error::new( 562 | io::ErrorKind::Other, 563 | "vacuum is not supported by LiteVFS", 564 | )); 565 | } 566 | 567 | let commit = 
self.current_db_size.ok_or(io::Error::new( 568 | io::ErrorKind::Other, 569 | "database size unknown", 570 | ))?; 571 | let lease = self.leaser.get_lease(&self.name)?; 572 | 573 | let ltx_path = self.ltx_path.join(format!("{0}-{0}.ltx", txid)); 574 | let mut file = fs::OpenOptions::new() 575 | .read(true) 576 | .write(true) 577 | .create(true) 578 | .truncate(true) 579 | .open(<x_path)?; 580 | let mut enc = ltx::Encoder::new( 581 | &file, 582 | <x::Header { 583 | flags: ltx::HeaderFlags::empty(), 584 | page_size: self.page_size()?, 585 | commit, 586 | min_txid: txid, 587 | max_txid: txid, 588 | timestamp: time::SystemTime::now(), 589 | pre_apply_checksum: self.pos.map(|p| p.post_apply_checksum), 590 | }, 591 | )?; 592 | 593 | let mut checksum = self 594 | .pos 595 | .map(|p| p.post_apply_checksum.into_inner()) 596 | .unwrap_or(0); 597 | let mut pages = Vec::with_capacity(self.dirty_pages.len()); 598 | for (&page_num, &prev_checksum) in self.dirty_pages.iter().filter(|&(&n, _)| n <= commit) { 599 | let page = self.pager.get_page(&self.name, self.pos, page_num, None)?; 600 | if let Some(prev_checksum) = prev_checksum { 601 | checksum ^= prev_checksum.into_inner(); 602 | }; 603 | checksum ^= page.checksum().into_inner(); 604 | enc.encode_page(page_num, page.as_ref())?; 605 | pages.push(page_num); 606 | } 607 | 608 | let checksum = ltx::Checksum::new(checksum); 609 | enc.finish(checksum)?; 610 | 611 | // rewind the file and send it to LFSC 612 | file.seek(SeekFrom::Start(0))?; 613 | self.client 614 | .write_tx(&self.name, &file, file.metadata()?.len(), &lease)?; 615 | fs::remove_file(<x_path)?; 616 | 617 | let pos = ltx::Pos { 618 | txid, 619 | post_apply_checksum: checksum, 620 | }; 621 | 622 | self.commit_pos(pos)?; 623 | 624 | Ok(pos) 625 | } 626 | 627 | fn commit_pos(&mut self, pos: ltx::Pos) -> io::Result<()> { 628 | let file = fs::OpenOptions::new() 629 | .write(true) 630 | .create(true) 631 | .truncate(true) 632 | .open(&self.pos_path)?; 633 | serde_json::to_writer(&file, &pos)?; 634 | file.sync_all()?; 635 | 636 | Ok(()) 637 | } 638 | 639 | pub(crate) fn needs_sync(&self) -> bool { 640 | self.syncer.needs_sync(&self.name, self.pos) 641 | } 642 | 643 | pub(crate) fn sync(&mut self, force: bool, deep: bool) -> io::Result<()> { 644 | if force { 645 | self.syncer.sync_one(&self.name, deep)?; 646 | } 647 | 648 | let pos = match self.syncer.get_changes(&self.name, self.pos)? 
{ 649 | // No changes 650 | (pos, None) => { 651 | log::debug!( 652 | "[database] sync: db = {}, prev_pos = {}, pos = {}, no changes", 653 | self.name, 654 | OptionLogger(&self.pos), 655 | OptionLogger(&pos), 656 | ); 657 | pos 658 | } 659 | 660 | // All pages have changed, clear the cache completely 661 | (pos, Some(Changes::All)) => { 662 | log::debug!( 663 | "[database] sync: db = {}, prev_pos = {}, pos = {}, all pages have changed", 664 | self.name, 665 | OptionLogger(&self.pos), 666 | OptionLogger(&pos) 667 | ); 668 | match self.pager.clear(&self.name) { 669 | Err(err) => { 670 | self.syncer.put_changes(&self.name, Changes::All); 671 | return Err(err); 672 | } 673 | Ok(pgnos) => { 674 | *self.prefetch_pages.lock().unwrap() = 675 | pgnos.into_iter().take(self.prefetch_limit).collect(); 676 | } 677 | }; 678 | self.committed_db_size.lock().unwrap().take(); 679 | 680 | pos 681 | } 682 | 683 | // Some pages have changed, drop them from the cache 684 | (pos, Some(Changes::Pages(pgnos))) => { 685 | log::debug!( 686 | "[database] sync: db = {}, prev_pos = {}, pos = {}, pages = {}", 687 | self.name, 688 | OptionLogger(&self.pos), 689 | OptionLogger(&pos), 690 | IterLogger(&pgnos) 691 | ); 692 | 693 | let mut prefetch = self.prefetch_pages.lock().unwrap(); 694 | prefetch.clear(); 695 | for pgno in &pgnos { 696 | match self.pager.del_page(&self.name, *pgno) { 697 | Err(err) => { 698 | self.syncer.put_changes(&self.name, Changes::Pages(pgnos)); 699 | return Err(err); 700 | } 701 | Ok(true) if prefetch.len() < self.prefetch_limit => { 702 | prefetch.insert(*pgno); 703 | } 704 | _ => (), 705 | } 706 | if *pgno == ltx::PageNum::ONE { 707 | self.committed_db_size.lock().unwrap().take(); 708 | }; 709 | } 710 | 711 | pos 712 | } 713 | }; 714 | 715 | if let Some(pos) = pos { 716 | self.commit_pos(pos)?; 717 | } 718 | 719 | self.pos = pos; 720 | 721 | Ok(()) 722 | } 723 | 724 | pub(crate) fn cache(&mut self) -> io::Result<()> { 725 | self.sync(true, true)?; 726 | 727 | // Make sure we have up-to-date view of the DB header 728 | let mut header = [0; sqlite::HEADER_SIZE]; 729 | self.read_at(&mut header, 0, false)?; 730 | 731 | let dbsize = self 732 | .committed_db_size 733 | .lock() 734 | .unwrap() 735 | .ok_or(io::Error::new( 736 | io::ErrorKind::Other, 737 | "database size unknown", 738 | ))?; 739 | 740 | log::info!( 741 | "[database] caching, db = {}, pos = {}, size = {}", 742 | self.name, 743 | OptionLogger(&self.pos), 744 | dbsize 745 | ); 746 | let mut pgnos = Vec::with_capacity(MAX_MAX_PREFETCH_PAGES); 747 | for pgno in 1..=dbsize.into_inner() { 748 | let pgno = ltx::PageNum::new(pgno).unwrap(); 749 | 750 | if self.pager.has_page(&self.name, pgno)? 
{ 751 | continue; 752 | } 753 | 754 | if pgno == dbsize || pgnos.len() == MAX_MAX_PREFETCH_PAGES { 755 | self.pager 756 | .get_page(&self.name, self.pos, pgno, Some(&pgnos))?; 757 | pgnos.clear(); 758 | } else { 759 | pgnos.push(pgno); 760 | } 761 | } 762 | 763 | Ok(()) 764 | } 765 | 766 | fn prefetch_pages(&self, pgno: ltx::PageNum) -> Option> { 767 | let prefetch = self.prefetch_pages.lock().unwrap(); 768 | let pgnos = if prefetch.contains(&pgno) { 769 | Some(prefetch.iter().filter(|&&no| no != pgno).copied().collect()) 770 | } else { 771 | None 772 | }; 773 | 774 | pgnos 775 | } 776 | 777 | pub(crate) fn acquire_lease(&self) -> io::Result<()> { 778 | self.leaser.acquire_lease(&self.name) 779 | } 780 | 781 | pub(crate) fn release_lease(&self) -> io::Result<()> { 782 | self.leaser.release_lease(&self.name) 783 | } 784 | } 785 | -------------------------------------------------------------------------------- /crates/litevfs/src/vfs.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | database::{Database, DatabaseManager, MAX_MAX_PREFETCH_PAGES}, 3 | leaser::Leaser, 4 | lfsc, 5 | locks::{ConnLock, VfsLock}, 6 | pager::{PageSource, Pager}, 7 | syncer::Syncer, 8 | }; 9 | use bytesize::ByteSize; 10 | use humantime::{format_duration, parse_duration}; 11 | use rand::Rng; 12 | use read_write_at::{ReadAtMut, WriteAtMut}; 13 | use sqlite_vfs::{LockKind, OpenAccess, OpenKind, OpenOptions, Vfs}; 14 | use std::{ 15 | fs, io, 16 | path::{Path, PathBuf}, 17 | process, 18 | sync::{ 19 | atomic::{AtomicU64, Ordering}, 20 | Arc, Mutex, RwLock, 21 | }, 22 | thread, time, 23 | }; 24 | 25 | const DEFAULT_MAX_REQS_PER_QUERY: usize = 64; 26 | const MAX_MAX_REQS_PER_QUERY: usize = 1024; 27 | 28 | /// LiteVfs implements SQLite VFS ops. 
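// The two constants above cap how many pages a single query may pull from
// LFSC: `LiteDatabaseHandle::read_exact_at` counts remote page fetches and,
// once the cap is reached, asks the pager for pages with `local_only = true`;
// the counter resets when the connection drops back to LockKind::None.
// Setting `PRAGMA litevfs_max_reqs_per_query = 0` disables the cap, and
// values above MAX_MAX_REQS_PER_QUERY (1024) are rejected by the pragma
// handler.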
29 | pub struct LiteVfs { 30 | path: PathBuf, 31 | pager: Arc, 32 | syncer: Arc, 33 | database_manager: Mutex, 34 | temp_counter: AtomicU64, 35 | } 36 | 37 | impl Vfs for LiteVfs { 38 | type Handle = LiteHandle; 39 | 40 | fn open(&self, db: &str, opts: OpenOptions) -> io::Result { 41 | log::debug!("[vfs] open: db = {}, opts = {:?}", db, opts); 42 | 43 | if !matches!( 44 | opts.kind, 45 | OpenKind::MainDb | OpenKind::TempDb | OpenKind::MainJournal | OpenKind::TempJournal 46 | ) { 47 | log::error!( 48 | "[vfs] open: db = {}, opts = {:?}: unsupported open kind", 49 | db, 50 | opts 51 | ); 52 | return Err(io::Error::new( 53 | io::ErrorKind::InvalidInput, 54 | "unsupported open kind", 55 | )); 56 | }; 57 | 58 | let (dbname, kind) = self.database_name_kind(db); 59 | if kind != opts.kind && (opts.kind != OpenKind::TempJournal && kind != OpenKind::TempDb) { 60 | return Err(io::Error::new( 61 | io::ErrorKind::InvalidInput, 62 | "unsupported database name", 63 | )); 64 | }; 65 | 66 | let res = match kind { 67 | OpenKind::MainDb => self 68 | .database_manager 69 | .lock() 70 | .unwrap() 71 | .get_database(dbname, opts.access) 72 | .map(|database| { 73 | let (conn_lock, pos) = { 74 | let database = database.read().unwrap(); 75 | 76 | (database.conn_lock(), database.pos) 77 | }; 78 | self.syncer.open_conn(dbname, pos); 79 | 80 | LiteHandle::new(LiteDatabaseHandle::new( 81 | Arc::clone(&self.pager), 82 | Arc::clone(&self.syncer), 83 | database, 84 | conn_lock, 85 | )) 86 | }), 87 | OpenKind::TempDb => Ok(LiteHandle::new(LiteTempDbHandle::new( 88 | self.path.join(db), 89 | opts.access, 90 | )?)), 91 | 92 | OpenKind::MainJournal => self 93 | .database_manager 94 | .lock() 95 | .unwrap() 96 | .get_database(dbname, opts.access) 97 | .and_then(|database| Ok(LiteHandle::new(LiteJournalHandle::new(database)?))), 98 | _ => unreachable!(), 99 | }; 100 | 101 | if let Err(ref err) = res { 102 | log::error!("[vfs] open: db = {}, opts = {:?}: {}", db, opts, err); 103 | } 104 | 105 | res 106 | } 107 | 108 | fn delete(&self, db: &str) -> io::Result<()> { 109 | log::debug!("[vfs] delete: db = {}", db); 110 | 111 | let (dbname, kind) = self.database_name_kind(db); 112 | match kind { 113 | OpenKind::MainDb => (), 114 | OpenKind::MainJournal => { 115 | let database = self 116 | .database_manager 117 | .lock() 118 | .unwrap() 119 | .get_database(dbname.as_ref(), OpenAccess::Write)?; 120 | database.write().unwrap().commit_journal()?; 121 | fs::remove_file(&database.read().unwrap().journal_path)?; 122 | } 123 | _ => (), 124 | }; 125 | 126 | Ok(()) 127 | } 128 | 129 | fn exists(&self, db: &str) -> io::Result { 130 | log::debug!("[vfs] exists: db = {}", db); 131 | 132 | let (dbname, kind) = self.database_name_kind(db); 133 | match kind { 134 | OpenKind::MainDb => self 135 | .database_manager 136 | .lock() 137 | .unwrap() 138 | .database_exists(dbname), 139 | OpenKind::MainJournal => { 140 | let database = self 141 | .database_manager 142 | .lock() 143 | .unwrap() 144 | .get_database(dbname.as_ref(), OpenAccess::Read)?; 145 | let database = database.read().unwrap(); 146 | 147 | Ok(database.journal_path.exists()) 148 | } 149 | _ => Ok(false), 150 | } 151 | } 152 | 153 | fn temporary_name(&self) -> String { 154 | format!( 155 | "sfvetil-{:x}_{:x}.db", 156 | process::id(), 157 | self.temp_counter.fetch_add(1, Ordering::AcqRel) 158 | ) 159 | } 160 | 161 | fn random(&self, buffer: &mut [i8]) { 162 | rand::thread_rng().fill(buffer); 163 | } 164 | 165 | fn sleep(&self, duration: time::Duration) -> time::Duration { 166 | 
log::debug!("[vfs] sleep: duration: {:?}", duration); 167 | 168 | // TODO: This will block JS runtime. Should be call back to JS here??? 169 | let now = time::Instant::now(); 170 | thread::sleep(duration); 171 | now.elapsed() 172 | } 173 | } 174 | 175 | impl LiteVfs { 176 | pub(crate) fn new>(path: P, client: lfsc::Client) -> Self { 177 | let client = Arc::new(client); 178 | let pager = Arc::new(Pager::new(&path, Arc::clone(&client))); 179 | let leaser = Leaser::new(Arc::clone(&client), time::Duration::from_secs(1)); 180 | let syncer = Syncer::new(Arc::clone(&client), time::Duration::from_secs(1)); 181 | 182 | LiteVfs { 183 | path: path.as_ref().to_path_buf(), 184 | pager: Arc::clone(&pager), 185 | syncer: Arc::clone(&syncer), 186 | database_manager: Mutex::new(DatabaseManager::new(pager, client, leaser, syncer)), 187 | temp_counter: AtomicU64::new(0), 188 | } 189 | } 190 | 191 | fn database_name_kind<'a>(&self, db: &'a str) -> (&'a str, OpenKind) { 192 | if let Some(db) = db.strip_suffix("-journal") { 193 | (db, OpenKind::MainJournal) 194 | } else if let Some(db) = db.strip_suffix("-wal") { 195 | (db.trim_end_matches("-wal"), OpenKind::Wal) 196 | } else if db.starts_with("sfvetil-") { 197 | (db, OpenKind::TempDb) 198 | } else { 199 | (db, OpenKind::MainDb) 200 | } 201 | } 202 | } 203 | 204 | pub trait DatabaseHandle: Sync { 205 | fn size(&self) -> io::Result; 206 | fn read_exact_at(&mut self, buf: &mut [u8], offset: u64) -> io::Result<()>; 207 | fn write_all_at(&mut self, buf: &[u8], offset: u64) -> io::Result<()>; 208 | fn sync(&mut self, _data_only: bool) -> io::Result<()> { 209 | Ok(()) 210 | } 211 | fn set_len(&mut self, size: u64) -> io::Result<()>; 212 | fn lock(&mut self, _lock: LockKind) -> bool { 213 | unreachable!("should not be called"); 214 | } 215 | fn reserved(&mut self) -> bool { 216 | unreachable!("should not be called"); 217 | } 218 | fn current_lock(&self) -> LockKind { 219 | unreachable!("should not be called"); 220 | } 221 | 222 | fn pragma( 223 | &mut self, 224 | _pragma: &str, 225 | _val: Option<&str>, 226 | ) -> Option, io::Error>> { 227 | None 228 | } 229 | 230 | fn handle_type(&self) -> &'static str; 231 | fn handle_name(&self) -> &str; 232 | } 233 | 234 | pub struct LiteHandle { 235 | inner: Box, 236 | } 237 | 238 | impl LiteHandle { 239 | pub(crate) fn new(handler: H) -> LiteHandle 240 | where 241 | H: DatabaseHandle + 'static, 242 | { 243 | LiteHandle { 244 | inner: Box::new(handler), 245 | } 246 | } 247 | } 248 | 249 | impl sqlite_vfs::DatabaseHandle for LiteHandle { 250 | type WalIndex = sqlite_vfs::WalDisabled; 251 | 252 | fn size(&self) -> io::Result { 253 | match self.inner.size() { 254 | Err(err) => { 255 | log::error!( 256 | "[handle] size: type = {}, name = {}: {}", 257 | self.inner.handle_type(), 258 | self.inner.handle_name(), 259 | err, 260 | ); 261 | 262 | Err(err) 263 | } 264 | Ok(val) => Ok(val), 265 | } 266 | } 267 | 268 | fn read_exact_at(&mut self, buf: &mut [u8], offset: u64) -> io::Result<()> { 269 | match self.inner.read_exact_at(buf, offset) { 270 | Err(err) => { 271 | // SQLite reads past journal file during normal operation. 272 | // Silence this error. 273 | if err.kind() == io::ErrorKind::UnexpectedEof 274 | && self.inner.handle_type() == "journal" 275 | && offset >= self.size()? 
276 | { 277 | return Err(err); 278 | } 279 | 280 | log::error!( 281 | "[handle] read_exact_at: type = {}, name = {}, len = {}, offset = {}: {}", 282 | self.inner.handle_type(), 283 | self.inner.handle_name(), 284 | buf.len(), 285 | offset, 286 | err, 287 | ); 288 | 289 | Err(err) 290 | } 291 | _ => Ok(()), 292 | } 293 | } 294 | 295 | fn write_all_at(&mut self, buf: &[u8], offset: u64) -> io::Result<()> { 296 | match self.inner.write_all_at(buf, offset) { 297 | Err(err) => { 298 | log::error!( 299 | "[handle] write_all_at: type = {}, name = {}, len = {}, offset = {}: {}", 300 | self.inner.handle_type(), 301 | self.inner.handle_name(), 302 | buf.len(), 303 | offset, 304 | err, 305 | ); 306 | 307 | Err(err) 308 | } 309 | _ => Ok(()), 310 | } 311 | } 312 | 313 | fn sync(&mut self, data_only: bool) -> io::Result<()> { 314 | match self.inner.sync(data_only) { 315 | Err(err) => { 316 | log::error!( 317 | "[handle] sync: type = {}, name = {}, data_only = {}: {}", 318 | self.inner.handle_type(), 319 | self.inner.handle_name(), 320 | data_only, 321 | err, 322 | ); 323 | 324 | Err(err) 325 | } 326 | _ => Ok(()), 327 | } 328 | } 329 | 330 | fn set_len(&mut self, size: u64) -> io::Result<()> { 331 | match self.inner.set_len(size) { 332 | Err(err) => { 333 | log::error!( 334 | "[handle] set_len: type = {}, name = {}, size = {}: {}", 335 | self.inner.handle_type(), 336 | self.inner.handle_name(), 337 | size, 338 | err, 339 | ); 340 | 341 | Err(err) 342 | } 343 | _ => Ok(()), 344 | } 345 | } 346 | 347 | fn lock(&mut self, lock: LockKind) -> io::Result { 348 | Ok(self.inner.lock(lock)) 349 | } 350 | 351 | fn reserved(&mut self) -> io::Result { 352 | Ok(self.inner.reserved()) 353 | } 354 | 355 | fn current_lock(&self) -> io::Result { 356 | Ok(self.inner.current_lock()) 357 | } 358 | 359 | fn pragma( 360 | &mut self, 361 | pragma: &str, 362 | val: Option<&str>, 363 | ) -> Option, io::Error>> { 364 | match self.inner.pragma(pragma, val) { 365 | Some(Err(err)) => { 366 | let val = if let Some(val) = val { val } else { "" }; 367 | log::error!( 368 | "[handle] pragma: pragma = {}, value = {}: {}", 369 | pragma, 370 | val, 371 | err 372 | ); 373 | 374 | Some(Err(err)) 375 | } 376 | x => x, 377 | } 378 | } 379 | 380 | fn wal_index(&self, _readonly: bool) -> io::Result { 381 | Ok(sqlite_vfs::WalDisabled) 382 | } 383 | } 384 | 385 | struct LiteDatabaseHandle { 386 | pager: Arc, 387 | syncer: Arc, 388 | database: Arc>, 389 | lock: ConnLock, 390 | name: String, 391 | 392 | cur_pages_per_query: usize, 393 | max_pages_per_query: usize, 394 | } 395 | 396 | impl LiteDatabaseHandle { 397 | pub(crate) fn new( 398 | pager: Arc, 399 | syncer: Arc, 400 | database: Arc>, 401 | lock: ConnLock, 402 | ) -> Self { 403 | let name = database.read().unwrap().name.clone(); 404 | LiteDatabaseHandle { 405 | pager, 406 | syncer, 407 | database, 408 | lock, 409 | name, 410 | 411 | cur_pages_per_query: 0, 412 | max_pages_per_query: DEFAULT_MAX_REQS_PER_QUERY, 413 | } 414 | } 415 | 416 | fn acquire_exclusive(&mut self) -> io::Result<()> { 417 | if self.lock.state() != LockKind::None { 418 | return Err(io::Error::new( 419 | io::ErrorKind::Other, 420 | "connection is already holding a lock", 421 | )); 422 | } 423 | 424 | let now = time::Instant::now(); 425 | let timeout = time::Duration::from_secs(1); 426 | let check_timeout = move || -> io::Result<()> { 427 | if now.elapsed() > timeout { 428 | return Err(io::Error::new( 429 | io::ErrorKind::WouldBlock, 430 | format!( 431 | "waiting for more than {} to acquire exclusive lock", 432 | 
struct LiteDatabaseHandle {
    pager: Arc<Pager>,
    syncer: Arc<Syncer>,
    database: Arc<RwLock<Database>>,
    lock: ConnLock,
    name: String,

    cur_pages_per_query: usize,
    max_pages_per_query: usize,
}

impl LiteDatabaseHandle {
    pub(crate) fn new(
        pager: Arc<Pager>,
        syncer: Arc<Syncer>,
        database: Arc<RwLock<Database>>,
        lock: ConnLock,
    ) -> Self {
        let name = database.read().unwrap().name.clone();
        LiteDatabaseHandle {
            pager,
            syncer,
            database,
            lock,
            name,

            cur_pages_per_query: 0,
            max_pages_per_query: DEFAULT_MAX_REQS_PER_QUERY,
        }
    }

    fn acquire_exclusive(&mut self) -> io::Result<()> {
        if self.lock.state() != LockKind::None {
            return Err(io::Error::new(
                io::ErrorKind::Other,
                "connection is already holding a lock",
            ));
        }

        let now = time::Instant::now();
        let timeout = time::Duration::from_secs(1);
        let check_timeout = move || -> io::Result<()> {
            if now.elapsed() > timeout {
                return Err(io::Error::new(
                    io::ErrorKind::WouldBlock,
                    format!(
                        "waiting for more than {} to acquire exclusive lock",
                        format_duration(timeout)
                    ),
                ));
            }

            thread::sleep(time::Duration::from_millis(1));

            Ok(())
        };

        loop {
            if self.lock.acquire(LockKind::Shared) {
                if self.lock.acquire(LockKind::Reserved) {
                    // Now that we hold a reserved lock, there can only be readers.
                    // Loop here until all of them finish.
                    while !self.lock.acquire(LockKind::Exclusive) {
                        check_timeout()?;
                    }

                    return Ok(());
                } else {
                    // Fall back to None if we can't progress from Shared.
                    self.lock.acquire(LockKind::None);
                }
            }

            check_timeout()?;
        }
    }

    fn release_exclusive(&mut self) {
        self.lock.acquire(LockKind::None);
    }
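
    // The ladder in `acquire_exclusive` mirrors SQLite's locking protocol
    // (None -> Shared -> Reserved -> Exclusive), with `ConnLock::acquire`
    // assumed to be a single non-blocking transition attempt that returns
    // `false` on contention:
    //
    //   1. take Shared (fails while a writer holds the file);
    //   2. upgrade to Reserved (fails if another writer won the race); on
    //      failure, drop back to None so two contenders can't deadlock;
    //   3. spin until the remaining readers drain and Exclusive succeeds.
    //
    // Each failed step sleeps ~1ms via `check_timeout`, and the whole dance
    // is bounded by the one-second timeout.
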
    fn acquire_lease_and_sync(&mut self) -> io::Result<()> {
        self.acquire_exclusive()?;

        {
            let mut db = self.database.write().unwrap();
            if let Err(err) = db.acquire_lease() {
                drop(db);
                self.release_exclusive();
                return Err(err);
            }
            if let Err(err) = db.sync(true, false) {
                _ = db.release_lease();
                drop(db);
                self.release_exclusive();
                return Err(err);
            }
        }

        self.release_exclusive();

        Ok(())
    }

    fn cache_db(&mut self) -> io::Result<()> {
        self.acquire_exclusive()?;

        let ret = self.database.write().unwrap().cache();

        self.release_exclusive();

        ret
    }
}

impl Drop for LiteDatabaseHandle {
    fn drop(&mut self) {
        self.syncer.close_conn(&self.name)
    }
}

impl DatabaseHandle for LiteDatabaseHandle {
    fn size(&self) -> io::Result<u64> {
        self.database.read().unwrap().size()
    }

    fn read_exact_at(&mut self, buf: &mut [u8], offset: u64) -> io::Result<()> {
        let local_only =
            self.max_pages_per_query > 0 && self.cur_pages_per_query >= self.max_pages_per_query;
        if let PageSource::Remote = self
            .database
            .read()
            .unwrap()
            .read_at(buf, offset, local_only)?
        {
            self.cur_pages_per_query += 1;
        }

        Ok(())
    }

    fn write_all_at(&mut self, buf: &[u8], offset: u64) -> io::Result<()> {
        self.database.write().unwrap().write_at(buf, offset)
    }

    fn set_len(&mut self, size: u64) -> io::Result<()> {
        self.database.write().unwrap().truncate(size)
    }

    fn lock(&mut self, lock: LockKind) -> bool {
        // This connection will read data soon; check if we need to sync with LFSC.
        if self.lock.state() == LockKind::None
            && lock == LockKind::Shared
            && self.database.read().unwrap().needs_sync()
        {
            // This is a bit complicated. We need to initiate the sync even for read
            // transactions, so there may be concurrent transactions executing at the
            // time we enter `sync()`. Wait for them to finish first; otherwise they
            // might see inconsistent state.
            if let Err(err) = self.acquire_exclusive() {
                log::warn!(
                    "[database] sync: db = {}, timeout waiting for active connections, skipping sync: {}",
                    self.name, err
                );

                return self.lock.acquire(lock);
            }

            // There are no readers, so try to sync. If we fail, let SQLite take the
            // read lock; we may still be able to read the data. The important part
            // is that `sync()` doesn't fetch any data, so the cache stays consistent.
            if let Err(err) = self.database.write().unwrap().sync(false, false) {
                log::warn!("[database] sync: db = {}: {}", self.name, err);
            }

            self.release_exclusive();
        }

        if lock == LockKind::None {
            self.cur_pages_per_query = 0;
        }

        self.lock.acquire(lock)
    }

    fn reserved(&mut self) -> bool {
        self.lock.reserved()
    }

    fn current_lock(&self) -> LockKind {
        self.lock.state()
    }
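
    // The pragmas handled below are LiteVFS extensions, issued as ordinary
    // SQL once a connection runs on this VFS; for example (values are
    // illustrative):
    //
    //     PRAGMA litevfs_max_cached_pages;           -- query the current limit
    //     PRAGMA litevfs_max_cached_pages = 4096;    -- set it
    //     PRAGMA litevfs_cache_sync_period = '30s';  -- a bare number means
    //                                                -- seconds; suffixed values
    //                                                -- are parsed as durations
    //     PRAGMA litevfs_acquire_lease;              -- take a write lease and sync
    //
    // Returning `None` is expected to fall through to SQLite's built-in
    // pragma handling, while `Some(Err(..))` surfaces as an SQL error on the
    // statement.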
    fn pragma(
        &mut self,
        pragma: &str,
        val: Option<&str>,
    ) -> Option<Result<Option<String>, io::Error>> {
        match (pragma, val) {
            ("journal_mode", Some(val)) if val.to_uppercase() == "WAL" => {
                Some(Err(io::Error::new(
                    io::ErrorKind::InvalidInput,
                    "WAL is not supported by LiteVFS",
                )))
            }

            ("litevfs_min_available_space", None) => Some(Ok(Some(
                ByteSize::b(self.pager.min_available_space()).to_string_as(true),
            ))),
            ("litevfs_min_available_space", Some(val)) => match val.parse::<ByteSize>() {
                Ok(val) => {
                    self.pager.set_min_available_space(val.as_u64());
                    Some(Ok(None))
                }
                Err(e) => Some(Err(io::Error::new(io::ErrorKind::InvalidInput, e))),
            },

            ("litevfs_max_cached_pages", None) => {
                Some(Ok(Some(self.pager.max_cached_pages().to_string())))
            }
            ("litevfs_max_cached_pages", Some(val)) => match val.parse::<usize>() {
                Ok(val) => {
                    self.pager.set_max_cached_pages(val);
                    Some(Ok(None))
                }
                Err(e) => Some(Err(io::Error::new(io::ErrorKind::InvalidInput, e))),
            },

            ("litevfs_max_reqs_per_query", None) => {
                Some(Ok(Some(self.max_pages_per_query.to_string())))
            }
            ("litevfs_max_reqs_per_query", Some(val)) => match val.parse::<usize>() {
                Ok(val) if val <= MAX_MAX_REQS_PER_QUERY => {
                    self.max_pages_per_query = val;
                    Some(Ok(None))
                }
                Ok(_) => Some(Err(io::Error::new(
                    io::ErrorKind::InvalidInput,
                    format!("can't be greater than {}", MAX_MAX_REQS_PER_QUERY),
                ))),
                Err(e) => Some(Err(io::Error::new(io::ErrorKind::InvalidInput, e))),
            },

            ("litevfs_cache_sync_period", None) => Some(Ok(Some(
                format_duration(self.syncer.sync_period(&self.name)).to_string(),
            ))),
            ("litevfs_cache_sync_period", Some(val)) => {
                let val = if val
                    .chars()
                    .last()
                    .map(|c| c.is_ascii_digit())
                    .unwrap_or_default()
                {
                    val.parse()
                        .map(time::Duration::from_secs)
                        .map_err(|e| io::Error::new(io::ErrorKind::InvalidInput, e))
                } else {
                    parse_duration(val).map_err(|e| io::Error::new(io::ErrorKind::InvalidInput, e))
                };

                match val {
                    Ok(val) => {
                        self.syncer.set_sync_period(&self.name, val);
                        Some(Ok(None))
                    }
                    Err(e) => Some(Err(e)),
                }
            }

            ("litevfs_max_prefetch_pages", None) => Some(Ok(Some(
                self.database.read().unwrap().prefetch_limit.to_string(),
            ))),
            ("litevfs_max_prefetch_pages", Some(val)) => match val.parse::<usize>() {
                Ok(val) if val <= MAX_MAX_PREFETCH_PAGES => {
                    self.database.write().unwrap().prefetch_limit = val;
                    Some(Ok(None))
                }
                Ok(_) => Some(Err(io::Error::new(
                    io::ErrorKind::InvalidInput,
                    format!("can't be greater than {}", MAX_MAX_PREFETCH_PAGES),
                ))),
                Err(e) => Some(Err(io::Error::new(io::ErrorKind::InvalidInput, e))),
            },

            ("litevfs_acquire_lease", None) => match self.acquire_lease_and_sync() {
                Ok(()) => Some(Ok(None)),
                Err(e) => Some(Err(e)),
            },
            ("litevfs_release_lease", None) => {
                match self.database.read().unwrap().release_lease() {
                    Ok(()) => Some(Ok(None)),
                    Err(e) => Some(Err(e)),
                }
            }

            ("litevfs_cache_db", None) => match self.cache_db() {
                Ok(()) => Some(Ok(None)),
                Err(e) => Some(Err(e)),
            },

            _ => None,
        }
    }

    fn handle_type(&self) -> &'static str {
        "database"
    }
    fn handle_name(&self) -> &str {
        &self.name
    }
}

struct LiteJournalHandle {
    journal: fs::File,
    database: Arc<RwLock<Database>>,
    name: String,
}

impl LiteJournalHandle {
    pub(crate) fn new(database: Arc<RwLock<Database>>) -> io::Result<Self> {
        let (journal, name) = {
            let db = database.read().unwrap();
            let journal = fs::OpenOptions::new()
                .read(true)
                .write(true)
                .create(true)
                .truncate(false)
                .open(&db.journal_path)?;
            let name = db.name.clone();
            (journal, name)
        };

        Ok(LiteJournalHandle {
            journal,
            database,
            name,
        })
    }
}

impl DatabaseHandle for LiteJournalHandle {
    fn size(&self) -> io::Result<u64> {
        self.journal.metadata().map(|m| m.len())
    }

    fn read_exact_at(&mut self, buf: &mut [u8], offset: u64) -> io::Result<()> {
        self.journal.read_exact_at(buf, offset)
    }

    fn write_all_at(&mut self, buf: &[u8], offset: u64) -> io::Result<()> {
        const JOURNAL_HDR_SIZE: usize = 28;

        {
            let mut db = self.database.write().unwrap();
            if offset == 0 && buf.len() >= JOURNAL_HDR_SIZE && db.page_size().is_err() {
                db.page_size = Some(Database::parse_page_size_journal(buf)?);
            }
            if offset == 0 && buf.len() == JOURNAL_HDR_SIZE && buf.iter().all(|&b| b == 0) {
                db.commit_journal()?;
            }
        }

        self.journal.write_all_at(buf, offset)
    }

    fn set_len(&mut self, size: u64) -> io::Result<()> {
        self.database.write().unwrap().commit_journal()?;
        self.journal.set_len(size)
    }

    fn handle_type(&self) -> &'static str {
        "journal"
    }
    fn handle_name(&self) -> &str {
        &self.name
    }
}
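
// Rollback-journal bookkeeping, for context: a SQLite journal header is 28
// bytes and encodes, among other things, the database page size, which is
// why the first header write above can recover it via
// `parse_page_size_journal`. At commit, SQLite invalidates the journal
// either by overwriting that header with zeroes (journal_mode=PERSIST) or by
// truncating/deleting the file, so both the all-zero 28-byte write at
// offset 0 and `set_len` are treated as the commit point and call
// `commit_journal()`.
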
struct LiteTempDbHandle {
    name: String,
    file: fs::File,
    lock: ConnLock,
}

impl LiteTempDbHandle {
    pub(crate) fn new<P: AsRef<Path>>(path: P, access: OpenAccess) -> io::Result<Self> {
        let mut o = fs::OpenOptions::new();
        o.read(true).write(access != OpenAccess::Read);
        match access {
            OpenAccess::Create => {
                o.create(true);
            }
            OpenAccess::CreateNew => {
                o.create_new(true);
            }
            _ => (),
        };

        let name = path.as_ref().to_string_lossy().to_string();
        let file = o.open(path)?;
        let vfs_lock = VfsLock::new();
        let lock = vfs_lock.conn_lock();
        Ok(LiteTempDbHandle { name, file, lock })
    }
}

impl DatabaseHandle for LiteTempDbHandle {
    fn size(&self) -> io::Result<u64> {
        self.file.metadata().map(|m| m.len())
    }

    fn read_exact_at(&mut self, buf: &mut [u8], offset: u64) -> io::Result<()> {
        self.file.read_exact_at(buf, offset)
    }

    fn write_all_at(&mut self, buf: &[u8], offset: u64) -> io::Result<()> {
        self.file.write_all_at(buf, offset)
    }

    fn set_len(&mut self, size: u64) -> io::Result<()> {
        self.file.set_len(size)
    }

    fn lock(&mut self, lock: LockKind) -> bool {
        self.lock.acquire(lock)
    }

    fn reserved(&mut self) -> bool {
        self.lock.reserved()
    }

    fn current_lock(&self) -> LockKind {
        self.lock.state()
    }

    fn handle_type(&self) -> &'static str {
        "tempdb"
    }
    fn handle_name(&self) -> &str {
        &self.name
    }
}
--------------------------------------------------------------------------------