├── .gitignore ├── .rustfmt.toml ├── .github ├── dependabot.yml └── workflows │ ├── release.yaml │ └── build.yml ├── src ├── testutil.rs ├── reader.rs ├── file_set.rs ├── segment.rs ├── message.rs ├── index.rs └── lib.rs ├── examples ├── simple_write.rs └── bulk.rs ├── Cargo.toml ├── LICENSE.md ├── benches ├── message.rs ├── segment.rs ├── index.rs └── log-append.rs └── README.md /.gitignore: -------------------------------------------------------------------------------- 1 | target 2 | Cargo.lock 3 | .log* 4 | .DS_Store 5 | log 6 | -------------------------------------------------------------------------------- /.rustfmt.toml: -------------------------------------------------------------------------------- 1 | version = "Two" 2 | reorder_imports = true 3 | wrap_comments = true 4 | unstable_features = true 5 | condense_wildcard_suffixes = true 6 | edition = "2018" 7 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: cargo 4 | directory: "/" 5 | schedule: 6 | interval: daily 7 | time: "13:00" 8 | open-pull-requests-limit: 10 9 | -------------------------------------------------------------------------------- /src/testutil.rs: -------------------------------------------------------------------------------- 1 | use std::{ 2 | fs, 3 | path::{Path, PathBuf}, 4 | }; 5 | 6 | pub struct TestDir { 7 | path: PathBuf, 8 | } 9 | 10 | impl TestDir { 11 | pub fn new() -> TestDir { 12 | let mut path_buf = PathBuf::new(); 13 | path_buf.push("target"); 14 | path_buf.push("test-data"); 15 | path_buf.push(format!("test-{:020}", rand::random::<u64>())); 16 | fs::create_dir_all(&path_buf).unwrap(); 17 | TestDir { path: path_buf } 18 | } 19 | } 20 | 21 | impl Drop for TestDir { 22 | fn drop(&mut self) { 23 | fs::remove_dir_all(&self).expect("Unable to delete test data directory"); 24 | } 25 | } 26 | 27 | impl AsRef<Path> for TestDir { 28 | fn as_ref(&self) -> &Path { 29 | self.path.as_ref() 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /.github/workflows/release.yaml: -------------------------------------------------------------------------------- 1 | name: release 2 | 3 | on: 4 | push: 5 | tags: 6 | - v* 7 | 8 | env: 9 | CARGO_TERM_COLOR: always 10 | 11 | jobs: 12 | publish-crate: 13 | name: Publish Crate 14 | runs-on: ubuntu-latest 15 | steps: 16 | - name: Clone 17 | uses: actions/checkout@v2 18 | - name: Cache 19 | uses: actions/cache@v2 20 | with: 21 | path: | 22 | ~/.cargo/registry 23 | ~/.cargo/git 24 | ~/.rustup 25 | target 26 | key: ${{ runner.os }}-stable 27 | - name: Setup 28 | run: | 29 | rustup install stable 30 | - name: Build 31 | run: cargo build --verbose 32 | - name: Run tests 33 | run: cargo test 34 | - name: Publish 35 | run: cargo publish 36 | env: 37 | CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }} 38 | -------------------------------------------------------------------------------- /examples/simple_write.rs: -------------------------------------------------------------------------------- 1 | use commitlog::{message::*, *}; 2 | use std::time::{self, SystemTime}; 3 | 4 | fn main() { 5 | // open a timestamped '.log<secs>' directory for segment and index storage 6 | let opts = LogOptions::new(format!( 7 | ".log{}", 8 | SystemTime::now() 9 | .duration_since(time::UNIX_EPOCH) 10 | .unwrap() 11 | .as_secs() 12 | )); 13 | let mut log = CommitLog::new(opts).unwrap(); 14 
| 15 | // append to the log 16 | log.append_msg("hello world").unwrap(); // offset 0 17 | log.append_msg("second message").unwrap(); // offset 1 18 | 19 | // read the messages 20 | let messages = log.read(0, ReadLimit::default()).unwrap(); 21 | for msg in messages.iter() { 22 | println!( 23 | "{} - {}", 24 | msg.offset(), 25 | String::from_utf8_lossy(msg.payload()) 26 | ); 27 | } 28 | 29 | // prints: 30 | // 0 - hello world 31 | // 1 - second message 32 | } 33 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | authors = ["Zack Owens "] 3 | homepage = "https://github.com/zowens/commitlog" 4 | license = "MIT" 5 | name = "commitlog" 6 | readme = "README.md" 7 | repository = "https://github.com/zowens/commitlog" 8 | description = "Sequential, disk-backed commit log library." 9 | version = "0.2.0" 10 | edition = "2018" 11 | 12 | [features] 13 | # Enable benchmark of the private api. This flag should be used only 14 | # for benchmarking purposes! 15 | internals = [] 16 | 17 | [dependencies] 18 | byteorder = "1.0" 19 | bytes = "1" 20 | crc32c = "0.6" 21 | log = "0.4.1" 22 | memmap2 = "0.9" 23 | page_size = "0.6" 24 | 25 | [dev-dependencies] 26 | criterion = "0.5" 27 | env_logger = "0.11" 28 | rand = "0.8" 29 | 30 | [[bench]] 31 | name = "index" 32 | harness = false 33 | required-features = ["internals"] 34 | 35 | [[bench]] 36 | name = "log-append" 37 | harness = false 38 | 39 | [[bench]] 40 | name = "message" 41 | harness = false 42 | 43 | [[bench]] 44 | name = "segment" 45 | harness = false 46 | required-features = ["internals"] 47 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2016 Zack Owens 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
-------------------------------------------------------------------------------- /benches/message.rs: -------------------------------------------------------------------------------- 1 | use criterion::{criterion_group, criterion_main, Criterion}; 2 | 3 | use commitlog::message::{set_offsets, MessageBuf}; 4 | 5 | fn bench_message_construct(c: &mut Criterion) { 6 | c.bench_function("message construct", |b| { 7 | b.iter(|| { 8 | let mut msg_buf = MessageBuf::default(); 9 | msg_buf 10 | .push( 11 | "719c3b4556066a1c7a06c9d55959d003d9b4627 12 | 3aabe2eae15ef4ba78321ae2a68b0997a4abbd035a4cdbc8b27d701089a5af63a 13 | 8b81f9dc16a874d0eda0983b79c1a6f79fe3ae61612ba2558562a85595f2f3f07 14 | fab8faba1b849685b61aad6b131b7041ca79cc662b4c5aad4d1b78fb1034fafa2 15 | fe4f30207395e399c6d724", 16 | ) 17 | .unwrap(); 18 | msg_buf 19 | .push( 20 | "2cea26f165640d448a9b89f1f871e6fca80a125 21 | 5b1daea6752bf99d8c5f90e706deaecddf304b2bf5a5e72e32b29bc7c54018265 22 | d17317a670ea406fd7e6b485a19f5fb1efe686badb6599d45106b95b55695cd4e 23 | 24729edb312a5dec1bc80e8d8b3ee4b69af1f3a9c801e7fb527e65f7c13c62bb3 24 | 7261c0", 25 | ) 26 | .unwrap(); 27 | set_offsets(&mut msg_buf, 1250); 28 | }) 29 | }); 30 | } 31 | 32 | criterion_group!(benches, bench_message_construct); 33 | criterion_main!(benches); 34 | -------------------------------------------------------------------------------- /benches/segment.rs: -------------------------------------------------------------------------------- 1 | use criterion::{criterion_group, criterion_main, Criterion}; 2 | 3 | use commitlog::{message::MessageBuf, Segment}; 4 | use testutil::TestDir; 5 | 6 | mod testutil { 7 | include!("../src/testutil.rs"); 8 | } 9 | 10 | fn bench_segment_append(c: &mut Criterion) { 11 | let path = TestDir::new(); 12 | 13 | let mut seg = Segment::new(path, 100u64, 1000 * 1024 * 1024).unwrap(); 14 | let payload = b"01234567891011121314151617181920"; 15 | 16 | c.bench_function("segment append", |b| { 17 | b.iter(|| { 18 | let mut buf = MessageBuf::default(); 19 | buf.push(payload).unwrap(); 20 | seg.append(&mut buf).unwrap(); 21 | }) 22 | }); 23 | } 24 | 25 | fn bench_segment_append_flush(c: &mut Criterion) { 26 | let path = TestDir::new(); 27 | 28 | let mut seg = Segment::new(path, 100u64, 1000 * 1024 * 1024).unwrap(); 29 | let payload = b"01234567891011121314151617181920"; 30 | 31 | c.bench_function("segment append flush", |b| { 32 | b.iter(|| { 33 | let mut buf = MessageBuf::default(); 34 | buf.push(payload).unwrap(); 35 | seg.append(&mut buf).unwrap(); 36 | seg.flush_sync().unwrap(); 37 | }) 38 | }); 39 | } 40 | 41 | criterion_group!(benches, bench_segment_append, bench_segment_append_flush); 42 | criterion_main!(benches); 43 | -------------------------------------------------------------------------------- /src/reader.rs: -------------------------------------------------------------------------------- 1 | //! Custom log reading. 2 | use super::message::{MessageBuf, MessageError}; 3 | use std::fs::File; 4 | 5 | /// Trait that allows reading from a slice of the log. 6 | pub trait LogSliceReader { 7 | /// Result type of this reader. 8 | type Result: 'static; 9 | 10 | /// Reads the slice of the file containing the message set. 11 | /// 12 | /// * `file` - The segment file that contains the slice of the log. 13 | /// * `file_position` - The offset within the file that starts the slice. 14 | /// * `bytes` - Total number of bytes, from the offset, that contains the 15 | /// message set slice. 
16 | fn read_from( 17 | &mut self, 18 | file: &File, 19 | file_position: u32, 20 | bytes: usize, 21 | ) -> Result<Self::Result, MessageError>; 22 | } 23 | 24 | #[cfg(unix)] 25 | #[derive(Default, Copy, Clone)] 26 | /// Reader of the file segment into memory. 27 | pub struct MessageBufReader; 28 | 29 | impl LogSliceReader for MessageBufReader { 30 | type Result = MessageBuf; 31 | 32 | fn read_from( 33 | &mut self, 34 | file: &File, 35 | file_position: u32, 36 | bytes: usize, 37 | ) -> Result<Self::Result, MessageError> { 38 | use std::os::unix::fs::FileExt; 39 | 40 | let mut vec = vec![0; bytes]; 41 | file.read_at(&mut vec, u64::from(file_position))?; 42 | MessageBuf::from_bytes(vec) 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /benches/index.rs: -------------------------------------------------------------------------------- 1 | use criterion::{black_box, criterion_group, criterion_main, Criterion}; 2 | 3 | use commitlog::{Index, IndexBuf}; 4 | use testutil::TestDir; 5 | 6 | mod testutil { 7 | include!("../src/testutil.rs"); 8 | } 9 | 10 | fn bench_find_exact(c: &mut Criterion) { 11 | let dir = TestDir::new(); 12 | let mut index = Index::new(&dir, 10u64, 9000usize).unwrap(); 13 | 14 | for i in 0..10 { 15 | let mut buf = IndexBuf::new(20, 10u64); 16 | for j in 0..200 { 17 | let off = 10u32 + (i * j); 18 | buf.push(off as u64, off); 19 | } 20 | index.append(buf).unwrap(); 21 | } 22 | 23 | index.flush_sync().unwrap(); 24 | c.bench_function("find exact", |b| { 25 | b.iter(|| { 26 | index.find(black_box(943)).unwrap(); 27 | }) 28 | }); 29 | } 30 | 31 | fn bench_insert_flush(c: &mut Criterion) { 32 | let dir = TestDir::new(); 33 | let mut index = Index::new(&dir, 10u64, 9000usize).unwrap(); 34 | 35 | c.bench_function("insert flush", |b| { 36 | b.iter(|| { 37 | let mut buf = IndexBuf::new(20, 10u64); 38 | for j in 0..20 { 39 | let off = 10u32 + j; 40 | buf.push(off as u64, off); 41 | } 42 | index.append(buf).unwrap(); 43 | index.flush_sync().unwrap(); 44 | }) 45 | }); 46 | } 47 | 48 | criterion_group!(benches, bench_find_exact, bench_insert_flush); 49 | criterion_main!(benches); 50 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Commit Log 2 | 3 | Sequential, disk-backed commit log library for Rust. The library can be used to build higher-level distributed abstractions on top of a distributed log, such as [Paxos](https://github.com/zowens/paxos-rs), [Chain Replication](https://github.com/zowens/chain-replication) or Raft.
4 | 5 | [![Crates.io](https://img.shields.io/crates/v/commitlog.svg?maxAge=2592000)](https://crates.io/crates/commitlog) 6 | [![Docs.rs](https://docs.rs/commitlog/badge.svg)](https://docs.rs/commitlog/) 7 | [![Travis](https://travis-ci.org/zowens/commitlog.svg?branch=master)](https://travis-ci.org/zowens/commitlog/) 8 | 9 | [Documentation](https://docs.rs/commitlog/) 10 | 11 | ## Usage 12 | 13 | First, add this to your `Cargo.toml`: 14 | 15 | ```toml 16 | [dependencies] 17 | commitlog = "0.2" 18 | ``` 19 | 20 | ```rust 21 | use commitlog::*; 22 | use commitlog::message::*; 23 | 24 | fn main() { 25 | // open a directory called 'log' for segment and index storage 26 | let opts = LogOptions::new("log"); 27 | let mut log = CommitLog::new(opts).unwrap(); 28 | 29 | // append to the log 30 | log.append_msg("hello world").unwrap(); // offset 0 31 | log.append_msg("second message").unwrap(); // offset 1 32 | 33 | // read the messages 34 | let messages = log.read(0, ReadLimit::default()).unwrap(); 35 | for msg in messages.iter() { 36 | println!("{} - {}", msg.offset(), String::from_utf8_lossy(msg.payload())); 37 | } 38 | 39 | // prints: 40 | // 0 - hello world 41 | // 1 - second message 42 | } 43 | 44 | ``` 45 | 46 | ## Prior Art 47 | 48 | - [Apache Kafka](https://kafka.apache.org/) 49 | - [Jocko](https://github.com/travisjeffery/jocko) + [EXCELLENT Blog Post](https://medium.com/the-hoard/how-kafkas-storage-internals-work-3a29b02e026) 50 | -------------------------------------------------------------------------------- /examples/bulk.rs: -------------------------------------------------------------------------------- 1 | use commitlog::{message::*, *}; 2 | use std::time::{self, SystemTime}; 3 | 4 | const BATCH_SIZE: u32 = 200; 5 | const BATCHES: u32 = 10_000; 6 | 7 | fn main() { 8 | env_logger::init(); 9 | 10 | // open a timestamped '.log<secs>' directory for segment and index storage 11 | let opts = LogOptions::new(format!( 12 | ".log{}", 13 | SystemTime::now() 14 | .duration_since(time::UNIX_EPOCH) 15 | .unwrap() 16 | .as_secs() 17 | )); 18 | let mut log = CommitLog::new(opts).unwrap(); 19 | 20 | let start = SystemTime::now(); 21 | for i in 0..BATCHES { 22 | let mut buf = (0..BATCH_SIZE) 23 | .map(|j| format!("{}-{}", i, j)) 24 | .collect::<MessageBuf>(); 25 | log.append(&mut buf).expect("Unable to append batch"); 26 | 27 | if i == 99 || i == 50 { 28 | log.flush().expect("Unable to flush"); 29 | } 30 | } 31 | 32 | let end = SystemTime::now(); 33 | println!( 34 | "Appended {} messages in {:?}", 35 | BATCH_SIZE * BATCHES, 36 | end.duration_since(start) 37 | ); 38 | 39 | // read the log 40 | let start = SystemTime::now(); 41 | let mut total = 0; 42 | let mut iterations = 0; 43 | let mut pos = 0; 44 | loop { 45 | let entries = log 46 | .read(pos, ReadLimit::max_bytes(10_240)) 47 | .expect("Unable to read messages from the log"); 48 | match entries.iter().last().map(|m| m.offset()) { 49 | Some(off) => { 50 | iterations += 1; 51 | total += entries.len(); 52 | assert!(pos < off); 53 | pos = off + 1; 54 | } 55 | None => { 56 | let end = SystemTime::now(); 57 | println!( 58 | "Read {} messages in {:?}, {} iterations", 59 | total, 60 | end.duration_since(start), 61 | iterations 62 | ); 63 | break; 64 | } 65 | } 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /benches/log-append.rs: -------------------------------------------------------------------------------- 1 | use criterion::{criterion_group, criterion_main, Criterion}; 2 | 3 | use commitlog::{message::MessageBuf, CommitLog, 
LogOptions}; 4 | use testutil::TestDir; 5 | 6 | mod testutil { 7 | include!("../src/testutil.rs"); 8 | } 9 | 10 | fn commitlog_append_10000(c: &mut Criterion) { 11 | let dir = TestDir::new(); 12 | let mut log = CommitLog::new(LogOptions::new(&dir)).unwrap(); 13 | 14 | c.bench_function("append 10000", |b| { 15 | b.iter(|| { 16 | for _ in 0..10_000 { 17 | log.append_msg( 18 | "719c3b4556066a1c7a06c9d55959d003d9b46273aabe2 \ 19 | eae15ef4ba78321ae2a68b0997a4abbd035a4cdbc8b27d701089a5af63a8b \ 20 | 81f9dc16a874d0eda0983b79c1a6f79fe3ae61612ba2558562a85595f2f3f \ 21 | 07fab8faba1b849685b61aad6b131b7041ca79cc662b4c5aad4d1b78fb103 \ 22 | 4fafa2fe4f30207395e399c6d724", 23 | ) 24 | .unwrap(); 25 | } 26 | log.flush().unwrap(); 27 | }) 28 | }); 29 | } 30 | 31 | fn commitlog_append_10000_batched(c: &mut Criterion) { 32 | let dir = TestDir::new(); 33 | let mut log = CommitLog::new(LogOptions::new(&dir)).unwrap(); 34 | 35 | c.bench_function("append 10000 batched", |b| { 36 | b.iter(|| { 37 | let mut buf = MessageBuf::default(); 38 | for _ in 0..200 { 39 | for _ in 0..50 { 40 | buf.push( 41 | "719c3b4556066a1c7a06c9d55959d003d9b46273aabe2 \ 42 | eae15ef4ba78321ae2a68b0997a4abbd035a4cdbc8b27d701089a5af63a8b \ 43 | 81f9dc16a874d0eda0983b79c1a6f79fe3ae61612ba2558562a85595f2f3f \ 44 | 07fab8faba1b849685b61aad6b131b7041ca79cc662b4c5aad4d1b78fb103 \ 45 | 4fafa2fe4f30207395e399c6d724", 46 | ) 47 | .unwrap(); 48 | } 49 | log.append(&mut buf).unwrap(); 50 | unsafe { 51 | buf.unsafe_clear(); 52 | } 53 | } 54 | log.flush().unwrap(); 55 | }) 56 | }); 57 | } 58 | 59 | criterion_group!( 60 | benches, 61 | commitlog_append_10000, 62 | commitlog_append_10000_batched 63 | ); 64 | criterion_main!(benches); 65 | -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | name: build 2 | on: 3 | push: 4 | branches: [ master ] 5 | pull_request: 6 | branches: [ master ] 7 | env: 8 | CARGO_TERM_COLOR: always 9 | jobs: 10 | build: 11 | runs-on: ${{matrix.os}} 12 | strategy: 13 | matrix: 14 | include: 15 | - build: linux 16 | os: ubuntu-latest 17 | rust: stable 18 | target: x86_64-unknown-linux-musl 19 | cross: false 20 | - build: linux 21 | os: ubuntu-latest 22 | rust: nightly 23 | target: x86_64-unknown-linux-musl 24 | cross: false 25 | - build: aarch64 26 | os: ubuntu-latest 27 | rust: stable 28 | target: aarch64-unknown-linux-gnu 29 | linker: gcc-aarch64-linux-gnu 30 | cross: true 31 | - build: aarch64 32 | os: ubuntu-latest 33 | rust: nightly 34 | target: aarch64-unknown-linux-gnu 35 | linker: gcc-aarch64-linux-gnu 36 | cross: true 37 | steps: 38 | - uses: actions/checkout@v2 39 | with: 40 | fetch-depth: 1 41 | - name: Cache 42 | uses: actions/cache@v2 43 | with: 44 | path: | 45 | ~/.cargo/registry 46 | ~/.cargo/git 47 | ~/.rustup 48 | target 49 | key: ${{ runner.os }}-${{ matrix.rust }} 50 | - name: Install Linker 51 | if: matrix.cross 52 | run: | 53 | sudo apt update 54 | sudo apt install ${{ matrix.linker }} 55 | - name: Install Rust 56 | run: | 57 | rustup install ${{ matrix.rust }} 58 | rustup target add ${{ matrix.target }} 59 | rustup show 60 | - name: Build 61 | run: cargo build --verbose --target ${{ matrix.target }} 62 | - name: Run tests 63 | run: cargo test --verbose 64 | fmt: 65 | name: Rustfmt 66 | runs-on: ubuntu-latest 67 | steps: 68 | - uses: actions/checkout@v2 69 | - uses: actions-rs/toolchain@v1 70 | with: 71 | profile: minimal 72 | toolchain: stable 73 | override: true 
74 | - run: rustup component add rustfmt 75 | - uses: actions-rs/cargo@v1 76 | with: 77 | command: fmt 78 | args: --all -- --check 79 | clippy: 80 | name: Clippy 81 | runs-on: ubuntu-latest 82 | steps: 83 | - uses: actions/checkout@v2 84 | - uses: actions-rs/toolchain@v1 85 | with: 86 | toolchain: stable 87 | components: clippy 88 | override: true 89 | - uses: actions-rs/clippy-check@v1 90 | with: 91 | token: ${{ secrets.GITHUB_TOKEN }} 92 | args: --all-features 93 | name: Clippy Output 94 | -------------------------------------------------------------------------------- /src/file_set.rs: -------------------------------------------------------------------------------- 1 | use log::{error, info, trace, warn}; 2 | use std::{ 3 | fs, io, 4 | mem::{replace, swap}, 5 | }; 6 | 7 | use super::{index::*, segment::*, LogOptions, Offset}; 8 | use std::collections::BTreeMap; 9 | 10 | pub struct FileSet { 11 | active: (Index, Segment), 12 | closed: BTreeMap<u64, (Index, Segment)>, 13 | opts: LogOptions, 14 | } 15 | 16 | impl FileSet { 17 | pub fn load_log(opts: LogOptions) -> io::Result<FileSet> { 18 | let mut segments = BTreeMap::new(); 19 | let mut indexes = BTreeMap::new(); 20 | 21 | let files = fs::read_dir(&opts.log_dir)? 22 | // ignore Err results 23 | .filter_map(|e| e.ok()) 24 | // ignore directories 25 | .filter(|e| e.metadata().map(|m| m.is_file()).unwrap_or(false)); 26 | 27 | for f in files { 28 | match f.path().extension() { 29 | Some(ext) if SEGMENT_FILE_NAME_EXTENSION.eq(ext) => { 30 | let segment = match Segment::open(f.path(), opts.log_max_bytes) { 31 | Ok(seg) => seg, 32 | Err(e) => { 33 | error!("Unable to open segment {:?}: {}", f.path(), e); 34 | return Err(e); 35 | } 36 | }; 37 | 38 | let offset = segment.starting_offset(); 39 | segments.insert(offset, segment); 40 | } 41 | Some(ext) if INDEX_FILE_NAME_EXTENSION.eq(ext) => { 42 | let index = match Index::open(f.path()) { 43 | Ok(ind) => ind, 44 | Err(e) => { 45 | error!("Unable to open index {:?}: {}", f.path(), e); 46 | return Err(e); 47 | } 48 | }; 49 | 50 | let offset = index.starting_offset(); 51 | indexes.insert(offset, index); 52 | // TODO: fix missing index updates (crash before write to 53 | // index) 54 | } 55 | _ => {} 56 | } 57 | } 58 | 59 | // pair up the index and segments (there should be an index per segment) 60 | let mut closed = segments 61 | .into_iter() 62 | .map(move |(i, s)| { 63 | match indexes.remove(&i) { 64 | Some(v) => (i, (v, s)), 65 | None => { 66 | // TODO: create the index from the segment 67 | panic!("No index found for segment starting at {}", i); 68 | } 69 | } 70 | }) 71 | .collect::<BTreeMap<_, _>>(); 72 | 73 | // try to reuse the last index if it is not full.
otherwise, open a new index 74 | // at the correct offset 75 | let last_entry = closed.keys().next_back().cloned(); 76 | let (ind, seg) = match last_entry { 77 | Some(off) => { 78 | info!("Reusing index and segment starting at offset {}", off); 79 | closed.remove(&off).unwrap() 80 | } 81 | None => { 82 | info!("Starting new index and segment at offset 0"); 83 | let ind = Index::new(&opts.log_dir, 0, opts.index_max_bytes)?; 84 | let seg = Segment::new(&opts.log_dir, 0, opts.log_max_bytes)?; 85 | (ind, seg) 86 | } 87 | }; 88 | 89 | // mark all closed indexes as readonly (indexes are not opened as readonly) 90 | for &mut (ref mut ind, _) in closed.values_mut() { 91 | ind.set_readonly()?; 92 | } 93 | 94 | Ok(FileSet { 95 | active: (ind, seg), 96 | closed, 97 | opts, 98 | }) 99 | } 100 | 101 | pub fn active_segment_mut(&mut self) -> &mut Segment { 102 | &mut self.active.1 103 | } 104 | 105 | pub fn active_index_mut(&mut self) -> &mut Index { 106 | &mut self.active.0 107 | } 108 | 109 | pub fn active_index(&self) -> &Index { 110 | &self.active.0 111 | } 112 | 113 | pub fn find(&self, offset: u64) -> &(Index, Segment) { 114 | let active_seg_start_off = self.active.0.starting_offset(); 115 | if offset < active_seg_start_off { 116 | trace!( 117 | "Offset {} is before the active index; searching closed indexes", 118 | offset 119 | ); 120 | if let Some(entry) = self.closed.range(..=offset).next_back().map(|p| p.1) { 121 | return entry; 122 | } 123 | } 124 | &self.active 125 | } 126 | 127 | pub fn roll_segment(&mut self) -> io::Result<()> { 128 | self.active.0.set_readonly()?; 129 | self.active.1.flush_sync()?; 130 | 131 | let next_offset = self.active.0.next_offset(); 132 | 133 | info!("Starting new segment and index at offset {}", next_offset); 134 | 135 | // set the segment and index to the new active index/seg 136 | let mut p = { 137 | let seg = Segment::new(&self.opts.log_dir, next_offset, self.opts.log_max_bytes)?; 138 | let ind = Index::new(&self.opts.log_dir, next_offset, self.opts.index_max_bytes)?; 139 | (ind, seg) 140 | }; 141 | swap(&mut p, &mut self.active); 142 | self.closed.insert(p.1.starting_offset(), p); 143 | Ok(()) 144 | } 145 | 146 | pub fn remove_after(&mut self, offset: u64) -> Vec<(Index, Segment)> { 147 | if offset >= self.active.0.starting_offset() { 148 | return vec![]; 149 | } 150 | 151 | // find the midpoint 152 | // 153 | // E.g: 154 | // offset = 6 155 | // [0 5 10 15] => split key 5 156 | // 157 | // midpoint is then used as the active index/segment pair 158 | let split_key = match self 159 | .closed 160 | .range(..=offset) 161 | .next_back() 162 | .map(|p| p.0) 163 | .cloned() 164 | { 165 | Some(key) => { 166 | trace!("File set split key for truncation {}", key); 167 | key 168 | } 169 | None => { 170 | warn!("No split key found before offset {}", offset); 171 | return vec![]; 172 | } 173 | }; 174 | 175 | // split off the range of closed segment/index pairs including 176 | // the midpoint (which will become the new active index/segment) 177 | let mut after = self.closed.split_off(&split_key); 178 | 179 | let mut active = after.remove(&split_key).unwrap(); 180 | trace!( 181 | "Setting active to segment starting {}", 182 | active.0.starting_offset() 183 | ); 184 | assert!(active.0.starting_offset() <= offset); 185 | 186 | swap(&mut active, &mut self.active); 187 | 188 | let mut pairs = after.into_iter().map(|p| p.1).collect::<Vec<_>>(); 189 | pairs.push(active); 190 | pairs 191 | }
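The split-key selection in `remove_after` above is easiest to see in isolation: the greatest closed key at or below the requested offset becomes the new active pair, and everything from that key onward is detached. A minimal standalone sketch of that `BTreeMap` dance (illustrative only; string names stand in for the `(Index, Segment)` pairs):

```rust
use std::collections::BTreeMap;

fn main() {
    // closed segments keyed by starting offset, as in FileSet::closed
    let mut closed: BTreeMap<u64, &str> = BTreeMap::new();
    for (key, name) in [(0u64, "seg-0"), (5, "seg-5"), (10, "seg-10"), (15, "seg-15")] {
        closed.insert(key, name);
    }

    // offset = 6 selects split key 5, mirroring the comment in remove_after
    let offset = 6u64;
    let split_key = *closed.range(..=offset).next_back().unwrap().0;
    assert_eq!(split_key, 5);

    // detach everything at or after the split key; the entry at the split
    // key itself would become the new active index/segment pair
    let after = closed.split_off(&split_key);
    assert_eq!(closed.keys().copied().collect::<Vec<_>>(), vec![0]);
    assert_eq!(after.keys().copied().collect::<Vec<_>>(), vec![5, 10, 15]);
}
```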
192 | 193 | pub fn remove_before(&mut self, offset: u64) -> Vec<(Index, Segment)> { 194 | // split such that self.closed contains [..offset), suffix=[offset,...] 195 | let split_point = { 196 | match self 197 | .closed 198 | .range(..=offset) 199 | .next_back() 200 | .map(|e| e.0) 201 | .cloned() 202 | { 203 | Some(off) => off, 204 | None => return vec![], 205 | } 206 | }; 207 | 208 | let suffix = self.closed.split_off(&split_point); 209 | 210 | // put the suffix back into the closed segments 211 | let prefix = replace(&mut self.closed, suffix); 212 | prefix.into_values().collect() 213 | } 214 | 215 | pub fn log_options(&self) -> &LogOptions { 216 | &self.opts 217 | } 218 | 219 | /// First offset written. This may not be 0 due to removal of the start of 220 | /// the log 221 | pub fn min_offset(&self) -> Option<Offset> { 222 | if let Some(v) = self.closed.keys().next() { 223 | Some(*v) 224 | } else if !self.active.0.is_empty() { 225 | Some(self.active.0.starting_offset()) 226 | } else { 227 | None 228 | } 229 | } 230 | } 231 | -------------------------------------------------------------------------------- /src/segment.rs: -------------------------------------------------------------------------------- 1 | use super::{reader::*, *}; 2 | use std::{ 3 | fs::{self, File, OpenOptions}, 4 | io::{self, Write}, 5 | os::unix::fs::FileExt, 6 | path::{Path, PathBuf}, 7 | }; 8 | 9 | /// Number of bytes contained in the base name of the file. 10 | pub static SEGMENT_FILE_NAME_LEN: usize = 20; 11 | /// File extension for the segment file. 12 | pub static SEGMENT_FILE_NAME_EXTENSION: &str = "log"; 13 | 14 | /// Magic that appears in the header of the segment for version 1. 15 | /// 16 | /// There are a couple reasons for the magic. The primary reason is 17 | /// to allow versioning, when the time comes. The second is to remove 18 | /// the possibility of a 0 offset within the index. This helps to identify 19 | /// the start of new index entries. 20 | pub static VERSION_1_MAGIC: [u8; 2] = [0xff, 0xff]; 21 | 22 | #[derive(Debug)] 23 | pub enum SegmentAppendError { 24 | LogFull, 25 | IoError(io::Error), 26 | } 27 | 28 | impl From<io::Error> for SegmentAppendError { 29 | #[inline] 30 | fn from(e: io::Error) -> SegmentAppendError { 31 | SegmentAppendError::IoError(e) 32 | } 33 | } 34 | 35 | pub struct AppendMetadata { 36 | pub starting_position: usize, 37 | } 38 | 39 | /// A segment is a portion of the commit log. Segments are append-only logs 40 | /// written until the maximum size is reached. 41 | pub struct Segment { 42 | /// File descriptor 43 | file: File, 44 | 45 | /// Path to the file 46 | path: PathBuf, 47 | 48 | /// Base offset of the log 49 | base_offset: u64, 50 | 51 | /// current file position for the write 52 | write_pos: usize, 53 | 54 | /// Maximum number of bytes permitted to be appended to the log 55 | max_bytes: usize, 56 | } 57 | 58 | impl Segment { 59 | pub fn new<P>(log_dir: P, base_offset: u64, max_bytes: usize) -> io::Result<Segment> 60 | where 61 | P: AsRef<Path>, 62 | { 63 | let log_path = { 64 | // the log is of the form BASE_OFFSET.log 65 | let mut path_buf = PathBuf::new(); 66 | path_buf.push(&log_dir); 67 | path_buf.push(format!("{:020}", base_offset)); 68 | path_buf.set_extension(SEGMENT_FILE_NAME_EXTENSION); 69 | path_buf 70 | }; 71 | 72 | let mut f = OpenOptions::new() 73 | .read(true) 74 | .create_new(true) 75 | .append(true) 76 | .open(&log_path)?; 77 | 78 | // add the magic 79 | f.write_all(&VERSION_1_MAGIC)?; 80 | 81 | Ok(Segment { 82 | file: f, 83 | path: log_path, 84 | base_offset, 85 | write_pos: 2, 86 | max_bytes, 87 | }) 88 | } 89 | 90 | pub fn open<P>(seg_path: P, max_bytes: usize) -> io::Result<Segment> 91 | where 92 | P: AsRef<Path>, 93 | { 94 | let seg_file = OpenOptions::new() 95 | .read(true) 96 | .write(true) 97 | .append(true) 98 | .open(&seg_path)?; 99 | 100 | let filename = seg_path.as_ref().file_name().unwrap().to_str().unwrap(); 101 | let base_offset = match (&filename[0..SEGMENT_FILE_NAME_LEN]).parse::<u64>() { 102 | Ok(v) => v, 103 | Err(_) => { 104 | return Err(io::Error::new( 105 | io::ErrorKind::InvalidData, 106 | "Segment file name does not parse as u64", 107 | )); 108 | } 109 | }; 110 | 111 | let meta = seg_file.metadata()?; 112 | 113 | // check the magic 114 | { 115 | let mut bytes = [0u8; 2]; 116 | let size = seg_file.read_at(&mut bytes, 0)?; 117 | if size < 2 || bytes != VERSION_1_MAGIC { 118 | return Err(io::Error::new( 119 | io::ErrorKind::InvalidData, 120 | format!( 121 | "Segment file {} does not contain Version 1 \ 122 | magic", 123 | filename 124 | ), 125 | )); 126 | } 127 | } 128 | 129 | info!("Opened segment {}", filename); 130 | 131 | Ok(Segment { 132 | file: seg_file, 133 | path: seg_path.as_ref().to_path_buf(), 134 | write_pos: meta.len() as usize, 135 | base_offset, 136 | max_bytes, 137 | }) 138 | } 139 | 140 | pub fn size(&self) -> usize { 141 | self.write_pos 142 | } 143 | 144 | #[inline] 145 | pub fn starting_offset(&self) -> u64 { 146 | self.base_offset 147 | } 148 | 149 | pub fn append<T: MessageSet>( 150 | &mut self, 151 | payload: &T, 152 | ) -> Result<AppendMetadata, SegmentAppendError> { 153 | // ensure we have the capacity 154 | let payload_len = payload.bytes().len(); 155 | if payload_len + self.write_pos > self.max_bytes { 156 | return Err(SegmentAppendError::LogFull); 157 | } 158 | 159 | let meta = AppendMetadata { 160 | starting_position: self.write_pos, 161 | }; 162 | 163 | self.file.write_all(payload.bytes())?; 164 | self.write_pos += payload_len; 165 | Ok(meta) 166 | } 167 | 168 | pub fn flush_sync(&mut self) -> io::Result<()> { 169 | self.file.flush() 170 | } 171 | 172 | pub fn read_slice<T: LogSliceReader>( 173 | &self, 174 | reader: &mut T, 175 | file_pos: u32, 176 | bytes: u32, 177 | ) -> Result<T::Result, MessageError> { 178 | reader.read_from(&self.file, file_pos, bytes as usize) 179 | } 180 | 181 | /// Removes the segment file. 182 | pub fn remove(self) -> io::Result<()> { 183 | let path = self.path.clone(); 184 | drop(self); 185 | 186 | info!("Removing segment file {}", path.display()); 187 | fs::remove_file(path) 188 | } 189 | 190 | /// Truncates the segment file to the desired length. Other methods should 191 | /// ensure that the truncation is at a message boundary.
192 | pub fn truncate(&mut self, length: u32) -> io::Result<()> { 193 | self.file.set_len(u64::from(length))?; 194 | self.write_pos = length as usize; 195 | Ok(()) 196 | } 197 | } 198 | 199 | #[cfg(test)] 200 | mod tests { 201 | use super::{ 202 | super::{message::set_offsets, testutil::*}, 203 | *, 204 | }; 205 | use std::{fs, path::PathBuf}; 206 | 207 | #[test] 208 | pub fn log_append() { 209 | let path = TestDir::new(); 210 | let mut f = Segment::new(path, 0, 1024).unwrap(); 211 | 212 | { 213 | let mut buf = MessageBuf::default(); 214 | buf.push("12345").unwrap(); 215 | let meta = f.append(&mut buf).unwrap(); 216 | assert_eq!(2, meta.starting_position); 217 | } 218 | 219 | { 220 | let mut buf = MessageBuf::default(); 221 | buf.push("66666").unwrap(); 222 | buf.push("77777").unwrap(); 223 | let meta = f.append(&mut buf).unwrap(); 224 | assert_eq!(27, meta.starting_position); 225 | 226 | let mut it = buf.iter(); 227 | let p0 = it.next().unwrap(); 228 | assert_eq!(p0.total_bytes(), 25); 229 | 230 | let p1 = it.next().unwrap(); 231 | assert_eq!(p1.total_bytes(), 25); 232 | } 233 | 234 | f.flush_sync().unwrap(); 235 | } 236 | 237 | #[test] 238 | pub fn log_open() { 239 | let log_dir = TestDir::new(); 240 | 241 | { 242 | let mut f = Segment::new(&log_dir, 0, 1024).unwrap(); 243 | let mut buf = MessageBuf::default(); 244 | buf.push("12345").unwrap(); 245 | buf.push("66666").unwrap(); 246 | f.append(&mut buf).unwrap(); 247 | f.flush_sync().unwrap(); 248 | } 249 | 250 | // open it 251 | { 252 | let mut path_buf = PathBuf::new(); 253 | path_buf.push(&log_dir); 254 | path_buf.push(format!("{:020}", 0)); 255 | path_buf.set_extension(SEGMENT_FILE_NAME_EXTENSION); 256 | 257 | let res = Segment::open(&path_buf, 1024); 258 | assert!(res.is_ok(), "Err {:?}", res.err()); 259 | 260 | let f = res.unwrap(); 261 | assert_eq!(0, f.starting_offset()); 262 | } 263 | } 264 | 265 | #[test] 266 | pub fn log_read() { 267 | let log_dir = TestDir::new(); 268 | let mut f = Segment::new(&log_dir, 0, 1024).unwrap(); 269 | 270 | { 271 | let mut buf = MessageBuf::default(); 272 | buf.push("0123456789").unwrap(); 273 | buf.push("aaaaaaaaaa").unwrap(); 274 | buf.push("abc").unwrap(); 275 | set_offsets(&mut buf, 0); 276 | f.append(&mut buf).unwrap(); 277 | } 278 | 279 | let mut reader = MessageBufReader; 280 | let msgs = f.read_slice(&mut reader, 2, 83).unwrap(); 281 | assert_eq!(3, msgs.len()); 282 | 283 | for (i, m) in msgs.iter().enumerate() { 284 | assert_eq!(i as u64, m.offset()); 285 | } 286 | } 287 | 288 | #[test] 289 | pub fn log_read_with_size_limit() { 290 | let log_dir = TestDir::new(); 291 | let mut f = Segment::new(&log_dir, 0, 1024).unwrap(); 292 | 293 | let mut buf = MessageBuf::default(); 294 | buf.push("0123456789").unwrap(); 295 | buf.push("aaaaaaaaaa").unwrap(); 296 | buf.push("abc").unwrap(); 297 | set_offsets(&mut buf, 0); 298 | let meta = f.append(&mut buf).unwrap(); 299 | 300 | let second_msg_start = { 301 | let mut it = buf.iter(); 302 | let mut pos = meta.starting_position; 303 | pos += it.next().unwrap().total_bytes(); 304 | pos 305 | }; 306 | 307 | // byte max contains message 0 308 | let mut reader = MessageBufReader; 309 | let msgs = f 310 | .read_slice(&mut reader, 2, second_msg_start as u32 - 2) 311 | .unwrap(); 312 | 313 | assert_eq!(1, msgs.len()); 314 | } 315 | 316 | #[test] 317 | pub fn log_read_from_write() { 318 | let log_dir = TestDir::new(); 319 | let mut f = Segment::new(&log_dir, 0, 1024).unwrap(); 320 | 321 | { 322 | let mut buf = MessageBuf::default(); 323 | 
buf.push("0123456789").unwrap(); 324 | buf.push("aaaaaaaaaa").unwrap(); 325 | buf.push("abc").unwrap(); 326 | set_offsets(&mut buf, 0); 327 | f.append(&mut buf).unwrap(); 328 | } 329 | 330 | let mut reader = MessageBufReader; 331 | let msgs = f.read_slice(&mut reader, 2, 83).unwrap(); 332 | assert_eq!(3, msgs.len()); 333 | 334 | { 335 | let mut buf = MessageBuf::default(); 336 | buf.push("foo").unwrap(); 337 | set_offsets(&mut buf, 3); 338 | f.append(&mut buf).unwrap(); 339 | } 340 | 341 | let msgs = f.read_slice(&mut reader, 2, 106).unwrap(); 342 | assert_eq!(4, msgs.len()); 343 | 344 | for (i, m) in msgs.iter().enumerate() { 345 | assert_eq!(i as u64, m.offset()); 346 | } 347 | } 348 | 349 | #[test] 350 | pub fn log_remove() { 351 | let log_dir = TestDir::new(); 352 | let f = Segment::new(&log_dir, 0, 1024).unwrap(); 353 | 354 | let seg_exists = fs::read_dir(&log_dir) 355 | .unwrap() 356 | .find(|entry| { 357 | let path = entry.as_ref().unwrap().path(); 358 | path.file_name().unwrap() == "00000000000000000000.log" 359 | }) 360 | .is_some(); 361 | assert!(seg_exists, "Segment file does not exist?"); 362 | 363 | f.remove().unwrap(); 364 | 365 | let seg_exists = fs::read_dir(&log_dir) 366 | .unwrap() 367 | .find(|entry| { 368 | let path = entry.as_ref().unwrap().path(); 369 | path.file_name().unwrap() == "00000000000000000000.log" 370 | }) 371 | .is_some(); 372 | assert!(!seg_exists, "Segment file should have been removed"); 373 | } 374 | 375 | #[test] 376 | pub fn log_truncate() { 377 | let log_dir = TestDir::new(); 378 | let mut f = Segment::new(&log_dir, 0, 1024).unwrap(); 379 | 380 | let mut buf = MessageBuf::default(); 381 | buf.push("0123456789").unwrap(); 382 | buf.push("aaaaaaaaaa").unwrap(); 383 | buf.push("abc").unwrap(); 384 | set_offsets(&mut buf, 0); 385 | let meta = f.append(&mut buf).unwrap(); 386 | 387 | let mut reader = MessageBufReader; 388 | let msg_buf = f 389 | .read_slice(&mut reader, 2, f.size() as u32 - 2) 390 | .expect("Read after first append failed"); 391 | assert_eq!(3, msg_buf.len()); 392 | 393 | // find the second message starting point position in the segment 394 | let second_msg_start = { 395 | let mut it = buf.iter(); 396 | let mut pos = meta.starting_position; 397 | pos += it.next().unwrap().total_bytes(); 398 | pos 399 | }; 400 | 401 | // truncate to first message 402 | f.truncate(second_msg_start as u32).unwrap(); 403 | 404 | assert_eq!(second_msg_start, f.size()); 405 | 406 | let size = fs::metadata(&f.path).unwrap().len(); 407 | assert_eq!(second_msg_start as u64, size); 408 | 409 | let meta2 = { 410 | let mut buf = MessageBuf::default(); 411 | buf.push("zzzzzzzzzz").unwrap(); 412 | set_offsets(&mut buf, 1); 413 | f.append(&mut buf).unwrap() 414 | }; 415 | assert_eq!(second_msg_start, meta2.starting_position); 416 | 417 | let size = fs::metadata(&f.path).unwrap().len(); 418 | assert_eq!(f.size() as u64, size); 419 | 420 | // read the log 421 | let mut reader = MessageBufReader; 422 | let msg_buf = f 423 | .read_slice(&mut reader, 2, f.size() as u32 - 2) 424 | .expect("Read after second append failed"); 425 | assert_eq!(2, msg_buf.len()); 426 | } 427 | } 428 | -------------------------------------------------------------------------------- /src/message.rs: -------------------------------------------------------------------------------- 1 | //! Message encoding used for the on-disk format for the log. 
2 | use super::Offset; 3 | use byteorder::{ByteOrder, LittleEndian}; 4 | use bytes::BufMut; 5 | use crc32c::{crc32c, crc32c_append}; 6 | use log::trace; 7 | use std::{ 8 | io::{self, Read}, 9 | iter::{FromIterator, IntoIterator}, 10 | u16, 11 | }; 12 | 13 | /// Error for the message encoding or decoding. 14 | #[derive(Debug)] 15 | pub enum MessageError { 16 | /// `std::io` Error 17 | IoError(io::Error), 18 | /// Invalid crc32c hash encountered 19 | InvalidHash, 20 | /// Message payload length does not match the size field. 21 | InvalidPayloadLength, 22 | } 23 | 24 | #[derive(Debug, Copy, Clone)] 25 | pub enum MessageSerializationError { 26 | /// The metadata is too large to serialize 27 | MetadataExceedsLimit, 28 | /// The payload and metadata exceed the buffer size 29 | TotalSizeExceedsBuffer, 30 | } 31 | 32 | impl From<io::Error> for MessageError { 33 | fn from(e: io::Error) -> MessageError { 34 | MessageError::IoError(e) 35 | } 36 | } 37 | 38 | macro_rules! read_n { 39 | ($reader:expr, $buf:expr, $size:expr, $err_msg:expr) => {{ 40 | match $reader.read(&mut $buf) { 41 | Ok(s) if s == $size => (), 42 | Ok(_) => { 43 | return Err(MessageError::IoError(io::Error::new( 44 | io::ErrorKind::UnexpectedEof, 45 | $err_msg, 46 | ))); 47 | } 48 | Err(e) => return Err(MessageError::IoError(e)), 49 | } 50 | }}; 51 | ($reader:expr, $buf:expr, $size:expr) => {{ 52 | match $reader.read(&mut $buf) { 53 | Ok(s) if s == $size => (), 54 | Ok(_) => return Err(MessageError::InvalidPayloadLength), 55 | Err(e) => return Err(MessageError::IoError(e)), 56 | } 57 | }}; 58 | } 59 | 60 | pub const HEADER_SIZE: usize = 20; 61 | 62 | macro_rules! read_header { 63 | (offset, $buf:expr) => { 64 | LittleEndian::read_u64(&$buf[0..8]) 65 | }; 66 | (size, $buf:expr) => { 67 | LittleEndian::read_u32(&$buf[8..12]) 68 | }; 69 | (hash, $buf:expr) => { 70 | LittleEndian::read_u32(&$buf[12..16]) 71 | }; 72 | (meta_size, $buf:expr) => { 73 | LittleEndian::read_u16(&$buf[18..20]) 74 | }; 75 | } 76 | 77 | macro_rules!
set_header { 78 | (offset, $buf:expr, $v:expr) => { 79 | LittleEndian::write_u64(&mut $buf[0..8], $v) 80 | }; 81 | (size, $buf:expr, $v:expr) => { 82 | LittleEndian::write_u32(&mut $buf[8..12], $v) 83 | }; 84 | (hash, $buf:expr, $v:expr) => { 85 | LittleEndian::write_u32(&mut $buf[12..16], $v) 86 | }; 87 | (meta_size, $buf:expr, $v:expr) => { 88 | LittleEndian::write_u16(&mut $buf[18..20], $v) 89 | }; 90 | } 91 | 92 | /// Serializes a new message into a buffer 93 | pub fn serialize, P: AsRef<[u8]>>( 94 | mut bytes: B, 95 | offset: u64, 96 | meta: M, 97 | payload: P, 98 | ) -> Result<(), MessageSerializationError> { 99 | let payload_slice = payload.as_ref(); 100 | let meta_slice = meta.as_ref(); 101 | if meta_slice.len() > (u16::MAX) as usize { 102 | return Err(MessageSerializationError::MetadataExceedsLimit); 103 | } 104 | 105 | let append_size = HEADER_SIZE + meta_slice.len() + payload_slice.len(); 106 | if bytes.remaining_mut() < append_size { 107 | return Err(MessageSerializationError::TotalSizeExceedsBuffer); 108 | } 109 | 110 | let mut buf = [0; HEADER_SIZE]; 111 | set_header!(offset, buf, offset); 112 | set_header!(size, buf, (meta_slice.len() + payload_slice.len()) as u32); 113 | set_header!(hash, buf, crc32c_append(crc32c(meta_slice), payload_slice)); 114 | set_header!(meta_size, buf, meta_slice.len() as u16); 115 | 116 | // add the header 117 | bytes.put_slice(&buf); 118 | 119 | // metadata 120 | bytes.put_slice(meta_slice); 121 | 122 | // payload 123 | bytes.put_slice(payload_slice); 124 | 125 | Ok(()) 126 | } 127 | 128 | /// Messages contain finite-sized binary values with an offset from 129 | /// the beginning of the log. 130 | /// 131 | /// | Bytes | Encoding | Value | 132 | /// | --------- | ----------------- | ------------------------------ | 133 | /// | 0-7 | Little Endian u64 | Offset | 134 | /// | 8-11 | Little Endian u32 | Payload and Metadata Size | 135 | /// | 12-15 | Little Endian u32 | CRC32C of payload and metadata | 136 | /// | 16-17 | | Reserved | 137 | /// | m: 18-19 | Little Endian u16 | Size of metadata | 138 | /// | 20-(20+m-1) | | Metadata | 139 | /// | (20+m) | | Payload | 140 | #[derive(Debug)] 141 | pub struct Message<'a> { 142 | bytes: &'a [u8], 143 | } 144 | 145 | impl<'a> Message<'a> { 146 | /// crc32c of the payload. 147 | #[inline] 148 | pub fn hash(&self) -> u32 { 149 | read_header!(hash, self.bytes) 150 | } 151 | 152 | /// Size of the payload. 153 | #[inline] 154 | pub fn size(&self) -> u32 { 155 | read_header!(size, self.bytes) 156 | } 157 | 158 | pub(crate) fn total_bytes(&self) -> usize { 159 | self.bytes.len() 160 | } 161 | 162 | /// Offset of the message in the log. 163 | #[inline] 164 | pub fn offset(&self) -> Offset { 165 | read_header!(offset, self.bytes) 166 | } 167 | 168 | /// Payload of the message. 169 | #[inline] 170 | pub fn payload(&self) -> &[u8] { 171 | &self.bytes[(HEADER_SIZE + self.metadata_size() as usize)..] 172 | } 173 | 174 | /// Size of the metadata bytes. 175 | #[inline] 176 | pub fn metadata_size(&self) -> u16 { 177 | read_header!(meta_size, self.bytes) 178 | } 179 | 180 | /// Metadata bytes of hte message. 181 | #[inline] 182 | pub fn metadata(&self) -> &[u8] { 183 | &self.bytes[HEADER_SIZE..(HEADER_SIZE + self.metadata_size() as usize)] 184 | } 185 | 186 | /// Check that the hash matches the hash of the payload. 
193 | /// Message view allowing mutation of the offset. 194 | #[derive(Debug)] 195 | pub struct MessageMut<'a> { 196 | bytes: &'a mut [u8], 197 | } 198 | 199 | impl<'a> MessageMut<'a> { 200 | /// crc32c of the metadata and payload. 201 | #[inline] 202 | pub fn hash(&self) -> u32 { 203 | read_header!(hash, self.bytes) 204 | } 205 | 206 | /// Size of the metadata and payload. 207 | #[inline] 208 | pub fn size(&self) -> u32 { 209 | read_header!(size, self.bytes) 210 | } 211 | 212 | /// Offset of the message in the log. 213 | #[inline] 214 | pub fn offset(&self) -> Offset { 215 | read_header!(offset, self.bytes) 216 | } 217 | 218 | /// Payload of the message. 219 | #[inline] 220 | pub fn payload(&self) -> &[u8] { 221 | &self.bytes[(HEADER_SIZE + self.metadata_size() as usize)..] 222 | } 223 | 224 | /// Size of the metadata bytes. 225 | #[inline] 226 | pub fn metadata_size(&self) -> u16 { 227 | read_header!(meta_size, self.bytes) 228 | } 229 | 230 | /// Metadata bytes of the message. 231 | #[inline] 232 | pub fn metadata(&self) -> &[u8] { 233 | &self.bytes[HEADER_SIZE..(HEADER_SIZE + self.metadata_size() as usize)] 234 | } 235 | 236 | /// Check that the hash matches the hash of the metadata and payload. 237 | #[inline] 238 | pub fn verify_hash(&self) -> bool { 239 | self.hash() == crc32c(&self.bytes[HEADER_SIZE..]) 240 | } 241 | 242 | /// Sets the offset of the message 243 | #[inline] 244 | pub fn set_offset(&mut self, offset: u64) { 245 | set_header!(offset, self.bytes, offset); 246 | } 247 | } 248 | 249 | /// Iterator for `Message` within a `MessageSet`. 250 | pub struct MessageIter<'a> { 251 | bytes: &'a [u8], 252 | } 253 | 254 | impl<'a> Iterator for MessageIter<'a> { 255 | type Item = Message<'a>; 256 | 257 | fn next(&mut self) -> Option<Message<'a>> { 258 | if self.bytes.len() < HEADER_SIZE { 259 | return None; 260 | } 261 | 262 | let size = read_header!(size, self.bytes) as usize; 263 | 264 | trace!("message iterator: size {} bytes", size); 265 | assert!(self.bytes.len() >= HEADER_SIZE + size); 266 | 267 | let message_slice = &self.bytes[0..HEADER_SIZE + size]; 268 | self.bytes = &self.bytes[HEADER_SIZE + size..]; 269 | Some(Message { 270 | bytes: message_slice, 271 | }) 272 | } 273 | } 274 | 275 | /// Iterator for `MessageMut` within a `MessageSet`. 276 | pub struct MessageMutIter<'a> { 277 | bytes: &'a mut [u8], 278 | } 279 | 280 | impl<'a> Iterator for MessageMutIter<'a> { 281 | type Item = MessageMut<'a>; 282 | 283 | fn next(&mut self) -> Option<MessageMut<'a>> { 284 | let slice = std::mem::take(&mut self.bytes); 285 | if slice.len() < HEADER_SIZE { 286 | return None; 287 | } 288 | 289 | let size = read_header!(size, slice) as usize; 290 | 291 | trace!("message iterator: size {} bytes", size); 292 | assert!(slice.len() >= HEADER_SIZE + size); 293 | 294 | let (message_slice, rest) = slice.split_at_mut(HEADER_SIZE + size); 295 | self.bytes = rest; 296 | Some(MessageMut { 297 | bytes: message_slice, 298 | }) 299 | } 300 | } 301 | 302 | /// Serialized log message set. 303 | /// 304 | /// The bytes must be serialized in the format defined by `Message`. 305 | pub trait MessageSet { 306 | /// Bytes that make up the serialized message set. 307 | fn bytes(&self) -> &[u8]; 308 | 309 | /// Iterator on the messages in the message set. 310 | fn iter(&self) -> MessageIter { 311 | MessageIter { 312 | bytes: self.bytes(), 313 | } 314 | } 315 | 316 | /// Number of messages in the message set. 317 | fn len(&self) -> usize { 318 | self.iter().count() 319 | } 320 | 321 | /// Indicator of whether there are messages within the `MessageSet`. 322 | fn is_empty(&self) -> bool { 323 | self.bytes().is_empty() 324 | } 325 | 326 | /// Verifies the hashes of all the messages, returning the 327 | /// index of a corrupt message when found. 328 | fn verify_hashes(&self) -> Result<(), usize> { 329 | for (i, msg) in self.iter().enumerate() { 330 | if !msg.verify_hash() { 331 | return Err(i); 332 | } 333 | } 334 | Ok(()) 335 | } 336 | } 337 | 
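`MessageBuf` (defined just below) implements `MessageSet`, so the default methods above come along for free; a brief usage sketch of iteration and the corruption check:

```rust
use commitlog::message::{set_offsets, MessageBuf, MessageSet};

fn main() {
    let mut buf = MessageBuf::default();
    buf.push("hello").unwrap();
    buf.push("world").unwrap();
    set_offsets(&mut buf, 7);

    assert_eq!(buf.len(), 2);
    // each message still matches its crc32c, so no corrupt index is reported
    assert!(buf.verify_hashes().is_ok());

    for msg in buf.iter() {
        // prints offsets 7 and 8 with the raw payload bytes
        println!("{} -> {:?}", msg.offset(), msg.payload());
    }
}
```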
338 | /// Message set that can be mutated. 339 | /// 340 | /// The mutation occurs once the `MessageSet` has been appended to the log. The 341 | /// messages will contain the absolute offsets after the append operation. 342 | pub trait MessageSetMut: MessageSet { 343 | /// Bytes of the buffer for mutation. 344 | fn bytes_mut(&mut self) -> &mut [u8]; 345 | 346 | /// Mutable iterator 347 | fn iter_mut(&mut self) -> MessageMutIter { 348 | MessageMutIter { 349 | bytes: self.bytes_mut(), 350 | } 351 | } 352 | } 353 | 354 | /// Mutable message buffer. 355 | /// 356 | /// The buffer will handle the serialization of the message into the proper 357 | /// format expected by the `CommitLog`. 358 | #[derive(Default)] 359 | pub struct MessageBuf { 360 | bytes: Vec<u8>, 361 | len: usize, 362 | } 363 | 364 | impl MessageSet for MessageBuf { 365 | /// Bytes that make up the serialized message set. 366 | fn bytes(&self) -> &[u8] { 367 | &self.bytes 368 | } 369 | 370 | /// Number of messages in the message set. 371 | fn len(&self) -> usize { 372 | self.len 373 | } 374 | } 375 | 376 | impl MessageSetMut for MessageBuf { 377 | fn bytes_mut(&mut self) -> &mut [u8] { 378 | &mut self.bytes 379 | } 380 | } 381 | 382 | impl<B: AsRef<[u8]>> FromIterator<B> for MessageBuf { 383 | fn from_iter<T>(iter: T) -> MessageBuf 384 | where 385 | T: IntoIterator<Item = B>, 386 | { 387 | let mut buf = MessageBuf::default(); 388 | for v in iter.into_iter() { 389 | buf.push(v) 390 | .expect("Total size of messages exceeds usize::MAX"); 391 | } 392 | buf 393 | } 394 | } 395 | 396 | impl MessageBuf { 397 | /// Creates a message buffer from a previously serialized vector of bytes. 398 | /// Integrity checking is performed on the vector to ensure that it was 399 | /// properly serialized. 400 | pub fn from_bytes(bytes: Vec<u8>) -> Result<MessageBuf, MessageError> { 401 | let mut msgs = 0usize; 402 | 403 | // iterate over the bytes to initialize size and ensure we have 404 | // a properly formed message set 405 | { 406 | let mut bytes = bytes.as_slice(); 407 | while !bytes.is_empty() { 408 | // check that the offset, size and hash are present 409 | if bytes.len() < HEADER_SIZE { 410 | return Err(MessageError::InvalidPayloadLength); 411 | } 412 | 413 | let size = read_header!(size, bytes) as usize; 414 | let hash = read_header!(hash, bytes); 415 | 416 | let next_msg_offset = HEADER_SIZE + size; 417 | if bytes.len() < next_msg_offset { 418 | return Err(MessageError::InvalidPayloadLength); 419 | } 420 | 421 | // check the hash 422 | let payload_hash = crc32c(&bytes[HEADER_SIZE..next_msg_offset]); 423 | if payload_hash != hash { 424 | return Err(MessageError::InvalidHash); 425 | } 426 | 427 | // update metadata 428 | msgs += 1; 429 | 430 | // move the slice along 431 | bytes = &bytes[next_msg_offset..]; 432 | } 433 | } 434 | Ok(MessageBuf { bytes, len: msgs }) 435 | } 436 | 
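The integrity pass in `from_bytes` means a buffer can be shipped as raw bytes and revived later; a short round-trip sketch, including the failure on a corrupted payload byte:

```rust
use commitlog::message::{MessageBuf, MessageError};

fn main() {
    let mut buf = MessageBuf::default();
    buf.push("abc").unwrap();
    let mut bytes = buf.into_bytes();

    // pristine bytes pass the integrity checks
    assert!(MessageBuf::from_bytes(bytes.clone()).is_ok());

    // flipping one payload bit breaks the crc32c comparison
    let last = bytes.len() - 1;
    bytes[last] ^= 0x01;
    let res = MessageBuf::from_bytes(bytes);
    assert!(matches!(res.err(), Some(MessageError::InvalidHash)));
}
```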
437 | /// Clears the message buffer. 438 | pub fn clear(&mut self) { 439 | self.bytes.clear(); 440 | self.len = 0; 441 | } 442 | 443 | /// Clears the message buffer without dropping the contents. 444 | /// 445 | /// # Safety 446 | /// The bytes within the message buffer will remain. Implementations that 447 | /// wish to clear the buffer for security reasons should use `clear()`. 448 | pub unsafe fn unsafe_clear(&mut self) { 449 | self.bytes.set_len(0); 450 | self.len = 0; 451 | } 452 | 453 | /// Moves the underlying serialized bytes into a vector. 454 | pub fn into_bytes(self) -> Vec<u8> { 455 | self.bytes 456 | } 457 | 458 | /// Adds a new message to the buffer. 459 | pub fn push<B: AsRef<[u8]>>(&mut self, payload: B) -> Result<(), MessageSerializationError> { 460 | // blank offset, expect the log to set the offsets 461 | // empty metadata 462 | let meta = [0u8; 0]; 463 | serialize(&mut self.bytes, 0u64, &meta, payload)?; 464 | self.len += 1; 465 | Ok(()) 466 | } 467 | 468 | /// Adds a new message with metadata. 469 | pub fn push_with_metadata<M: AsRef<[u8]>, B: AsRef<[u8]>>( 470 | &mut self, 471 | metadata: M, 472 | payload: B, 473 | ) -> Result<(), MessageSerializationError> { 474 | // blank offset, expect the log to set the offsets 475 | // caller-supplied metadata 476 | serialize(&mut self.bytes, 0u64, metadata, payload)?; 477 | self.len += 1; 478 | Ok(()) 479 | } 480 | 481 | /// Reads a single message. The reader is expected to have a full message 482 | /// serialized. 483 | pub fn read<R: Read>(&mut self, reader: &mut R) -> Result<(), MessageError> { 484 | let mut buf = [0; HEADER_SIZE]; 485 | read_n!(reader, buf, HEADER_SIZE, "Unable to read header"); 486 | 487 | let size = read_header!(size, buf) as usize; 488 | let hash = read_header!(hash, buf); 489 | 490 | let mut bytes = vec![0; size]; 491 | read_n!(reader, bytes, size); 492 | 493 | let payload_hash = crc32c(&bytes); 494 | if payload_hash != hash { 495 | return Err(MessageError::InvalidHash); 496 | } 497 | 498 | self.bytes.extend_from_slice(&buf); 499 | self.bytes.extend(bytes); 500 | 501 | self.len += 1; 502 | 503 | Ok(()) 504 | } 505 | } 506 | 507 | /// Mutates the buffer with starting offset 508 | pub fn set_offsets<S: MessageSetMut>(msg_set: &mut S, starting_offset: u64) { 509 | let mut offset = starting_offset; 510 | 511 | for mut msg in msg_set.iter_mut() { 512 | msg.set_offset(offset); 513 | offset += 1; 514 | } 515 | } 516 | 517 | #[cfg(test)] 518 | mod tests { 519 | use super::*; 520 | use env_logger; 521 | use std::io; 522 | 523 | #[test] 524 | fn message_construction() { 525 | env_logger::try_init().unwrap_or(()); 526 | let mut msg_buf = MessageBuf::default(); 527 | msg_buf.push("123456789").unwrap(); 528 | msg_buf.push("000000000").unwrap(); 529 | 530 | set_offsets(&mut msg_buf, 100); 531 | 532 | let mut msg_it = msg_buf.iter(); 533 | { 534 | let msg = msg_it.next().unwrap(); 535 | assert_eq!(msg.payload(), b"123456789"); 536 | assert_eq!(msg.hash(), 3808858755); 537 | assert_eq!(msg.size(), 9u32); 538 | assert_eq!(msg.offset(), 100); 539 | assert!(msg.verify_hash()); 540 | } 541 | { 542 | let msg = msg_it.next().unwrap(); 543 | assert_eq!(msg.payload(), b"000000000"); 544 | assert_eq!(msg.hash(), 49759193); 545 | assert_eq!(msg.size(), 9u32); 546 | assert_eq!(msg.offset(), 101); 547 | assert!(msg.verify_hash()); 548 | } 549 | 550 | assert!(msg_it.next().is_none()); 551 | } 552 | 553 | #[test] 554 | fn message_read() { 555 | let mut buf = Vec::new(); 556 | serialize(&mut buf, 120, b"", b"123456789").unwrap(); 557 | let mut buf_reader = io::BufReader::new(buf.as_slice()); 558 | 559 | let mut reader = MessageBuf::default(); 560 | let read_msg_result = reader.read(&mut buf_reader); 561 | assert!(read_msg_result.is_ok(), "result = {:?}", read_msg_result); 562 | 563 | let read_msg
= reader.iter().next().unwrap(); 564 | assert_eq!(read_msg.payload(), b"123456789"); 565 | assert_eq!(read_msg.size(), 9u32); 566 | assert_eq!(read_msg.offset(), 120); 567 | } 568 | 569 | #[test] 570 | fn message_construction_with_metadata() { 571 | let mut buf = Vec::new(); 572 | serialize(&mut buf, 120, b"123", b"456789").unwrap(); 573 | let res = MessageBuf::from_bytes(buf).unwrap(); 574 | let msg = res.iter().next().unwrap(); 575 | assert_eq!(b"123", msg.metadata()); 576 | assert_eq!(b"456789", msg.payload()); 577 | } 578 | 579 | #[test] 580 | fn message_buf_push_with_meta() { 581 | let mut buf = MessageBuf::default(); 582 | buf.push_with_metadata(b"123", b"456789").unwrap(); 583 | let msg = buf.iter().next().unwrap(); 584 | assert_eq!(b"123", msg.metadata()); 585 | assert_eq!(b"456789", msg.payload()); 586 | } 587 | 588 | #[test] 589 | fn message_read_invalid_hash() { 590 | let mut buf = Vec::new(); 591 | serialize(&mut buf, 120, b"", b"123456789").unwrap(); 592 | // mess with the payload such that the hash does not match 593 | let last_ind = buf.len() - 1; 594 | buf[last_ind] ^= buf[last_ind] + 1; 595 | let mut buf_reader = io::BufReader::new(buf.as_slice()); 596 | 597 | let mut reader = MessageBuf::default(); 598 | let read_msg_result = reader.read(&mut buf_reader); 599 | let matches_invalid_hash = match read_msg_result { 600 | Err(MessageError::InvalidHash) => true, 601 | _ => false, 602 | }; 603 | assert!( 604 | matches_invalid_hash, 605 | "Invalid result, not Hash error. Result = {:?}", 606 | read_msg_result 607 | ); 608 | } 609 | 610 | #[test] 611 | fn message_read_invalid_payload_length() { 612 | let mut buf = Vec::new(); 613 | serialize(&mut buf, 120, b"", b"123456789").unwrap(); 614 | // pop the last byte 615 | buf.pop(); 616 | 617 | let mut buf_reader = io::BufReader::new(buf.as_slice()); 618 | let mut msg_reader = MessageBuf::default(); 619 | let read_msg_result = msg_reader.read(&mut buf_reader); 620 | let matches_invalid_length = match read_msg_result { 621 | Err(MessageError::InvalidPayloadLength) => true, 622 | _ => false, 623 | }; 624 | assert!( 625 | matches_invalid_length, 626 | "Invalid result, not an InvalidPayloadLength error. 
Result = {:?}", 627 | read_msg_result 628 | ); 629 | } 630 | 631 | #[test] 632 | pub fn messagebuf_fromiterator() { 633 | let buf = vec!["test", "123"].iter().collect::(); 634 | assert_eq!(2, buf.len()); 635 | } 636 | 637 | #[test] 638 | pub fn messageset_deserialize() { 639 | let bytes = { 640 | let mut buf = MessageBuf::default(); 641 | buf.push("foo").unwrap(); 642 | buf.push("bar").unwrap(); 643 | buf.push("baz").unwrap(); 644 | set_offsets(&mut buf, 10); 645 | buf.into_bytes() 646 | }; 647 | 648 | let bytes_copy = bytes; 649 | 650 | // deserialize it 651 | let res = MessageBuf::from_bytes(bytes_copy); 652 | assert!(res.is_ok()); 653 | let res = res.unwrap(); 654 | assert_eq!(3, res.len()); 655 | 656 | let mut it = res.iter(); 657 | 658 | { 659 | let m0 = it.next().unwrap(); 660 | assert_eq!(10, m0.offset()); 661 | assert_eq!(b"foo", m0.payload()); 662 | } 663 | 664 | { 665 | let m1 = it.next().unwrap(); 666 | assert_eq!(11, m1.offset()); 667 | assert_eq!(b"bar", m1.payload()); 668 | } 669 | 670 | { 671 | let m2 = it.next().unwrap(); 672 | assert_eq!(12, m2.offset()); 673 | assert_eq!(b"baz", m2.payload()); 674 | } 675 | 676 | let n = it.next(); 677 | assert!(n.is_none()); 678 | } 679 | } 680 | -------------------------------------------------------------------------------- /src/index.rs: -------------------------------------------------------------------------------- 1 | use super::Offset; 2 | use byteorder::{ByteOrder, LittleEndian}; 3 | use log::{info, trace, warn}; 4 | use memmap2::MmapMut; 5 | use std::{ 6 | cmp::Ordering, 7 | fs::{self, File, OpenOptions}, 8 | io::{self, Write}, 9 | path::{Path, PathBuf}, 10 | u64, usize, 11 | }; 12 | 13 | /// Number of bytes in each entry pair 14 | pub const INDEX_ENTRY_BYTES: usize = 8; 15 | /// Number of bytes contained in the base name of the file. 16 | pub const INDEX_FILE_NAME_LEN: usize = 20; 17 | /// File extension for the index file. 18 | pub static INDEX_FILE_NAME_EXTENSION: &str = "index"; 19 | 20 | #[inline] 21 | fn binary_search(index: &[u8], f: F) -> usize 22 | where 23 | F: Fn(u32, u32) -> Ordering, 24 | { 25 | assert_eq!(index.len() % INDEX_ENTRY_BYTES, 0); 26 | 27 | let mut i = 0usize; 28 | let mut j = (index.len() / INDEX_ENTRY_BYTES) - 1; 29 | 30 | while i < j { 31 | // grab midpoint 32 | let m = i + ((j - i) / 2); 33 | 34 | // read the relative offset at the midpoint 35 | let mi = m * INDEX_ENTRY_BYTES; 36 | let rel_off = LittleEndian::read_u32(&index[mi..mi + 4]); 37 | let file_pos = LittleEndian::read_u32(&index[mi + 4..mi + 8]); 38 | 39 | match f(rel_off, file_pos) { 40 | Ordering::Equal => return m, 41 | Ordering::Less => { 42 | i = m + 1; 43 | } 44 | Ordering::Greater => { 45 | j = m; 46 | } 47 | } 48 | } 49 | i 50 | } 51 | 52 | macro_rules! entry { 53 | ($mem:ident, $pos:expr) => { 54 | ( 55 | LittleEndian::read_u32(&$mem[($pos)..($pos) + 4]), 56 | LittleEndian::read_u32(&$mem[($pos) + 4..($pos) + 8]), 57 | ) 58 | }; 59 | } 60 | 61 | #[derive(Copy, Clone, PartialEq, Eq, Debug)] 62 | pub enum RangeFindError { 63 | /// The starting offset supplied was not found. 64 | OffsetNotAppended, 65 | /// The offset requested exceeded the max bytes. 66 | MessageExceededMaxBytes, 67 | } 68 | 69 | /// Range within a single segment file of messages. 
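For reference, the 8-byte entry layout that `binary_search` and the `entry!` macro above walk over can be sketched standalone. This is a minimal illustration using the same `byteorder` calls as the index; the offsets and positions are chosen arbitrarily:

```rust
use byteorder::{ByteOrder, LittleEndian};

/// Number of bytes in each entry pair (mirrors the constant above).
const INDEX_ENTRY_BYTES: usize = 8;

fn main() {
    // An entry is two little-endian u32s: the message offset relative to the
    // segment's base offset, followed by the message's byte position within
    // the segment file.
    let base_offset: u64 = 100;
    let abs_offset: u64 = 102;
    let file_pos: u32 = 4096;

    let mut entry = [0u8; INDEX_ENTRY_BYTES];
    LittleEndian::write_u32(&mut entry[0..4], (abs_offset - base_offset) as u32);
    LittleEndian::write_u32(&mut entry[4..8], file_pos);

    // Decoding reverses the packing and re-adds the base offset.
    let rel_off = LittleEndian::read_u32(&entry[0..4]);
    let pos = LittleEndian::read_u32(&entry[4..8]);
    assert_eq!(u64::from(rel_off) + base_offset, abs_offset);
    assert_eq!(pos, file_pos);
}
```

Storing the relative rather than the absolute offset is what keeps entries at 8 bytes: the u64 base offset is recorded once in the file name, so each entry only needs a u32 delta.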
70 | #[derive(Copy, Clone, PartialEq, Eq, Debug)] 71 | pub struct MessageSetRange { 72 | file_pos: u32, 73 | bytes: u32, 74 | } 75 | 76 | impl MessageSetRange { 77 | pub fn file_position(self) -> u32 { 78 | self.file_pos 79 | } 80 | 81 | pub fn bytes(self) -> u32 { 82 | self.bytes 83 | } 84 | } 85 | 86 | /// An index is a file with pairs of relative offset to file position offset 87 | /// of messages at the relative offset messages. The index is Memory Mapped. 88 | pub struct Index { 89 | file: File, 90 | path: PathBuf, 91 | mmap: MmapMut, 92 | mode: AccessMode, 93 | 94 | /// next starting byte in index file offset to write 95 | next_write_pos: usize, 96 | last_flush_end_pos: usize, 97 | base_offset: u64, 98 | } 99 | 100 | /// Describes the access mode of the index 101 | #[derive(Clone, Copy, Debug, PartialEq, Eq)] 102 | pub enum AccessMode { 103 | /// Only reads are permitted. 104 | Read, 105 | /// This is the active index and can be read or written to. 106 | ReadWrite, 107 | } 108 | 109 | /// Buffer used to amortize writes to the index. 110 | pub struct IndexBuf(Vec, u64); 111 | 112 | impl IndexBuf { 113 | pub fn new(len: usize, starting_offset: u64) -> IndexBuf { 114 | IndexBuf(Vec::with_capacity(len * INDEX_ENTRY_BYTES), starting_offset) 115 | } 116 | 117 | pub fn push(&mut self, abs_offset: u64, position: u32) { 118 | // TODO: assert that the offset is > previous 119 | assert!( 120 | abs_offset >= self.1, 121 | "Attempt to append to an offset before base offset in index" 122 | ); 123 | 124 | let mut tmp_buf = [0u8; INDEX_ENTRY_BYTES]; 125 | LittleEndian::write_u32(&mut tmp_buf[0..4], (abs_offset - self.1) as u32); 126 | LittleEndian::write_u32(&mut tmp_buf[4..], position); 127 | self.0.extend_from_slice(&tmp_buf); 128 | } 129 | } 130 | 131 | #[inline] 132 | fn to_page_size(size: usize) -> usize { 133 | let truncated = size - (size & (page_size::get() - 1)); 134 | assert_eq!(truncated % page_size::get(), 0); 135 | assert!(truncated <= size); 136 | truncated 137 | } 138 | 139 | impl Index { 140 | pub fn new
<P>
(log_dir: P, base_offset: u64, file_bytes: usize) -> io::Result 141 | where 142 | P: AsRef, 143 | { 144 | // open the file, expecting to create it 145 | let index_path = { 146 | let mut path_buf = PathBuf::new(); 147 | path_buf.push(&log_dir); 148 | path_buf.push(format!("{:020}", base_offset)); 149 | path_buf.set_extension(INDEX_FILE_NAME_EXTENSION); 150 | path_buf 151 | }; 152 | 153 | info!("Creating index file {:?}", &index_path); 154 | 155 | let index_file = OpenOptions::new() 156 | .read(true) 157 | .write(true) 158 | .append(true) 159 | .create_new(true) 160 | .open(&index_path)?; 161 | 162 | // read the metadata and truncate 163 | let meta = index_file.metadata()?; 164 | let len = meta.len(); 165 | if len == 0 { 166 | index_file.set_len(file_bytes as u64)?; 167 | } 168 | 169 | let mmap = unsafe { MmapMut::map_mut(&index_file)? }; 170 | 171 | Ok(Index { 172 | file: index_file, 173 | path: index_path, 174 | mmap, 175 | mode: AccessMode::ReadWrite, 176 | next_write_pos: 0, 177 | last_flush_end_pos: 0, 178 | base_offset, 179 | }) 180 | } 181 | 182 | pub fn open
<P>
(index_path: P) -> io::Result 183 | where 184 | P: AsRef, 185 | { 186 | let index_file = OpenOptions::new() 187 | .read(true) 188 | .write(true) 189 | .append(true) 190 | .open(&index_path)?; 191 | 192 | let filename = index_path.as_ref().file_name().unwrap().to_str().unwrap(); 193 | let base_offset = match (&filename[0..INDEX_FILE_NAME_LEN]).parse::() { 194 | Ok(v) => v, 195 | Err(_) => { 196 | return Err(io::Error::new( 197 | io::ErrorKind::InvalidData, 198 | "Index file name does not parse as u64", 199 | )); 200 | } 201 | }; 202 | 203 | let mmap = unsafe { MmapMut::map_mut(&index_file)? }; 204 | 205 | let (next_write_pos, mode) = { 206 | let index = &mmap[..]; 207 | assert_eq!(index.len() % INDEX_ENTRY_BYTES, 0); 208 | 209 | // check if this is a full or partial index 210 | match entry!(index, index.len() - INDEX_ENTRY_BYTES) { 211 | (0, 0) => { 212 | // partial index, search for break point 213 | let write_pos = INDEX_ENTRY_BYTES 214 | * binary_search(index, |rel_off, file_off| { 215 | // find the first unwritten index entry: 216 | // +#############-----|----------------+ 217 | // written msgs empty msgs 218 | // if the file pos is 0, we're in the empty msgs, go left 219 | // otherwise, we're in the written msgs, go right 220 | // 221 | // NOTE: it is assumed the segment will never start at 0 222 | // since it contains at least 1 magic byte 223 | if file_off == 0 && rel_off == 0 { 224 | Ordering::Greater 225 | } else { 226 | Ordering::Less 227 | } 228 | }); 229 | (write_pos, AccessMode::ReadWrite) 230 | } 231 | _ => (index.len(), AccessMode::Read), 232 | } 233 | }; 234 | 235 | info!( 236 | "Opening index {}, next write pos {}, mode {:?}", 237 | filename, next_write_pos, mode 238 | ); 239 | 240 | Ok(Index { 241 | file: index_file, 242 | path: index_path.as_ref().to_path_buf(), 243 | mmap, 244 | mode, 245 | next_write_pos, 246 | last_flush_end_pos: next_write_pos, 247 | base_offset, 248 | }) 249 | } 250 | 251 | #[inline] 252 | pub fn starting_offset(&self) -> u64 { 253 | self.base_offset 254 | } 255 | 256 | pub fn is_empty(&self) -> bool { 257 | self.next_write_pos == 0 258 | } 259 | 260 | #[inline] 261 | pub fn size(&self) -> usize { 262 | self.mmap.len() 263 | } 264 | 265 | // TODO: use memremap on linux 266 | fn resize(&mut self) -> io::Result<()> { 267 | // increase length by 50% -= 7 for alignment 268 | let new_len = { 269 | let l = self.size(); 270 | let new_size = l + (l / 2); 271 | // align to byte size 272 | new_size - (new_size % INDEX_ENTRY_BYTES) 273 | }; 274 | 275 | // unmap the file (Set to dummy anonymous map) 276 | self.mmap = MmapMut::map_anon(32)?; 277 | self.file.set_len(new_len as u64)?; 278 | self.mmap = unsafe { MmapMut::map_mut(&self.file)? 
}; 279 | Ok(()) 280 | } 281 | 282 | pub fn append(&mut self, offsets: IndexBuf) -> io::Result<()> { 283 | // TODO: trace 284 | //trace!("Index append: {:?}", abs_offset, position); 285 | 286 | assert_eq!( 287 | self.base_offset, offsets.1, 288 | "Buffer starting offset does not match the index starting offset" 289 | ); 290 | assert_eq!( 291 | self.mode, 292 | AccessMode::ReadWrite, 293 | "Attempt to append to readonly index" 294 | ); 295 | 296 | // check if we need to resize 297 | if self.size() < (self.next_write_pos + offsets.0.len()) { 298 | self.resize()?; 299 | } 300 | 301 | let mem_slice: &mut [u8] = &mut self.mmap[..]; 302 | let start = self.next_write_pos; 303 | let end = start + offsets.0.len(); 304 | 305 | mem_slice[start..end].copy_from_slice(&offsets.0); 306 | 307 | self.next_write_pos = end; 308 | Ok(()) 309 | } 310 | 311 | pub fn set_readonly(&mut self) -> io::Result<()> { 312 | if self.mode != AccessMode::Read { 313 | self.mode = AccessMode::Read; 314 | 315 | // trim un-used entries by reducing mmap view and truncating file 316 | if self.next_write_pos < self.mmap.len() { 317 | // TODO: fix restrict 318 | // self.mmap.restrict(0, self.next_write_pos)?; 319 | if let Err(e) = self.file.set_len(self.next_write_pos as u64) { 320 | warn!( 321 | "Unable to truncate index file {:020}.{} to proper length: {:?}", 322 | self.base_offset, INDEX_FILE_NAME_EXTENSION, e 323 | ); 324 | } 325 | } 326 | 327 | self.flush_sync() 328 | } else { 329 | Ok(()) 330 | } 331 | } 332 | 333 | pub fn remove(self) -> io::Result<()> { 334 | let path = self.path.clone(); 335 | drop(self); 336 | 337 | info!("Removing index file {}", path.display()); 338 | fs::remove_file(path) 339 | } 340 | 341 | /// Truncates to an offset, inclusive. The file length of the 342 | /// segment for truncation is returned. 343 | pub fn truncate(&mut self, offset: Offset) -> Option { 344 | // find the next offset position in order to inform 345 | // the truncation of the segment 346 | let next_pos = match self.find_index_pos(offset + 1) { 347 | Some(i) => { 348 | trace!("Found offset mem offset {}", i); 349 | i 350 | } 351 | None => { 352 | trace!("No offset {} found in index", offset + 1); 353 | return None; 354 | } 355 | }; 356 | 357 | let mem = &mut self.mmap[..]; 358 | 359 | let (off, file_len) = entry!(mem, next_pos); 360 | 361 | // find_index_pos will find the right-most position, which may include 362 | // something <= the offset passed in, which we should reject for 363 | // truncation. This likely occurs when the last offset is the offset 364 | // requested for truncation OR the offset for truncation is > than the 365 | // last offset. 366 | if u64::from(off) + self.base_offset <= offset { 367 | trace!("Truncated to exact segment boundary, no need to truncate segment"); 368 | return None; 369 | } 370 | 371 | trace!( 372 | "Start of truncation at offset {}, to segment length {}", 373 | offset, 374 | file_len 375 | ); 376 | 377 | // override file positions > offset 378 | for elem in &mut mem[next_pos..self.next_write_pos].iter_mut() { 379 | *elem = 0; 380 | } 381 | 382 | // re-adjust the next file pos 383 | self.next_write_pos = next_pos; 384 | 385 | Some(file_len) 386 | } 387 | 388 | /// Flush the index at page boundaries. This may leave some indexed values 389 | /// not flushed during crash, which will be rehydrated on restart. 
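The page rounding that `flush_sync` performs below uses the mask trick from `to_page_size` above; here is a small self-contained sketch of that arithmetic. It assumes, as the real code effectively does, that the page size is a power of two; 4096 is only a stand-in for `page_size::get()`:

```rust
// Round a byte position down to the nearest page boundary by masking off the
// low bits. Only valid when `page` is a power of two.
fn to_page_size(size: usize, page: usize) -> usize {
    size - (size & (page - 1))
}

fn main() {
    let page = 4096; // stand-in for page_size::get()
    assert_eq!(to_page_size(0, page), 0);
    assert_eq!(to_page_size(4095, page), 0); // still within the first page
    assert_eq!(to_page_size(4096, page), 4096); // exactly one page
    assert_eq!(to_page_size(10_000, page), 8192); // rounded down to two pages
}
```

Because `flush_sync` flushes only the page-aligned span between the last flush position and the current write position, entries in the final partial page are the ones that may be lost in a crash and rediscovered when the index is reopened.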
390 | pub fn flush_sync(&mut self) -> io::Result<()> { 391 | let start = to_page_size(self.last_flush_end_pos); 392 | let end = to_page_size(self.next_write_pos); 393 | 394 | if end > start { 395 | self.mmap.flush_range(start, end - start)?; 396 | self.last_flush_end_pos = end; 397 | self.file.flush() 398 | } else { 399 | Ok(()) 400 | } 401 | } 402 | 403 | pub fn next_offset(&self) -> Offset { 404 | if self.next_write_pos == 0 { 405 | self.base_offset 406 | } else { 407 | let entry = self 408 | .read_entry((self.next_write_pos / INDEX_ENTRY_BYTES) - 1) 409 | .unwrap(); 410 | entry.0 + 1 411 | } 412 | } 413 | 414 | pub fn read_entry(&self, i: usize) -> Option<(Offset, u32)> { 415 | if self.size() < (i + 1) * 8 { 416 | return None; 417 | } 418 | 419 | let mem_slice = &self.mmap[..]; 420 | let start = i * INDEX_ENTRY_BYTES; 421 | let offset = LittleEndian::read_u32(&mem_slice[start..start + 4]); 422 | if offset == 0 && i > 0 { 423 | None 424 | } else { 425 | let pos = LittleEndian::read_u32(&mem_slice[start + 4..start + 8]); 426 | Some((u64::from(offset) + self.base_offset, pos)) 427 | } 428 | } 429 | 430 | /// Finds the index entry corresponding to the offset. 431 | /// 432 | /// If the entry does not exist in the index buy an entry > the offset 433 | /// exists, that entry is used. 434 | /// 435 | /// If the entry does not exist and the last entry is < the desired, 436 | /// the offset has not been written to this index and None value is 437 | /// returned. 438 | #[allow(dead_code)] 439 | pub fn find(&self, offset: Offset) -> Option<(Offset, u32)> { 440 | self.find_index_pos(offset).and_then(|p| { 441 | let mem_slice = &self.mmap[..]; 442 | let (rel_off, file_pos) = entry!(mem_slice, p); 443 | let abs_off = u64::from(rel_off) + self.base_offset; 444 | if abs_off < offset { 445 | None 446 | } else { 447 | Some((abs_off, file_pos)) 448 | } 449 | }) 450 | } 451 | 452 | /// Finds the longest message set range within a single segment aligning to 453 | /// the `max_bytes` parameter. 
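Before reading the implementation, the selection rule (take whole messages starting at `offset` until the next one would push the range past `max_bytes`, or take the rest of the segment if it already fits) can be illustrated over a plain vector of file positions. This sketch is not the real code, no index and no mmap, and it folds the `OffsetNotAppended` case away; `None` stands in for `MessageExceededMaxBytes`. The numbers match the `find_segment_range_offset` test further down:

```rust
// positions[i] is the starting byte of message i; message i ends where
// message i + 1 begins (or at seg_bytes for the last message).
fn find_range(positions: &[u32], start: usize, max_bytes: u32, seg_bytes: u32) -> Option<(u32, u32)> {
    let start_pos = positions[start];
    // The rest of the segment already fits within the budget: take it all.
    if seg_bytes - start_pos <= max_bytes {
        return Some((start_pos, seg_bytes - start_pos));
    }
    // Otherwise extend message by message until the budget would be exceeded.
    let mut end_pos = start_pos;
    for &p in &positions[start + 1..] {
        if p - start_pos > max_bytes {
            break;
        }
        end_pos = p;
    }
    let bytes = end_pos - start_pos;
    // bytes == 0 means even the first message alone exceeds max_bytes.
    if bytes == 0 { None } else { Some((start_pos, bytes)) }
}

fn main() {
    // Five 10-byte messages at positions 10..=50 in a 60-byte segment.
    let positions = [10, 20, 30, 40, 50];
    assert_eq!(find_range(&positions, 0, 20, 60), Some((10, 20))); // two messages
    assert_eq!(find_range(&positions, 1, 80, 60), Some((20, 40))); // rest of segment
    assert_eq!(find_range(&positions, 0, 5, 60), None); // message exceeds max bytes
}
```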
454 | pub fn find_segment_range( 455 | &self, 456 | offset: Offset, 457 | max_bytes: u32, 458 | seg_bytes: u32, 459 | ) -> Result { 460 | assert!(max_bytes > 0, "Cannot request 0 bytes to be read"); 461 | 462 | // position within the index to start finding a sequence 463 | let start_ind_pos = match self.find_index_pos(offset) { 464 | Some(v) => v, 465 | _ => return Err(RangeFindError::OffsetNotAppended), 466 | }; 467 | 468 | let mem_slice = &self.mmap[..]; 469 | let (_, start_file_pos) = entry!(mem_slice, start_ind_pos); 470 | 471 | // try to get until the end of the segment 472 | if seg_bytes - start_file_pos < max_bytes { 473 | trace!("Requested range contains the rest of the segment, does not exceed max bytes"); 474 | return Ok(MessageSetRange { 475 | file_pos: start_file_pos, 476 | bytes: seg_bytes - start_file_pos, 477 | }); 478 | } 479 | 480 | let search_range = &mem_slice[start_ind_pos..self.next_write_pos]; 481 | if search_range.is_empty() { 482 | return Err(RangeFindError::MessageExceededMaxBytes); 483 | } 484 | 485 | let end_ind_pos = binary_search(search_range, |_, pos| { 486 | (pos - start_file_pos).cmp(&max_bytes) 487 | }); 488 | 489 | let pos = { 490 | // binary search will choose the next entry when the left value is less, and the 491 | // right value is greater and not equal, so fix by grabbing the left 492 | let (_, pos) = entry!(search_range, end_ind_pos * INDEX_ENTRY_BYTES); 493 | if end_ind_pos > 0 && pos - start_file_pos > max_bytes { 494 | trace!("Binary search yielded a range too large, trying entry before"); 495 | let (_, pos) = entry!(search_range, (end_ind_pos - 1) * INDEX_ENTRY_BYTES); 496 | pos 497 | } else { 498 | pos 499 | } 500 | }; 501 | 502 | let bytes = pos - start_file_pos; 503 | if bytes == 0 || bytes > max_bytes { 504 | Err(RangeFindError::MessageExceededMaxBytes) 505 | } else { 506 | trace!("Found slice range {}..{}", start_file_pos, pos); 507 | Ok(MessageSetRange { 508 | file_pos: start_file_pos, 509 | bytes, 510 | }) 511 | } 512 | } 513 | 514 | fn find_index_pos(&self, offset: Offset) -> Option { 515 | if offset < self.base_offset { 516 | // pathological case... not worth exposing Result 517 | return None; 518 | } 519 | 520 | let rel_offset = (offset - self.base_offset) as u32; 521 | 522 | let mem_slice = &self.mmap[..]; 523 | trace!("offset={} Next write pos = {}", offset, self.next_write_pos); 524 | 525 | // attempt to find the offset assuming no truncation 526 | // and fall back to binary search otherwise 527 | if (rel_offset as usize) < self.next_write_pos / INDEX_ENTRY_BYTES { 528 | trace!("Attempting to read offset from exact location"); 529 | // read exact entry 530 | let entry_pos = rel_offset as usize * INDEX_ENTRY_BYTES; 531 | let rel_offset_val = LittleEndian::read_u32(&mem_slice[entry_pos..entry_pos + 4]); 532 | trace!( 533 | "Found relative offset. 
rel_offset = {}, entry offset = {}", 534 | rel_offset, 535 | rel_offset_val 536 | ); 537 | if rel_offset_val == rel_offset { 538 | return Some(entry_pos); 539 | } 540 | } 541 | 542 | let i = binary_search(&mem_slice[0..self.next_write_pos], |v, _| { 543 | v.cmp(&rel_offset) 544 | }); 545 | trace!("Found offset {} at entry {}", offset, i); 546 | 547 | if i < self.next_write_pos / INDEX_ENTRY_BYTES { 548 | Some(i * INDEX_ENTRY_BYTES) 549 | } else { 550 | None 551 | } 552 | } 553 | } 554 | 555 | #[cfg(test)] 556 | mod tests { 557 | use super::{super::testutil::*, *}; 558 | 559 | use std::{fs, path::PathBuf}; 560 | 561 | #[test] 562 | pub fn index() { 563 | let path = TestDir::new(); 564 | let mut index = Index::new(&path, 9u64, 1000usize).unwrap(); 565 | 566 | assert_eq!(1000, index.size()); 567 | 568 | let mut buf = IndexBuf::new(2, 9u64); 569 | buf.push(11u64, 0xffff); 570 | buf.push(12u64, 0xeeee); 571 | index.append(buf).unwrap(); 572 | index.flush_sync().unwrap(); 573 | 574 | let e0 = index.read_entry(0).unwrap(); 575 | assert_eq!(11u64, e0.0); 576 | assert_eq!(0xffff, e0.1); 577 | 578 | let e1 = index.read_entry(1).unwrap(); 579 | assert_eq!(12u64, e1.0); 580 | assert_eq!(0xeeee, e1.1); 581 | 582 | // read an entry that does not exist 583 | let e2 = index.read_entry(2); 584 | assert_eq!(None, e2); 585 | } 586 | 587 | #[test] 588 | pub fn index_set_readonly() { 589 | let path = TestDir::new(); 590 | let mut index = Index::new(&path, 10u64, 1000usize).unwrap(); 591 | 592 | let mut buf = IndexBuf::new(2, 10u64); 593 | buf.push(11u64, 0xffff); 594 | buf.push(12u64, 0xeeee); 595 | index.append(buf).unwrap(); 596 | index.flush_sync().unwrap(); 597 | 598 | // set_readonly it 599 | index.set_readonly().expect("Unable to set readonly"); 600 | 601 | assert_eq!(AccessMode::Read, index.mode); 602 | 603 | let e1 = index.read_entry(1).unwrap(); 604 | assert_eq!(12u64, e1.0); 605 | assert_eq!(0xeeee, e1.1); 606 | 607 | // read an entry that does not exist 608 | let e2 = index.read_entry(2); 609 | assert_eq!(None, e2); 610 | } 611 | 612 | #[test] 613 | pub fn open_index() { 614 | let dir = TestDir::new(); 615 | // issue some writes 616 | { 617 | let mut index = Index::new(&dir, 10u64, 1000usize).unwrap(); 618 | 619 | { 620 | let mut buf = IndexBuf::new(3, 10u64); 621 | buf.push(10, 0); 622 | buf.push(11, 10); 623 | buf.push(12, 20); 624 | index.append(buf).unwrap(); 625 | } 626 | 627 | { 628 | let mut buf = IndexBuf::new(2, 10u64); 629 | buf.push(13, 30); 630 | buf.push(14, 40); 631 | index.append(buf).unwrap(); 632 | } 633 | 634 | index.flush_sync().unwrap(); 635 | index.set_readonly().unwrap(); 636 | } 637 | 638 | // now open it 639 | { 640 | let mut index_path = PathBuf::new(); 641 | index_path.push(&dir); 642 | index_path.push("00000000000000000010.index"); 643 | 644 | let meta = fs::metadata(&index_path).unwrap(); 645 | assert!(meta.is_file()); 646 | 647 | let index = Index::open(&index_path).unwrap(); 648 | 649 | for i in 0..5usize { 650 | let e = index.read_entry(i); 651 | assert!(e.is_some()); 652 | assert_eq!(e.unwrap().0, (i + 10) as u64); 653 | assert_eq!(e.unwrap().1, (i * 10) as u32); 654 | } 655 | } 656 | } 657 | 658 | #[test] 659 | pub fn open_index_with_one_message() { 660 | let dir = TestDir::new(); 661 | // issue some writes 662 | { 663 | let mut index = Index::new(&dir, 0u64, 1000usize).unwrap(); 664 | 665 | { 666 | let mut buf = IndexBuf::new(1, 0u64); 667 | buf.push(0, 2); 668 | index.append(buf).unwrap(); 669 | } 670 | 671 | index.flush_sync().unwrap(); 672 | } 673 | 674 | // now 
open it 675 | { 676 | let mut index_path = PathBuf::new(); 677 | index_path.push(&dir); 678 | index_path.push("00000000000000000000.index"); 679 | 680 | let meta = fs::metadata(&index_path).unwrap(); 681 | assert!(meta.is_file()); 682 | 683 | let mut index = Index::open(&index_path).unwrap(); 684 | 685 | // Issue a new write, to make sure we're not overwriting things 686 | { 687 | let mut buf = IndexBuf::new(1, 0u64); 688 | buf.push(1, 3); 689 | index.append(buf).unwrap(); 690 | } 691 | 692 | assert_eq!(index.next_write_pos, 16); 693 | 694 | let e = index.read_entry(0); 695 | assert!(e.is_some()); 696 | assert_eq!(e.unwrap().0, 0_u64); 697 | assert_eq!(e.unwrap().1, 2_u32); 698 | } 699 | } 700 | 701 | #[test] 702 | pub fn open_index_with_one_message_closed() { 703 | let dir = TestDir::new(); 704 | // issue some writes 705 | { 706 | let mut index = Index::new(&dir, 0u64, 1000usize).unwrap(); 707 | 708 | { 709 | let mut buf = IndexBuf::new(1, 0u64); 710 | buf.push(0, 2); 711 | index.append(buf).unwrap(); 712 | } 713 | 714 | index.flush_sync().unwrap(); 715 | index.set_readonly().unwrap(); 716 | } 717 | 718 | // now open it 719 | { 720 | let mut index_path = PathBuf::new(); 721 | index_path.push(&dir); 722 | index_path.push("00000000000000000000.index"); 723 | 724 | let meta = fs::metadata(&index_path).unwrap(); 725 | assert!(meta.is_file()); 726 | 727 | let index = Index::open(&index_path).unwrap(); 728 | assert_eq!(index.next_write_pos, 8); 729 | assert_eq!(AccessMode::Read, index.mode); 730 | 731 | let e = index.read_entry(0); 732 | assert!(e.is_some()); 733 | assert_eq!(e.unwrap().0, 0_u64); 734 | assert_eq!(e.unwrap().1, 2_u32); 735 | } 736 | } 737 | 738 | #[test] 739 | pub fn find() { 740 | let dir = TestDir::new(); 741 | let mut index = Index::new(&dir, 10u64, 1000usize).unwrap(); 742 | let mut buf = IndexBuf::new(8, 10u64); 743 | buf.push(10, 1); 744 | buf.push(11, 2); 745 | buf.push(12, 3); 746 | buf.push(15, 4); 747 | buf.push(16, 5); 748 | buf.push(17, 6); 749 | buf.push(18, 7); 750 | buf.push(20, 8); 751 | index.append(buf).unwrap(); 752 | 753 | let res = index.find(16).unwrap(); 754 | assert_eq!(16, res.0); 755 | assert_eq!(5, res.1); 756 | } 757 | 758 | #[test] 759 | pub fn find_exact() { 760 | env_logger::try_init().unwrap_or(()); 761 | 762 | let dir = TestDir::new(); 763 | let mut index = Index::new(&dir, 10u64, 1000usize).unwrap(); 764 | let mut buf = IndexBuf::new(8, 10u64); 765 | buf.push(10, 1); 766 | buf.push(11, 2); 767 | buf.push(12, 3); 768 | buf.push(13, 4); 769 | buf.push(14, 5); 770 | buf.push(15, 6); 771 | buf.push(16, 7); 772 | buf.push(17, 8); 773 | index.append(buf).unwrap(); 774 | 775 | let res = index.find(16).unwrap(); 776 | assert_eq!(16, res.0); 777 | assert_eq!(7, res.1); 778 | } 779 | 780 | #[test] 781 | pub fn find_nonexistant_value_finds_next() { 782 | let dir = TestDir::new(); 783 | let mut index = Index::new(&dir, 10u64, 1000usize).unwrap(); 784 | let mut buf = IndexBuf::new(8, 10u64); 785 | buf.push(10, 1); 786 | buf.push(11, 2); 787 | buf.push(12, 3); 788 | buf.push(15, 4); 789 | buf.push(16, 5); 790 | buf.push(17, 6); 791 | buf.push(18, 7); 792 | buf.push(20, 8); 793 | index.append(buf).unwrap(); 794 | 795 | let res = index.find(14).unwrap(); 796 | assert_eq!(15, res.0); 797 | assert_eq!(4, res.1); 798 | } 799 | 800 | #[test] 801 | pub fn find_nonexistant_value_greater_than_max() { 802 | let dir = TestDir::new(); 803 | let mut index = Index::new(&dir, 10u64, 1000usize).unwrap(); 804 | let mut buf = IndexBuf::new(8, 10u64); 805 | buf.push(10, 1); 
806 | buf.push(11, 2); 807 | buf.push(12, 3); 808 | buf.push(15, 4); 809 | buf.push(16, 5); 810 | buf.push(17, 6); 811 | buf.push(18, 7); 812 | buf.push(20, 8); 813 | index.append(buf).unwrap(); 814 | 815 | let res = index.find(21); 816 | assert!(res.is_none()); 817 | } 818 | 819 | #[test] 820 | pub fn find_out_of_bounds() { 821 | let dir = TestDir::new(); 822 | let mut index = Index::new(&dir, 10u64, 1000usize).unwrap(); 823 | let mut buf = IndexBuf::new(8, 10u64); 824 | buf.push(10, 1); 825 | buf.push(11, 2); 826 | buf.push(12, 3); 827 | buf.push(15, 4); 828 | buf.push(16, 5); 829 | buf.push(17, 6); 830 | buf.push(18, 7); 831 | buf.push(20, 8); 832 | index.append(buf).unwrap(); 833 | 834 | let res = index.find(2); 835 | assert!(res.is_none()); 836 | } 837 | 838 | #[test] 839 | pub fn reopen_partial_index() { 840 | env_logger::try_init().unwrap_or(()); 841 | let dir = TestDir::new(); 842 | { 843 | let mut index = Index::new(&dir, 10u64, 1000usize).unwrap(); 844 | let mut buf = IndexBuf::new(8, 10u64); 845 | buf.push(10, 1); 846 | buf.push(11, 2); 847 | index.append(buf).unwrap(); 848 | index.flush_sync().unwrap(); 849 | } 850 | 851 | { 852 | let mut index_path = PathBuf::new(); 853 | index_path.push(&dir); 854 | index_path.push("00000000000000000010.index"); 855 | let index = Index::open(&index_path).unwrap(); 856 | 857 | let e0 = index.find(10); 858 | assert!(e0.is_some()); 859 | assert_eq!(10, e0.unwrap().0); 860 | 861 | let e1 = index.find(11); 862 | assert!(e1.is_some()); 863 | assert_eq!(11, e1.unwrap().0); 864 | 865 | let e2 = index.find(12); 866 | assert!(e2.is_none()); 867 | 868 | assert_eq!(12, index.next_offset()); 869 | 870 | // assert_eq!(16, index.size()); 871 | assert_eq!(AccessMode::ReadWrite, index.mode); 872 | } 873 | } 874 | 875 | #[test] 876 | pub fn reopen_full_index() { 877 | env_logger::try_init().unwrap_or(()); 878 | let dir = TestDir::new(); 879 | { 880 | let mut index = Index::new(&dir, 10u64, 16usize).unwrap(); 881 | let mut buf = IndexBuf::new(2, 10u64); 882 | buf.push(10, 1); 883 | buf.push(11, 2); 884 | index.append(buf).unwrap(); 885 | index.flush_sync().unwrap(); 886 | } 887 | 888 | { 889 | let mut index_path = PathBuf::new(); 890 | index_path.push(&dir); 891 | index_path.push("00000000000000000010.index"); 892 | let index = Index::open(&index_path).unwrap(); 893 | 894 | let e0 = index.find(10); 895 | assert!(e0.is_some()); 896 | assert_eq!(10, e0.unwrap().0); 897 | 898 | let e1 = index.find(11); 899 | assert!(e1.is_some()); 900 | assert_eq!(11, e1.unwrap().0); 901 | 902 | let e2 = index.find(12); 903 | assert!(e2.is_none()); 904 | 905 | assert_eq!(12, index.next_offset()); 906 | } 907 | } 908 | 909 | #[test] 910 | fn find_segment_range_offset() { 911 | env_logger::try_init().unwrap_or(()); 912 | let dir = TestDir::new(); 913 | let mut index = Index::new(&dir, 10u64, 40usize).unwrap(); 914 | // ----- 915 | // INSERTION POINT 916 | // => 5 messages, each 10 bytes 917 | // ----- 918 | let mut buf = IndexBuf::new(5, 10u64); 919 | buf.push(10, 10); 920 | buf.push(11, 20); 921 | buf.push(12, 30); 922 | buf.push(13, 40); 923 | buf.push(14, 50); 924 | index.append(buf).unwrap(); 925 | 926 | // test offset not in index 927 | let res = index.find_segment_range(9, 50, 60); 928 | assert_eq!(Err(RangeFindError::OffsetNotAppended), res); 929 | 930 | // test message exceeds max bytes 931 | let res = index.find_segment_range(10, 5, 60); 932 | assert_eq!(Err(RangeFindError::MessageExceededMaxBytes), res); 933 | 934 | // test message within range, not including last message 
935 | let res = index.find_segment_range(10, 20, 60); 936 | assert_eq!( 937 | Ok(MessageSetRange { 938 | file_pos: 10, 939 | bytes: 20 940 | }), 941 | res 942 | ); 943 | 944 | // test message within range, not including last message, not first 945 | let res = index.find_segment_range(11, 20, 60); 946 | assert_eq!( 947 | Ok(MessageSetRange { 948 | file_pos: 20, 949 | bytes: 20 950 | }), 951 | res 952 | ); 953 | 954 | // test message within rest of range, not including last message 955 | let res = index.find_segment_range(11, 80, 60); 956 | assert_eq!( 957 | Ok(MessageSetRange { 958 | file_pos: 20, 959 | bytes: 40 960 | }), 961 | res 962 | ); 963 | } 964 | 965 | #[test] 966 | fn index_resize() { 967 | env_logger::try_init().unwrap_or(()); 968 | let dir = TestDir::new(); 969 | let mut index = Index::new(&dir, 10u64, 32usize).unwrap(); 970 | assert_eq!(32, index.size()); 971 | let mut buf = IndexBuf::new(4, 10u64); 972 | buf.push(10, 10); 973 | buf.push(11, 20); 974 | buf.push(12, 30); 975 | buf.push(13, 40); 976 | index.append(buf).unwrap(); 977 | assert_eq!(32, index.size()); 978 | 979 | let mut buf = IndexBuf::new(1, 10u64); 980 | buf.push(14, 50); 981 | assert!(index.append(buf).is_ok()); 982 | 983 | // make sure the index was resized 984 | assert_eq!(48, index.size()); 985 | 986 | assert_eq!(50, index.find(14).unwrap().1); 987 | } 988 | 989 | #[test] 990 | fn index_remove() { 991 | env_logger::try_init().unwrap_or(()); 992 | let dir = TestDir::new(); 993 | let index = Index::new(&dir, 0u64, 32usize).unwrap(); 994 | 995 | let ind_exists = fs::read_dir(&dir) 996 | .unwrap() 997 | .find(|entry| { 998 | let path = entry.as_ref().unwrap().path(); 999 | path.file_name().unwrap() == "00000000000000000000.index" 1000 | }) 1001 | .is_some(); 1002 | assert!(ind_exists, "Index file does not exist?"); 1003 | 1004 | // remove the index 1005 | index.remove().expect("Unable to remove file"); 1006 | 1007 | let ind_exists = fs::read_dir(&dir) 1008 | .unwrap() 1009 | .find(|entry| { 1010 | let path = entry.as_ref().unwrap().path(); 1011 | path.file_name().unwrap() == "00000000000000000000.index" 1012 | }) 1013 | .is_some(); 1014 | assert!(!ind_exists, "Index should not exist"); 1015 | } 1016 | 1017 | #[test] 1018 | fn index_truncate() { 1019 | env_logger::try_init().unwrap_or(()); 1020 | let dir = TestDir::new(); 1021 | let mut index = Index::new(&dir, 10u64, 128usize).unwrap(); 1022 | let mut buf = IndexBuf::new(5, 10u64); 1023 | buf.push(10, 10); 1024 | buf.push(11, 20); 1025 | buf.push(12, 30); 1026 | buf.push(13, 40); 1027 | buf.push(14, 50); 1028 | index.append(buf).unwrap(); 1029 | 1030 | let file_len = index.truncate(12); 1031 | assert_eq!(Some(40), file_len); 1032 | assert_eq!(13, index.next_offset()); 1033 | assert_eq!(3 * INDEX_ENTRY_BYTES, index.next_write_pos); 1034 | 1035 | // ensure we've zeroed the entries 1036 | let mem = &index.mmap[..]; 1037 | for i in (3 * INDEX_ENTRY_BYTES)..(5 * INDEX_ENTRY_BYTES) { 1038 | assert_eq!(0, mem[i], "Expected 0 at index {}", i); 1039 | } 1040 | } 1041 | 1042 | #[test] 1043 | fn index_truncate_at_boundary() { 1044 | env_logger::try_init().unwrap_or(()); 1045 | let dir = TestDir::new(); 1046 | let mut index = Index::new(&dir, 10u64, 128usize).unwrap(); 1047 | let mut buf = IndexBuf::new(5, 10u64); 1048 | buf.push(10, 10); 1049 | buf.push(11, 20); 1050 | buf.push(12, 30); 1051 | buf.push(13, 40); 1052 | buf.push(14, 50); 1053 | index.append(buf).unwrap(); 1054 | 1055 | let file_len = index.truncate(14); 1056 | assert_eq!(None, file_len); 1057 | assert_eq!(15, 
index.next_offset()); 1058 | assert_eq!(5 * INDEX_ENTRY_BYTES, index.next_write_pos); 1059 | } 1060 | } 1061 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //! The commit log is an append-only data structure that can be used in a 2 | //! variety of use-cases, such as tracking sequences of events, transactions 3 | //! or replicated state machines. 4 | //! 5 | //! This implementation of the commit log data structure uses log segments 6 | //! that roll over at pre-defined maximum size boundaries. The messages appended 7 | //! to the log have a unique, monotonically increasing offset that can be used 8 | //! as a pointer to a log entry. 9 | //! 10 | //! The index of the commit log logically stores the offset to a position in a 11 | //! log segment. The index and segments are separated, in that a 12 | //! segment file does not necessarily correspond to one particular segment file, 13 | //! it could contain file pointers to many segment files. In addition, index 14 | //! files are memory-mapped for efficient read and write access. 15 | //! 16 | //! ## Example 17 | //! 18 | //! ```rust 19 | //! use commitlog::*; 20 | //! use commitlog::message::*; 21 | //! 22 | //! fn main() { 23 | //! // open a directory called 'log' for segment and index storage 24 | //! let opts = LogOptions::new("log"); 25 | //! let mut log = CommitLog::new(opts).unwrap(); 26 | //! 27 | //! // append to the log 28 | //! log.append_msg("hello world").unwrap(); // offset 0 29 | //! log.append_msg("second message").unwrap(); // offset 1 30 | //! 31 | //! // read the messages 32 | //! let messages = log.read(0, ReadLimit::default()).unwrap(); 33 | //! for msg in messages.iter() { 34 | //! println!("{} - {}", msg.offset(), String::from_utf8_lossy(msg.payload())); 35 | //! } 36 | //! 37 | //! // prints: 38 | //! // 0 - hello world 39 | //! // 1 - second message 40 | //! } 41 | //! ``` 42 | 43 | use log::{info, trace}; 44 | 45 | mod file_set; 46 | mod index; 47 | pub mod message; 48 | pub mod reader; 49 | mod segment; 50 | #[cfg(test)] 51 | mod testutil; 52 | 53 | use index::*; 54 | use segment::SegmentAppendError; 55 | use std::{ 56 | error, fmt, fs, io, 57 | iter::{DoubleEndedIterator, ExactSizeIterator}, 58 | path::{Path, PathBuf}, 59 | }; 60 | 61 | #[cfg(feature = "internals")] 62 | pub use crate::{index::Index, index::IndexBuf, segment::Segment}; 63 | use file_set::FileSet; 64 | use message::{MessageBuf, MessageError, MessageSet, MessageSetMut}; 65 | use reader::{LogSliceReader, MessageBufReader}; 66 | 67 | /// Offset of an appended log segment. 68 | pub type Offset = u64; 69 | 70 | /// Offset range of log append. 71 | #[derive(Copy, Clone, PartialEq, Eq, Debug)] 72 | pub struct OffsetRange(u64, usize); 73 | 74 | impl OffsetRange { 75 | /// Starting offset of the range. 76 | pub fn first(&self) -> Offset { 77 | self.0 78 | } 79 | 80 | /// Number of offsets within the range. 81 | pub fn len(&self) -> usize { 82 | self.1 83 | } 84 | 85 | /// Boolean indicating whether the range has offsets. 86 | pub fn is_empty(&self) -> bool { 87 | self.1 == 0 88 | } 89 | 90 | /// Iterator containing all offsets within the offset range. 91 | pub fn iter(&self) -> OffsetRangeIter { 92 | OffsetRangeIter { 93 | pos: self.0, 94 | end: self.0 + (self.1 as u64), 95 | size: self.1, 96 | } 97 | } 98 | } 99 | 100 | /// Iterator of offsets within an `OffsetRange`. 
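As a usage sketch for the `OffsetRange` API above (the directory name and payloads are illustrative; errors are unwrapped as in the crate's examples):

```rust
use commitlog::{message::MessageBuf, CommitLog, LogOptions};

fn main() {
    let mut log = CommitLog::new(LogOptions::new("example-log")).unwrap();

    let mut buf = MessageBuf::default();
    buf.push(b"first").unwrap();
    buf.push(b"second").unwrap();

    // `append` assigns offsets to the buffer and reports them back.
    let range = log.append(&mut buf).unwrap();
    assert_eq!(2, range.len());
    for offset in range.iter() {
        println!("wrote offset {}", offset);
    }
}
```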
101 | #[derive(Copy, Clone, Debug)] 102 | pub struct OffsetRangeIter { 103 | pos: u64, 104 | end: u64, 105 | size: usize, 106 | } 107 | 108 | impl Iterator for OffsetRangeIter { 109 | type Item = Offset; 110 | fn next(&mut self) -> Option { 111 | if self.pos >= self.end { 112 | None 113 | } else { 114 | let v = self.pos; 115 | self.pos += 1; 116 | Some(v) 117 | } 118 | } 119 | 120 | fn size_hint(&self) -> (usize, Option) { 121 | (0, Some(self.size)) 122 | } 123 | } 124 | 125 | impl ExactSizeIterator for OffsetRangeIter { 126 | fn len(&self) -> usize { 127 | self.size 128 | } 129 | } 130 | 131 | impl DoubleEndedIterator for OffsetRangeIter { 132 | fn next_back(&mut self) -> Option { 133 | if self.pos >= self.end { 134 | None 135 | } else { 136 | let v = self.end - 1; 137 | self.end -= 1; 138 | Some(v) 139 | } 140 | } 141 | } 142 | 143 | /// Error enum for commit log Append operation. 144 | #[derive(Debug)] 145 | pub enum AppendError { 146 | /// The underlying file operations failed during the append attempt. 147 | Io(io::Error), 148 | /// A new index was created, but was unable to receive writes 149 | /// during the append operation. This could point to exhaustion 150 | /// of machine resources or other I/O issue. 151 | FreshIndexNotWritable, 152 | /// A new segment was created, but was unable to receive writes 153 | /// during the append operation. This could point to exhaustion 154 | /// of machine resources or other I/O issue. 155 | FreshSegmentNotWritable, 156 | /// If a message that is larger than the per message size is tried to be 157 | /// appended it will not be allowed an will return an error 158 | MessageSizeExceeded, 159 | /// The buffer contains an invalid offset value 160 | InvalidOffset, 161 | } 162 | 163 | impl From for AppendError { 164 | fn from(e: io::Error) -> AppendError { 165 | AppendError::Io(e) 166 | } 167 | } 168 | 169 | impl error::Error for AppendError { 170 | fn description(&self) -> &str { 171 | match *self { 172 | AppendError::Io(_) => "File IO error occurred while appending to the log", 173 | AppendError::FreshIndexNotWritable => { 174 | "While attempting to create a new index, the new index was not writabe" 175 | } 176 | AppendError::FreshSegmentNotWritable => { 177 | "While attempting to create a new segment, the new segment was not writabe" 178 | } 179 | AppendError::MessageSizeExceeded => { 180 | "While attempting to write a message, the per message size was exceeded" 181 | } 182 | AppendError::InvalidOffset => "Invalid offsets set on buffer of messages", 183 | } 184 | } 185 | 186 | fn cause(&self) -> Option<&dyn error::Error> { 187 | match *self { 188 | AppendError::Io(ref e) => Some(e), 189 | _ => None, 190 | } 191 | } 192 | } 193 | 194 | impl fmt::Display for AppendError { 195 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 196 | match *self { 197 | AppendError::Io(_) => write!(f, "IO Error"), 198 | AppendError::FreshIndexNotWritable => write!(f, "Fresh index error"), 199 | AppendError::FreshSegmentNotWritable => write!(f, "Fresh segment error"), 200 | AppendError::MessageSizeExceeded => write!(f, "Message Size exceeded error"), 201 | AppendError::InvalidOffset => write!(f, "Invalid offsets set of buffer of messages"), 202 | } 203 | } 204 | } 205 | 206 | /// Error enum for commit log read operation. 
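A caller-side sketch of handling the read errors defined below; the log directory is illustrative, and the match is exhaustive over the three variants:

```rust
use commitlog::{CommitLog, LogOptions, ReadError, ReadLimit};

fn main() {
    let mut log = CommitLog::new(LogOptions::new("example-log")).unwrap();
    log.append_msg("hello").unwrap();

    match log.read(0, ReadLimit::max_bytes(64)) {
        Ok(messages) => {
            for msg in messages.iter() {
                println!("{} - {}", msg.offset(), String::from_utf8_lossy(msg.payload()));
            }
        }
        // The requested offset does not exist in any segment.
        Err(ReadError::NoSuchSegment) => eprintln!("offset not present in the log"),
        // A hash or payload length check failed while decoding.
        Err(ReadError::CorruptLog) => eprintln!("log corruption detected"),
        Err(ReadError::Io(e)) => eprintln!("io error: {}", e),
    }
}
```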
207 | #[derive(Debug)] 208 | pub enum ReadError { 209 | /// Underlying IO error encountered by reading from the log 210 | Io(io::Error), 211 | /// A segment in the log is corrupt, or the index itself is corrupt 212 | CorruptLog, 213 | /// Offset supplied was not invalid. 214 | NoSuchSegment, 215 | } 216 | 217 | /// Batch size limitation on read. 218 | #[derive(Copy, Clone, Eq, PartialEq, Debug, Ord, PartialOrd)] 219 | pub struct ReadLimit(usize); 220 | impl ReadLimit { 221 | /// Read limit byte number of bytes. 222 | pub fn max_bytes(n: usize) -> ReadLimit { 223 | ReadLimit(n) 224 | } 225 | } 226 | 227 | impl Default for ReadLimit { 228 | fn default() -> ReadLimit { 229 | // 8kb default 230 | ReadLimit(8 * 1024) 231 | } 232 | } 233 | 234 | impl error::Error for ReadError { 235 | fn description(&self) -> &str { 236 | match *self { 237 | ReadError::Io(_) => "File IO error occurred while reading to the log", 238 | ReadError::CorruptLog => "Corrupt log segment has been detected", 239 | ReadError::NoSuchSegment => "The offset requested does not exist in the log", 240 | } 241 | } 242 | 243 | fn cause(&self) -> Option<&dyn error::Error> { 244 | match *self { 245 | ReadError::Io(ref e) => Some(e), 246 | _ => None, 247 | } 248 | } 249 | } 250 | 251 | impl fmt::Display for ReadError { 252 | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { 253 | match *self { 254 | ReadError::Io(_) => write!(f, "IO Error"), 255 | ReadError::CorruptLog => write!(f, "Corrupt Log Error"), 256 | ReadError::NoSuchSegment => write!(f, "Offset does not exist"), 257 | } 258 | } 259 | } 260 | 261 | impl From for ReadError { 262 | fn from(e: io::Error) -> ReadError { 263 | ReadError::Io(e) 264 | } 265 | } 266 | 267 | impl From for ReadError { 268 | fn from(e: MessageError) -> ReadError { 269 | match e { 270 | MessageError::IoError(e) => ReadError::Io(e), 271 | MessageError::InvalidHash | MessageError::InvalidPayloadLength => ReadError::CorruptLog, 272 | } 273 | } 274 | } 275 | 276 | impl From for ReadError { 277 | fn from(e: RangeFindError) -> ReadError { 278 | match e { 279 | RangeFindError::OffsetNotAppended => ReadError::NoSuchSegment, 280 | RangeFindError::MessageExceededMaxBytes => ReadError::Io(io::Error::new( 281 | io::ErrorKind::InvalidInput, 282 | "Message exceeded max byte size", 283 | )), 284 | } 285 | } 286 | } 287 | 288 | /// Commit log options allow customization of the commit 289 | /// log behavior. 290 | #[derive(Clone, Debug)] 291 | pub struct LogOptions { 292 | log_dir: PathBuf, 293 | log_max_bytes: usize, 294 | index_max_bytes: usize, 295 | message_max_bytes: usize, 296 | } 297 | 298 | impl LogOptions { 299 | /// Creates minimal log options value with a directory containing the log. 300 | /// 301 | /// The default values are: 302 | /// - *segment_max_bytes*: 1GB 303 | /// - *index_max_entries*: 100,000 304 | /// - *message_max_bytes*: 1mb 305 | pub fn new
<P>
(log_dir: P) -> LogOptions 306 | where 307 | P: AsRef, 308 | { 309 | LogOptions { 310 | log_dir: log_dir.as_ref().to_owned(), 311 | log_max_bytes: 1_000_000_000, 312 | index_max_bytes: 800_000, 313 | message_max_bytes: 1_000_000, 314 | } 315 | } 316 | 317 | /// Bounds the size of a log segment to a number of bytes. 318 | #[inline] 319 | pub fn segment_max_bytes(&mut self, bytes: usize) -> &mut LogOptions { 320 | self.log_max_bytes = bytes; 321 | self 322 | } 323 | 324 | /// Bounds the size of an individual memory-mapped index file. 325 | #[inline] 326 | pub fn index_max_items(&mut self, items: usize) -> &mut LogOptions { 327 | // TODO: this should be renamed to starting bytes 328 | self.index_max_bytes = items * INDEX_ENTRY_BYTES; 329 | self 330 | } 331 | 332 | /// Bounds the size of a message to a number of bytes. 333 | #[inline] 334 | pub fn message_max_bytes(&mut self, bytes: usize) -> &mut LogOptions { 335 | self.message_max_bytes = bytes; 336 | self 337 | } 338 | } 339 | 340 | /// The commit log is an append-only sequence of messages. 341 | pub struct CommitLog { 342 | file_set: FileSet, 343 | } 344 | 345 | impl CommitLog { 346 | /// Creates or opens an existing commit log. 347 | pub fn new(opts: LogOptions) -> io::Result { 348 | fs::create_dir_all(&opts.log_dir).unwrap_or(()); 349 | 350 | info!("Opening log in directory {:?}", &opts.log_dir.to_str()); 351 | 352 | let fs = FileSet::load_log(opts)?; 353 | Ok(CommitLog { file_set: fs }) 354 | } 355 | 356 | /// Appends a single message to the log, returning the offset appended. 357 | #[inline] 358 | pub fn append_msg>(&mut self, payload: B) -> Result { 359 | let mut buf = MessageBuf::default(); 360 | buf.push(payload).expect("Payload size exceeds usize::MAX"); 361 | let res = self.append(&mut buf)?; 362 | assert_eq!(res.len(), 1); 363 | Ok(res.first()) 364 | } 365 | 366 | /// Appends log entrites to the commit log, returning the offsets appended. 367 | #[inline] 368 | pub fn append(&mut self, buf: &mut T) -> Result 369 | where 370 | T: MessageSetMut, 371 | { 372 | let start_off = self.file_set.active_index_mut().next_offset(); 373 | message::set_offsets(buf, start_off); 374 | self.append_with_offsets(buf) 375 | } 376 | 377 | /// Appends log entrites to the commit log, returning the offsets appended. 378 | /// 379 | /// The offsets are expected to already be set within the buffer. 380 | pub fn append_with_offsets(&mut self, buf: &T) -> Result 381 | where 382 | T: MessageSet, 383 | { 384 | let buf_len = buf.len(); 385 | if buf_len == 0 { 386 | return Ok(OffsetRange(0, 0)); 387 | } 388 | 389 | //Check if given message exceeded the max size 390 | if buf.bytes().len() > self.file_set.log_options().message_max_bytes { 391 | return Err(AppendError::MessageSizeExceeded); 392 | } 393 | 394 | // first write to the current segment 395 | let start_off = self.next_offset(); 396 | 397 | // check to make sure the first message matches the starting offset 398 | if buf.iter().next().unwrap().offset() != start_off { 399 | return Err(AppendError::InvalidOffset); 400 | } 401 | 402 | let meta = match self.file_set.active_segment_mut().append(buf) { 403 | Ok(meta) => meta, 404 | // if the log is full, gracefully close the current segment 405 | // and create new one starting from the new offset 406 | Err(SegmentAppendError::LogFull) => { 407 | self.file_set.roll_segment()?; 408 | 409 | // try again, giving up if we have to 410 | self.file_set 411 | .active_segment_mut() 412 | .append(buf) 413 | .map_err(|_| AppendError::FreshSegmentNotWritable)? 
414 | } 415 | Err(SegmentAppendError::IoError(e)) => return Err(AppendError::Io(e)), 416 | }; 417 | 418 | // write to the index 419 | { 420 | // TODO: reduce indexing of every message 421 | let index = self.file_set.active_index_mut(); 422 | let mut index_pos_buf = IndexBuf::new(buf_len, index.starting_offset()); 423 | let mut pos = meta.starting_position; 424 | for m in buf.iter() { 425 | index_pos_buf.push(m.offset(), pos as u32); 426 | pos += m.total_bytes(); 427 | } 428 | // TODO: what happens when this errors out? Do we truncate the log...? 429 | index.append(index_pos_buf)?; 430 | } 431 | 432 | Ok(OffsetRange(start_off, buf_len)) 433 | } 434 | 435 | /// Gets the last written offset. 436 | pub fn last_offset(&self) -> Option { 437 | let next_off = self.file_set.active_index().next_offset(); 438 | if next_off == 0 { 439 | None 440 | } else { 441 | Some(next_off - 1) 442 | } 443 | } 444 | 445 | /// Gets the latest offset 446 | #[inline] 447 | pub fn next_offset(&self) -> Offset { 448 | self.file_set.active_index().next_offset() 449 | } 450 | 451 | /// Reads a portion of the log, starting with the `start` offset, inclusive, 452 | /// up to the limit. 453 | #[inline] 454 | pub fn read(&self, start: Offset, limit: ReadLimit) -> Result { 455 | let mut rd = MessageBufReader; 456 | match self.reader(&mut rd, start, limit)? { 457 | Some(v) => Ok(v), 458 | None => Ok(MessageBuf::default()), 459 | } 460 | } 461 | 462 | /// Reads a portion of the log, starting with the `start` offset, inclusive, 463 | /// up to the limit via the reader. 464 | pub fn reader( 465 | &self, 466 | reader: &mut R, 467 | mut start: Offset, 468 | limit: ReadLimit, 469 | ) -> Result, ReadError> { 470 | // TODO: can this be caught at the index level insead? 471 | if start >= self.file_set.active_index().next_offset() { 472 | return Ok(None); 473 | } 474 | 475 | // adjust for the minimum offset (e.g. reader requests an offset that was 476 | // truncated) 477 | match self.file_set.min_offset() { 478 | Some(min_off) if min_off > start => { 479 | start = min_off; 480 | } 481 | None => return Ok(None), 482 | _ => {} 483 | } 484 | 485 | let max_bytes = limit.0 as u32; 486 | 487 | // find the correct segment 488 | let &(ref ind, ref seg) = self.file_set.find(start); 489 | let seg_bytes = seg.size() as u32; 490 | 491 | // grab the range from the contained index 492 | let range = ind.find_segment_range(start, max_bytes, seg_bytes)?; 493 | if range.bytes() == 0 { 494 | Ok(None) 495 | } else { 496 | Ok(Some(seg.read_slice( 497 | reader, 498 | range.file_position(), 499 | range.bytes(), 500 | )?)) 501 | } 502 | } 503 | 504 | /// Truncates a file after the offset supplied. The resulting log will 505 | /// contain entries up to the offset. 506 | pub fn truncate(&mut self, offset: Offset) -> io::Result<()> { 507 | info!("Truncating log to offset {}", offset); 508 | 509 | // remove index/segment files rolled after the offset 510 | let segments_to_remove = self.file_set.remove_after(offset); 511 | Self::delete_segments(segments_to_remove)?; 512 | 513 | // truncate the current index 514 | match self.file_set.active_index_mut().truncate(offset) { 515 | Some(len) => self.file_set.active_segment_mut().truncate(len), 516 | // index outside of appended range 517 | None => Ok(()), 518 | } 519 | } 520 | 521 | /// Removes segment files that are before (strictly less than) the specified 522 | /// offset. The log might contain some messages before the offset 523 | /// provided is in the middle of a segment. 
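A retention sketch using the trimming methods below (the offset and directory are illustrative); note that trimming keeps the whole segment containing the given offset, so reads may still return some earlier offsets:

```rust
use commitlog::{CommitLog, LogOptions};

fn main() {
    let mut log = CommitLog::new(LogOptions::new("example-log")).unwrap();
    for i in 0..100 {
        log.append_msg(format!("event {}", i)).unwrap();
    }

    // Drop whole segments that fall entirely before offset 50.
    log.trim_segments_before(50).unwrap();

    // Or keep only the active (writable) segment and its index.
    log.trim_inactive_segments().unwrap();
}
```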
524 | pub fn trim_segments_before(&mut self, offset: Offset) -> io::Result<()> { 525 | let segments_to_remove = self.file_set.remove_before(offset); 526 | Self::delete_segments(segments_to_remove) 527 | } 528 | 529 | /// Removes segment files that are read-only. 530 | pub fn trim_inactive_segments(&mut self) -> io::Result<()> { 531 | let active_offset_start = self.file_set.active_index().starting_offset(); 532 | self.trim_segments_before(active_offset_start) 533 | } 534 | 535 | /// Forces a flush of the log. 536 | pub fn flush(&mut self) -> io::Result<()> { 537 | self.file_set.active_segment_mut().flush_sync()?; 538 | self.file_set.active_index_mut().flush_sync() 539 | } 540 | 541 | fn delete_segments(segments: Vec<(Index, segment::Segment)>) -> io::Result<()> { 542 | for p in segments { 543 | trace!( 544 | "Removing segment and index starting at {}", 545 | p.0.starting_offset() 546 | ); 547 | p.0.remove()?; 548 | p.1.remove()?; 549 | } 550 | Ok(()) 551 | } 552 | } 553 | 554 | #[cfg(test)] 555 | mod tests { 556 | use super::{message::*, testutil::*, *}; 557 | 558 | use std::{collections::HashSet, fs}; 559 | 560 | #[test] 561 | pub fn offset_range() { 562 | let range = OffsetRange(2, 6); 563 | 564 | assert_eq!(vec![2, 3, 4, 5, 6, 7], range.iter().collect::>()); 565 | 566 | assert_eq!( 567 | vec![7, 6, 5, 4, 3, 2], 568 | range.iter().rev().collect::>() 569 | ); 570 | } 571 | 572 | #[test] 573 | pub fn append() { 574 | let dir = TestDir::new(); 575 | let mut log = CommitLog::new(LogOptions::new(&dir)).unwrap(); 576 | assert_eq!(log.append_msg("123456").unwrap(), 0); 577 | assert_eq!(log.append_msg("abcdefg").unwrap(), 1); 578 | assert_eq!(log.append_msg("foobarbaz").unwrap(), 2); 579 | assert_eq!(log.append_msg("bing").unwrap(), 3); 580 | log.flush().unwrap(); 581 | } 582 | 583 | #[test] 584 | pub fn append_multiple() { 585 | let dir = TestDir::new(); 586 | let mut log = CommitLog::new(LogOptions::new(&dir)).unwrap(); 587 | let mut buf = { 588 | let mut buf = MessageBuf::default(); 589 | buf.push(b"123456").unwrap(); 590 | buf.push(b"789012").unwrap(); 591 | buf.push(b"345678").unwrap(); 592 | buf 593 | }; 594 | let range = log.append(&mut buf).unwrap(); 595 | assert_eq!(0, range.first()); 596 | assert_eq!(3, range.len()); 597 | assert_eq!(vec![0, 1, 2], range.iter().collect::>()); 598 | } 599 | 600 | #[test] 601 | pub fn append_new_segment() { 602 | let dir = TestDir::new(); 603 | let mut opts = LogOptions::new(&dir); 604 | opts.segment_max_bytes(62); 605 | 606 | { 607 | let mut log = CommitLog::new(opts).unwrap(); 608 | // first 2 entries fit (both 30 bytes with encoding) 609 | log.append_msg("0123456789").unwrap(); 610 | log.append_msg("0123456789").unwrap(); 611 | 612 | // this one should roll the log 613 | log.append_msg("0123456789").unwrap(); 614 | log.flush().unwrap(); 615 | } 616 | 617 | expect_files( 618 | &dir, 619 | vec![ 620 | "00000000000000000000.index", 621 | "00000000000000000000.log", 622 | "00000000000000000002.log", 623 | "00000000000000000002.index", 624 | ], 625 | ); 626 | } 627 | 628 | #[test] 629 | pub fn read_entries() { 630 | env_logger::try_init().unwrap_or(()); 631 | 632 | let dir = TestDir::new(); 633 | let mut opts = LogOptions::new(&dir); 634 | opts.index_max_items(20); 635 | opts.segment_max_bytes(1000); 636 | let mut log = CommitLog::new(opts).unwrap(); 637 | 638 | for i in 0..100 { 639 | let s = format!("-data {}", i); 640 | log.append_msg(s.as_str()).unwrap(); 641 | } 642 | log.flush().unwrap(); 643 | 644 | { 645 | let active_index_read = log.read(82, 
ReadLimit::max_bytes(168)).unwrap(); 646 | assert_eq!(6, active_index_read.len()); 647 | assert_eq!( 648 | vec![82, 83, 84, 85, 86, 87], 649 | active_index_read 650 | .iter() 651 | .map(|v| v.offset()) 652 | .collect::>() 653 | ); 654 | } 655 | 656 | { 657 | let old_index_read = log.read(5, ReadLimit::max_bytes(112)).unwrap(); 658 | assert_eq!(4, old_index_read.len()); 659 | assert_eq!( 660 | vec![5, 6, 7, 8], 661 | old_index_read 662 | .iter() 663 | .map(|v| v.offset()) 664 | .collect::>() 665 | ); 666 | } 667 | 668 | // read at the boundary (not going to get full message limit) 669 | { 670 | // log rolls at offset 36 671 | let boundary_read = log.read(33, ReadLimit::max_bytes(100)).unwrap(); 672 | assert_eq!(3, boundary_read.len()); 673 | assert_eq!( 674 | vec![33, 34, 35], 675 | boundary_read.iter().map(|v| v.offset()).collect::>() 676 | ); 677 | } 678 | } 679 | 680 | #[test] 681 | pub fn reopen_log() { 682 | env_logger::try_init().unwrap_or(()); 683 | 684 | let dir = TestDir::new(); 685 | let mut opts = LogOptions::new(&dir); 686 | opts.index_max_items(20); 687 | opts.segment_max_bytes(1000); 688 | 689 | { 690 | let mut log = CommitLog::new(opts.clone()).unwrap(); 691 | 692 | for i in 0..99 { 693 | let s = format!("some data {}", i); 694 | let off = log.append_msg(s.as_str()).unwrap(); 695 | assert_eq!(i, off); 696 | } 697 | log.flush().unwrap(); 698 | } 699 | 700 | { 701 | let mut log = CommitLog::new(opts).unwrap(); 702 | 703 | let active_index_read = log.read(82, ReadLimit::max_bytes(130)).unwrap(); 704 | 705 | assert_eq!(4, active_index_read.len()); 706 | assert_eq!( 707 | vec![82, 83, 84, 85], 708 | active_index_read 709 | .iter() 710 | .map(|v| v.offset()) 711 | .collect::>() 712 | ); 713 | 714 | let off = log.append_msg("moar data").unwrap(); 715 | assert_eq!(99, off); 716 | } 717 | } 718 | 719 | #[test] 720 | pub fn reopen_log_without_segment_write() { 721 | env_logger::try_init().unwrap_or(()); 722 | 723 | let dir = TestDir::new(); 724 | let mut opts = LogOptions::new(&dir); 725 | opts.index_max_items(20); 726 | opts.segment_max_bytes(1000); 727 | 728 | { 729 | let mut log = CommitLog::new(opts.clone()).unwrap(); 730 | log.flush().unwrap(); 731 | } 732 | 733 | { 734 | CommitLog::new(opts.clone()).expect("Should be able to reopen log without writes"); 735 | } 736 | 737 | { 738 | CommitLog::new(opts).expect("Should be able to reopen log without writes"); 739 | } 740 | } 741 | 742 | #[test] 743 | pub fn reopen_log_with_one_segment_write() { 744 | env_logger::try_init().unwrap_or(()); 745 | let dir = TestDir::new(); 746 | let opts = LogOptions::new(&dir); 747 | { 748 | let mut log = CommitLog::new(opts.clone()).unwrap(); 749 | log.append_msg("Test").unwrap(); 750 | log.flush().unwrap(); 751 | } 752 | { 753 | let log = CommitLog::new(opts).unwrap(); 754 | assert_eq!(1, log.next_offset()); 755 | } 756 | } 757 | 758 | #[test] 759 | pub fn append_message_greater_than_max() { 760 | let dir = TestDir::new(); 761 | let mut log = CommitLog::new(LogOptions::new(&dir)).unwrap(); 762 | //create vector with 1.2mb of size, u8 = 1 byte thus, 763 | //1mb = 1000000 bytes, 1200000 items needed 764 | let mut value = String::new(); 765 | let mut target = 0; 766 | while target != 2000000 { 767 | value.push('a'); 768 | target += 1; 769 | } 770 | let res = log.append_msg(value); 771 | //will fail if no error is found which means a message greater than the limit 772 | // passed through 773 | assert!(res.is_err()); 774 | log.flush().unwrap(); 775 | } 776 | 777 | #[test] 778 | pub fn 
truncate_from_active() { 779 | let dir = TestDir::new(); 780 | let mut log = CommitLog::new(LogOptions::new(&dir)).unwrap(); 781 | 782 | // append 5 messages 783 | { 784 | let mut buf = MessageBuf::default(); 785 | buf.push(b"123456").unwrap(); 786 | buf.push(b"789012").unwrap(); 787 | buf.push(b"345678").unwrap(); 788 | buf.push(b"aaaaaa").unwrap(); 789 | buf.push(b"bbbbbb").unwrap(); 790 | log.append(&mut buf).unwrap(); 791 | } 792 | 793 | // truncate to offset 2 (should remove 2 messages) 794 | log.truncate(2).expect("Unable to truncate file"); 795 | 796 | assert_eq!(Some(2), log.last_offset()); 797 | } 798 | 799 | #[test] 800 | pub fn truncate_after_offset_removes_segments() { 801 | env_logger::try_init().unwrap_or(()); 802 | let dir = TestDir::new(); 803 | 804 | let mut opts = LogOptions::new(&dir); 805 | opts.index_max_items(20); 806 | opts.segment_max_bytes(52); 807 | let mut log = CommitLog::new(opts).unwrap(); 808 | 809 | // append 6 messages (4 segments) 810 | { 811 | for _ in 0..7 { 812 | log.append_msg(b"12345").unwrap(); 813 | } 814 | } 815 | 816 | // ensure we have the expected index/logs 817 | expect_files( 818 | &dir, 819 | vec![ 820 | "00000000000000000000.index", 821 | "00000000000000000000.log", 822 | "00000000000000000002.log", 823 | "00000000000000000002.index", 824 | "00000000000000000004.log", 825 | "00000000000000000004.index", 826 | "00000000000000000006.log", 827 | "00000000000000000006.index", 828 | ], 829 | ); 830 | 831 | // truncate to offset 2 (should remove 2 messages) 832 | log.truncate(3).expect("Unable to truncate file"); 833 | 834 | assert_eq!(Some(3), log.last_offset()); 835 | 836 | // ensure we have the expected index/logs 837 | expect_files( 838 | &dir, 839 | vec![ 840 | "00000000000000000000.index", 841 | "00000000000000000000.log", 842 | "00000000000000000002.log", 843 | "00000000000000000002.index", 844 | ], 845 | ); 846 | } 847 | 848 | #[test] 849 | pub fn truncate_at_segment_boundary_removes_segments() { 850 | env_logger::try_init().unwrap_or(()); 851 | let dir = TestDir::new(); 852 | 853 | let mut opts = LogOptions::new(&dir); 854 | opts.index_max_items(20); 855 | opts.segment_max_bytes(52); 856 | let mut log = CommitLog::new(opts).unwrap(); 857 | 858 | // append 6 messages (4 segments) 859 | { 860 | for _ in 0..7 { 861 | log.append_msg(b"12345").unwrap(); 862 | } 863 | } 864 | 865 | // ensure we have the expected index/logs 866 | expect_files( 867 | &dir, 868 | vec![ 869 | "00000000000000000000.index", 870 | "00000000000000000000.log", 871 | "00000000000000000002.log", 872 | "00000000000000000002.index", 873 | "00000000000000000004.log", 874 | "00000000000000000004.index", 875 | "00000000000000000006.log", 876 | "00000000000000000006.index", 877 | ], 878 | ); 879 | 880 | // truncate to offset 2 (should remove 2 messages) 881 | log.truncate(2).expect("Unable to truncate file"); 882 | 883 | assert_eq!(Some(2), log.last_offset()); 884 | 885 | // ensure we have the expected index/logs 886 | expect_files( 887 | &dir, 888 | vec![ 889 | "00000000000000000000.index", 890 | "00000000000000000000.log", 891 | "00000000000000000002.log", 892 | "00000000000000000002.index", 893 | ], 894 | ); 895 | } 896 | 897 | #[test] 898 | pub fn truncate_after_last_append_does_nothing() { 899 | env_logger::try_init().unwrap_or(()); 900 | let dir = TestDir::new(); 901 | 902 | let mut opts = LogOptions::new(&dir); 903 | opts.index_max_items(20); 904 | opts.segment_max_bytes(52); 905 | let mut log = CommitLog::new(opts).unwrap(); 906 | 907 | // append 6 messages (4 segments) 
908 |         {
909 |             for _ in 0..7 {
910 |                 log.append_msg(b"12345").unwrap();
911 |             }
912 |         }
913 | 
914 |         // ensure we have the expected index/logs
915 |         expect_files(
916 |             &dir,
917 |             vec![
918 |                 "00000000000000000000.index",
919 |                 "00000000000000000000.log",
920 |                 "00000000000000000002.log",
921 |                 "00000000000000000002.index",
922 |                 "00000000000000000004.log",
923 |                 "00000000000000000004.index",
924 |                 "00000000000000000006.log",
925 |                 "00000000000000000006.index",
926 |             ],
927 |         );
928 | 
929 |         // truncate past the last offset (should be a no-op)
930 |         log.truncate(7).expect("Unable to truncate file");
931 | 
932 |         assert_eq!(Some(6), log.last_offset());
933 | 
934 |         // ensure we have the expected index/logs
935 |         expect_files(
936 |             &dir,
937 |             vec![
938 |                 "00000000000000000000.index",
939 |                 "00000000000000000000.log",
940 |                 "00000000000000000002.log",
941 |                 "00000000000000000002.index",
942 |                 "00000000000000000004.log",
943 |                 "00000000000000000004.index",
944 |                 "00000000000000000006.log",
945 |                 "00000000000000000006.index",
946 |             ],
947 |         );
948 |     }
949 | 
950 |     #[test]
951 |     pub fn trim_segments_before_removes_segments() {
952 |         env_logger::try_init().unwrap_or(());
953 |         let dir = TestDir::new();
954 | 
955 |         let mut opts = LogOptions::new(&dir);
956 |         opts.index_max_items(20);
957 |         opts.segment_max_bytes(52);
958 |         let mut log = CommitLog::new(opts).unwrap();
959 | 
960 |         // append 7 messages (4 segments)
961 |         {
962 |             for _ in 0..7 {
963 |                 log.append_msg(b"12345").unwrap();
964 |             }
965 |         }
966 | 
967 |         // ensure we have the expected index/logs
968 |         expect_files(
969 |             &dir,
970 |             vec![
971 |                 "00000000000000000000.index",
972 |                 "00000000000000000000.log",
973 |                 "00000000000000000002.log",
974 |                 "00000000000000000002.index",
975 |                 "00000000000000000004.log",
976 |                 "00000000000000000004.index",
977 |                 "00000000000000000006.log",
978 |                 "00000000000000000006.index",
979 |             ],
980 |         );
981 | 
982 |         // remove segments < 3, which is just segment 0
983 |         log.trim_segments_before(3)
984 |             .expect("Unable to trim segments");
985 | 
986 |         assert_eq!(Some(6), log.last_offset());
987 | 
988 |         // ensure we have the expected index/logs
989 |         expect_files(
990 |             &dir,
991 |             vec![
992 |                 "00000000000000000002.index",
993 |                 "00000000000000000002.log",
994 |                 "00000000000000000004.log",
995 |                 "00000000000000000004.index",
996 |                 "00000000000000000006.log",
997 |                 "00000000000000000006.index",
998 |             ],
999 |         );
1000 | 
1001 |         // make sure the messages are really gone
1002 |         let reader = log
1003 |             .read(0, ReadLimit::default())
1004 |             .expect("Unable to grab reader");
1005 |         assert_eq!(2, reader.iter().next().unwrap().offset());
1006 |     }
1007 | 
1008 |     #[test]
1009 |     pub fn trim_segments_before_removes_segments_at_boundary() {
1010 |         env_logger::try_init().unwrap_or(());
1011 |         let dir = TestDir::new();
1012 | 
1013 |         let mut opts = LogOptions::new(&dir);
1014 |         opts.index_max_items(20);
1015 |         opts.segment_max_bytes(52);
1016 |         let mut log = CommitLog::new(opts).unwrap();
1017 | 
1018 |         // append 7 messages (4 segments)
1019 |         {
1020 |             for _ in 0..7 {
1021 |                 log.append_msg(b"12345").unwrap();
1022 |             }
1023 |         }
1024 | 
1025 |         // ensure we have the expected index/logs
1026 |         expect_files(
1027 |             &dir,
1028 |             vec![
1029 |                 "00000000000000000000.index",
1030 |                 "00000000000000000000.log",
1031 |                 "00000000000000000002.log",
1032 |                 "00000000000000000002.index",
1033 |                 "00000000000000000004.log",
1034 |                 "00000000000000000004.index",
1035 |                 "00000000000000000006.log",
1036 |                 "00000000000000000006.index",
1037 |             ],
1038 |         );
1039 | 
1040 |         // remove segments < 4, which removes segments 0 and 2
1041 |         log.trim_segments_before(4)
1042 |             .expect("Unable to trim segments");
1043 | 
1044 |         assert_eq!(Some(6), log.last_offset());
1045 | 
1046 |         // ensure we have the expected index/logs
1047 |         expect_files(
1048 |             &dir,
1049 |             vec![
1050 |                 "00000000000000000004.log",
1051 |                 "00000000000000000004.index",
1052 |                 "00000000000000000006.log",
1053 |                 "00000000000000000006.index",
1054 |             ],
1055 |         );
1056 | 
1057 |         // make sure the messages are really gone
1058 |         let reader = log
1059 |             .read(0, ReadLimit::default())
1060 |             .expect("Unable to grab reader");
1061 |         assert_eq!(4, reader.iter().next().unwrap().offset());
1062 |     }
1063 | 
1064 |     #[test]
1065 |     pub fn trim_start_logic_check() {
1066 |         env_logger::try_init().unwrap_or(());
1067 |         const TOTAL_MESSAGES: u64 = 20;
1068 |         const TESTED_TRIM_START: u64 = TOTAL_MESSAGES + 1;
1069 | 
1070 |         for trim_off in 0..TESTED_TRIM_START {
1071 |             let dir = TestDir::new();
1072 |             let mut opts = LogOptions::new(&dir);
1073 |             opts.index_max_items(20);
1074 |             opts.segment_max_bytes(52);
1075 |             let mut log = CommitLog::new(opts).unwrap();
1076 | 
1077 |             // append the messages
1078 |             {
1079 |                 for _ in 0..TOTAL_MESSAGES {
1080 |                     log.append_msg(b"12345").unwrap();
1081 |                 }
1082 |             }
1083 | 
1084 |             log.trim_segments_before(trim_off)
1085 |                 .expect("Unable to trim segments");
1086 |             assert_eq!(Some(TOTAL_MESSAGES - 1), log.last_offset());
1087 | 
1088 |             // trimming happens at segment boundaries, so the log starts at or before the trim offset
1089 |             let reader = log
1090 |                 .read(0, ReadLimit::default())
1091 |                 .expect("Unable to grab reader");
1092 |             let start_off = reader.iter().next().unwrap().offset();
1093 |             assert!(start_off <= trim_off);
1094 |         }
1095 |     }
1096 | 
1097 |     #[test]
1098 |     pub fn multiple_trim_start_calls() {
1099 |         env_logger::try_init().unwrap_or(());
1100 |         const TOTAL_MESSAGES: u64 = 20;
1101 |         let dir = TestDir::new();
1102 |         let mut opts = LogOptions::new(&dir);
1103 |         opts.index_max_items(20);
1104 |         opts.segment_max_bytes(52);
1105 |         let mut log = CommitLog::new(opts).unwrap();
1106 | 
1107 |         // append the messages
1108 |         {
1109 |             for _ in 0..TOTAL_MESSAGES {
1110 |                 log.append_msg(b"12345").unwrap();
1111 |             }
1112 |         }
1113 | 
1114 |         log.trim_segments_before(2).unwrap();
1115 | 
1116 |         {
1117 |             let reader = log
1118 |                 .read(0, ReadLimit::default())
1119 |                 .expect("Unable to grab reader");
1120 |             assert_eq!(2, reader.iter().next().unwrap().offset());
1121 |         }
1122 | 
1123 |         log.trim_segments_before(10).unwrap();
1124 | 
1125 |         {
1126 |             let reader = log
1127 |                 .read(0, ReadLimit::default())
1128 |                 .expect("Unable to grab reader");
1129 |             assert_eq!(10, reader.iter().next().unwrap().offset());
1130 |         }
1131 |     }
1132 | 
1133 |     #[test]
1134 |     pub fn trim_inactive_logic_check() {
1135 |         env_logger::try_init().unwrap_or(());
1136 |         const TOTAL_MESSAGES: u64 = 20;
1137 | 
1138 |         let dir = TestDir::new();
1139 |         let mut opts = LogOptions::new(&dir);
1140 |         opts.index_max_items(20);
1141 |         opts.segment_max_bytes(52);
1142 |         let mut log = CommitLog::new(opts).unwrap();
1143 | 
1144 |         // append the messages
1145 |         {
1146 |             for _ in 0..TOTAL_MESSAGES {
1147 |                 log.append_msg(b"12345").unwrap();
1148 |             }
1149 |         }
1150 | 
1151 |         log.trim_inactive_segments()
1152 |             .expect("Unable to trim segments");
1153 |         assert_eq!(Some(TOTAL_MESSAGES - 1), log.last_offset());
1154 | 
1155 |         // make sure the messages are really gone
1156 |         let reader = log
1157 |             .read(0, ReadLimit::default())
1158 |             .expect("Unable to grab reader");
1159 |         let start_off = reader.iter().next().unwrap().offset();
1160 |         assert_eq!(16, start_off);
1161 |     }
1162 | 
1163 |     #[test]
1164 |     pub fn trim_inactive_logic_check_zero_messages() {
1165 |         env_logger::try_init().unwrap_or(());
1166 | 
1167 |         let dir = TestDir::new();
1168 |         let mut opts = LogOptions::new(&dir);
1169 |         opts.index_max_items(20);
1170 |         opts.segment_max_bytes(52);
1171 |         let mut log = CommitLog::new(opts).unwrap();
1172 | 
1173 |         log.trim_inactive_segments()
1174 |             .expect("Unable to trim segments");
1175 |         assert_eq!(None, log.last_offset());
1176 | 
1177 |         // append a message
1178 |         log.append_msg(b"12345").unwrap();
1179 | 
1180 |         // make sure the appended message is readable from the start
1181 |         let reader = log
1182 |             .read(0, ReadLimit::default())
1183 |             .expect("Unable to grab reader");
1184 |         let start_off = reader.iter().next().unwrap().offset();
1185 |         assert_eq!(0, start_off);
1186 |     }
1187 |     // asserts that the directory contains exactly the expected file names
1188 |     fn expect_files<P: AsRef<Path>, I>(dir: P, files: I)
1189 |     where
1190 |         I: IntoIterator<Item = &'static str>,
1191 |     {
1192 |         let dir_files = fs::read_dir(&dir)
1193 |             .unwrap()
1194 |             .map(|e| {
1195 |                 e.unwrap()
1196 |                     .path()
1197 |                     .file_name()
1198 |                     .unwrap()
1199 |                     .to_str()
1200 |                     .unwrap()
1201 |                     .to_string()
1202 |             })
1203 |             .collect::<HashSet<String>>();
1204 |         let expected = files
1205 |             .into_iter()
1206 |             .map(|s| s.to_string())
1207 |             .collect::<HashSet<String>>();
1208 |         assert_eq!(
1209 |             dir_files.len(),
1210 |             expected.len(),
1211 |             "Invalid file count, expected {:?} got {:?}",
1212 |             expected,
1213 |             dir_files
1214 |         );
1215 |         assert_eq!(
1216 |             dir_files.intersection(&expected).count(),
1217 |             expected.len(),
1218 |             "Unexpected files, expected {:?} got {:?}",
1219 |             expected,
1220 |             dir_files
1221 |         );
1222 |     }
1223 | }
1224 | 
1225 | #[doc = include_str!("../README.md")]
1226 | #[cfg(doctest)]
1227 | pub struct ReadmeDoctests;
1228 | 
--------------------------------------------------------------------------------