├── .gitignore
├── rust-toolchain
├── README.md
├── src
│   ├── workload.rs
│   ├── generator
│   │   ├── constant_generator.rs
│   │   ├── counter_generator.rs
│   │   ├── uniform_long_generator.rs
│   │   ├── discrete_generator.rs
│   │   ├── acknowledged_counter_generator.rs
│   │   └── zipfian_generator.rs
│   ├── db.rs
│   ├── generator.rs
│   ├── sqlite.rs
│   ├── properties.rs
│   ├── main.rs
│   └── workload
│       └── core_workload.rs
├── Cargo.toml
├── .github
│   └── workflows
│       └── rust.yml
├── workloads
│   ├── workloada.toml
│   ├── workloadc.toml
│   ├── workloadb.toml
│   ├── workloadf.toml
│   ├── workloadd.toml
│   ├── workloade.toml
│   └── workload_template.toml
├── LICENSE
└── Cargo.lock

/.gitignore:
--------------------------------------------------------------------------------
/target

--------------------------------------------------------------------------------
/rust-toolchain:
--------------------------------------------------------------------------------
stable

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# YCSB for Rust

This is a port of [YCSB](https://github.com/brianfrankcooper/YCSB) to Rust.

## Authors

* [Pekka Enberg](https://github.com/penberg)
* [Alex Chi](https://github.com/skyzh)

--------------------------------------------------------------------------------
/src/workload.rs:
--------------------------------------------------------------------------------
mod core_workload;

pub use core_workload::CoreWorkload;

use crate::db::DB;
use std::rc::Rc;

pub trait Workload {
    fn do_insert(&self, db: Rc<dyn DB>);
    fn do_transaction(&self, db: Rc<dyn DB>);
}

--------------------------------------------------------------------------------
/Cargo.toml:
--------------------------------------------------------------------------------
[package]
name = "ycsb"
version = "0.0.0"
authors = ["Pekka Enberg "]
edition = "2021"

[dependencies]
anyhow = "1.0"
log = "0.4"
rand = {version = "0.8", features = ["small_rng"]}
serde = { version = "1.0.130", features = ["derive"] }
sql-builder = "3.1"
sqlite = "0.26.0"
structopt = "0.3.23"
toml = "0.5.8"

[profile.release]
debug = true

--------------------------------------------------------------------------------
/src/generator/constant_generator.rs:
--------------------------------------------------------------------------------
use rand::prelude::SmallRng;

use super::Generator;

pub struct ConstantGenerator<T: Clone> {
    value: T,
}

impl<T: Clone> ConstantGenerator<T> {
    pub fn new(value: T) -> Self {
        Self { value }
    }
}

impl<T: Clone> Generator<T> for ConstantGenerator<T> {
    fn next_value(&self, _rng: &mut SmallRng) -> T {
        self.value.clone()
    }
}

--------------------------------------------------------------------------------
/src/generator/counter_generator.rs:
--------------------------------------------------------------------------------
use super::Generator;
use rand::prelude::*;
use std::sync::atomic::AtomicU64;

pub struct CounterGenerator {
    counter: AtomicU64,
}

impl CounterGenerator {
    pub fn new(count_start: u64) -> Self {
        Self {
            counter: AtomicU64::new(count_start),
        }
    }
}

impl Generator<u64> for CounterGenerator {
    fn next_value(&self, _rng: &mut SmallRng) -> u64 {
        self.counter
            .fetch_add(1, std::sync::atomic::Ordering::SeqCst)
    }
}

--------------------------------------------------------------------------------
/src/db.rs:
--------------------------------------------------------------------------------
use crate::sqlite::SQLite;
use anyhow::{anyhow, Result};
use std::collections::HashMap;
use std::rc::Rc;

pub trait DB {
    fn init(&self) -> Result<()>;
    fn insert(&self, table: &str, key: &str, values: &HashMap<&str, String>) -> Result<()>;
    fn read(&self, table: &str, key: &str, result: &mut HashMap<String, String>) -> Result<()>;
}

pub fn create_db(db: &str) -> Result<Rc<dyn DB>> {
    match db {
        "sqlite" => Ok(Rc::new(SQLite::new()?)),
        db => Err(anyhow!("{} is an invalid database name", db)),
    }
}

--------------------------------------------------------------------------------
/.github/workflows/rust.yml:
--------------------------------------------------------------------------------
name: Rust

on:
  push:
    branches: [main]
  pull_request:
    branches: [main]

env:
  CARGO_TERM_COLOR: always

jobs:
  build:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v2
      - name: Install rust toolchain
        uses: actions-rs/toolchain@v1
        with:
          components: rustfmt, clippy
      - name: Format Check
        run: cargo fmt --all -- --check
      - name: Clippy Check
        run: cargo clippy --all-targets --all-features -- -D warnings
      - name: Build
        run: cargo build --verbose
      - name: Run Tests
        run: cargo test --verbose

--------------------------------------------------------------------------------
/src/generator/uniform_long_generator.rs:
--------------------------------------------------------------------------------
use super::{Generator, NumberGenerator};
use rand::prelude::*;

pub struct UniformLongGenerator {
    lower_bound: u64,
    upper_bound: u64,
}

impl UniformLongGenerator {
    pub fn new(lower_bound: u64, upper_bound: u64) -> Self {
        Self {
            lower_bound,
            upper_bound,
        }
    }
}

impl Generator<u64> for UniformLongGenerator {
    fn next_value(&self, rng: &mut SmallRng) -> u64 {
        rng.gen_range(self.lower_bound..=self.upper_bound)
    }
}

impl NumberGenerator<u64> for UniformLongGenerator {
    fn mean(&self) -> u64 {
        (self.lower_bound + self.upper_bound) / 2
    }
}

--------------------------------------------------------------------------------
/src/generator.rs:
--------------------------------------------------------------------------------
mod acknowledged_counter_generator;
mod constant_generator;
mod counter_generator;
mod discrete_generator;
mod uniform_long_generator;
mod zipfian_generator;

pub use acknowledged_counter_generator::AcknowledgedCounterGenerator;
pub use constant_generator::ConstantGenerator;
pub use counter_generator::CounterGenerator;
pub use discrete_generator::{DiscreteGenerator, WeightPair};
use rand::prelude::SmallRng;
pub use uniform_long_generator::UniformLongGenerator;
pub use zipfian_generator::ZipfianGenerator;

use std::string::ToString;

pub trait Generator<T> {
    fn next_value(&self, rng: &mut SmallRng) -> T;
}

pub trait NumberGenerator<T>: Generator<T> {
    fn mean(&self) -> T;
}

pub struct GeneratorImpl<T: ToString + Clone + Send, G: Generator<T>> {
    last_value: Option<T>,
    generator: G,
}

impl<T, G> GeneratorImpl<T, G>
where
    G: Generator<T>,
    T: ToString + Clone + Send,
{
    pub fn new(generator: G) -> Self {
        Self {
            generator,
            last_value: None,
        }
    }

    pub fn next_value(&mut self, rng: &mut SmallRng) -> T {
        let v = self.generator.next_value(rng);
        self.last_value = Some(v.clone());
        v
    }

    pub fn last_value(&self) -> T {
        self.last_value.clone().unwrap()
    }

    pub fn next_string(&mut self, rng: &mut SmallRng) -> String {
        self.next_value(rng).to_string()
    }

    pub fn last_string(&self) -> String {
        self.last_value().to_string()
    }
}

--------------------------------------------------------------------------------
/src/generator/discrete_generator.rs:
--------------------------------------------------------------------------------
use super::Generator;
use rand::prelude::*;

pub struct WeightPair<T: Clone> {
    weight: f64,
    value: T,
}

impl<T: Clone> WeightPair<T> {
    pub fn new(weight: f64, value: impl Into<T>) -> Self {
        Self {
            weight,
            value: value.into(),
        }
    }
}

pub struct DiscreteGenerator<T: Clone> {
    values: Vec<WeightPair<T>>,
    sum: f64,
}

impl<T: Clone> DiscreteGenerator<T> {
    pub fn new(values: Vec<WeightPair<T>>) -> Self {
        let mut sum = 0.0;
        for WeightPair { weight, .. } in &values {
            sum += *weight;
        }
        Self { values, sum }
    }
}

impl<T: Clone> Generator<T> for DiscreteGenerator<T> {
    fn next_value(&self, rng: &mut SmallRng) -> T {
        let mut val = rng.gen::<f64>();
        for WeightPair { weight, value } in &self.values {
            let pw = *weight / self.sum;
            if val < pw {
                return value.clone();
            }
            val -= pw;
        }
        unreachable!();
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_discrete_generator() {
        let weight_pairs = vec![WeightPair::new(0.3, "test"), WeightPair::new(0.7, "b")];
        let generator = DiscreteGenerator::<String>::new(weight_pairs);
        let mut result = std::collections::HashMap::new();
        let mut rng = SmallRng::from_entropy();
        for _i in 0..10000 {
            let val = generator.next_value(&mut rng);
            result.entry(val).and_modify(|x| *x += 1).or_insert(1);
        }
        println!("{:?}", result);
    }
}

--------------------------------------------------------------------------------
/src/sqlite.rs:
--------------------------------------------------------------------------------
use crate::db::DB;

use anyhow::Result;
use sql_builder::SqlBuilder;
use sqlite::{Connection, OpenFlags, State};
use std::collections::HashMap;

const PRIMARY_KEY: &str = "y_id";

pub struct SQLite {
    conn: Connection,
}

impl SQLite {
    pub fn new() -> Result<Self> {
        let flags = OpenFlags::new().set_read_write().set_no_mutex();
        let mut conn = Connection::open_with_flags("test.db", flags)?;
        conn.set_busy_timeout(5000)?;
        Ok(SQLite { conn })
    }
}

impl DB for SQLite {
    fn init(&self) -> Result<()> {
        Ok(())
    }

    fn insert(&self, table: &str, key: &str, values: &HashMap<&str, String>) -> Result<()> {
        // TODO: cache prepared statement
        let mut sql = SqlBuilder::insert_into(table);
        let mut vals: Vec<String> = Vec::new();
        sql.field(PRIMARY_KEY);
        vals.push(format!(":{}", PRIMARY_KEY));
        for key in values.keys() {
            sql.field(key);
            let marker = format!(":{}", key);
            vals.push(marker);
        }
        sql.values(&vals);
        let sql = sql.sql()?;
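        // Each :name placeholder collected in `vals` above is bound by name
        // below before the statement is stepped.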
        let mut stmt = self.conn.prepare(sql)?;
        let marker = format!(":{}", PRIMARY_KEY);
        stmt.bind_by_name(&marker, key)?;
        for (key, value) in values {
            let marker = format!(":{}", key);
            stmt.bind_by_name(&marker, &value[..])?;
        }
        let state = stmt.next()?;
        assert!(state == State::Done);
        Ok(())
    }

    fn read(&self, table: &str, key: &str, result: &mut HashMap<String, String>) -> Result<()> {
        // TODO: cache prepared statement
        let mut sql = SqlBuilder::select_from(table);
        sql.field("*");
        // TODO: fields
        sql.and_where(format!("{} = :{}", PRIMARY_KEY, PRIMARY_KEY));
        let sql = sql.sql()?;
        let mut stmt = self.conn.prepare(sql)?;
        let marker = format!(":{}", PRIMARY_KEY);
        stmt.bind_by_name(&marker, key)?;
        while let State::Row = stmt.next().unwrap() {
            for idx in 0..stmt.column_count() {
                let key = stmt.column_name(idx);
                let value = stmt.read::<String>(idx).unwrap();
                result.insert(key.to_string(), value);
            }
        }
        // TODO: results
        Ok(())
    }
}

--------------------------------------------------------------------------------
/src/properties.rs:
--------------------------------------------------------------------------------
use serde::Deserialize;

fn zero_u64() -> u64 {
    0
}

fn thread_count_default() -> u64 {
    200
}

fn field_length_distribution_default() -> String {
    "constant".to_string()
}

fn request_distribution_default() -> String {
    "uniform".to_string()
}

fn field_length_default() -> u64 {
    100
}

fn read_proportion_default() -> f64 {
    0.95
}

fn update_proportion_default() -> f64 {
    0.95
}

fn insert_proportion_default() -> f64 {
    0.0
}

fn scan_proportion_default() -> f64 {
    0.0
}

fn read_modify_write_proportion_default() -> f64 {
    0.0
}

#[derive(Deserialize, Debug)]
pub struct Properties {
    #[serde(default = "zero_u64", rename = "insertstart")]
    pub insert_start: u64,
    #[serde(default = "zero_u64", rename = "insertcount")]
    pub insert_count: u64,
    #[serde(rename = "operationcount")]
    pub operation_count: u64,
    #[serde(default = "zero_u64", rename = "recordcount")]
    pub record_count: u64,
    #[serde(default = "thread_count_default", rename = "threadcount")]
    pub thread_count: u64,
    #[serde(rename = "maxexecutiontime")]
    pub max_execution_time: Option<u64>,
    #[serde(rename = "warmuptime")]
    pub warmup_time: Option<u64>,
    // field length
    #[serde(
        default = "field_length_distribution_default",
        rename = "fieldlengthdistribution"
    )]
    pub field_length_distribution: String,
    #[serde(
        default = "request_distribution_default",
        rename = "requestdistribution"
    )]
    pub request_distribution: String,
    #[serde(default = "field_length_default", rename = "fieldlength")]
    pub field_length: u64,

    // read, update, insert, scan, read-modify-write
    #[serde(default = "read_proportion_default", rename = "readproportion")]
    pub read_proportion: f64,
    #[serde(default = "update_proportion_default", rename = "updateproportion")]
    pub update_proportion: f64,
    #[serde(default = "insert_proportion_default", rename = "insertproportion")]
    pub insert_proportion: f64,
    #[serde(default = "scan_proportion_default", rename = "scanproportion")]
    pub scan_proportion: f64,
    #[serde(
        default = "read_modify_write_proportion_default",
        rename = "readmodifywriteproportion"
    )]
    pub read_modify_write_proportion: f64,
}

--------------------------------------------------------------------------------
/src/main.rs:
--------------------------------------------------------------------------------
use crate::db::DB;
use crate::workload::Workload;
use anyhow::{bail, Result};
use properties::Properties;
use std::fs;
use std::rc::Rc;
use std::sync::Arc;
use std::thread;
use std::time::Instant;
use structopt::StructOpt;
use workload::CoreWorkload;

pub mod db;
pub mod generator;
pub mod properties;
pub mod sqlite;
pub mod workload;

#[derive(StructOpt, Debug)]
#[structopt(name = "ycsb")]
struct Opt {
    #[structopt(name = "COMMANDS")]
    commands: Vec<String>,
    #[structopt(short, long)]
    database: String,
    #[structopt(short, long)]
    workload: String,
    #[structopt(short, long, default_value = "1")]
    threads: usize,
}

fn load(wl: Arc<dyn Workload>, db: Rc<dyn DB>, operation_count: usize) {
    for _ in 0..operation_count {
        wl.do_insert(db.clone());
    }
}

fn run(wl: Arc<dyn Workload>, db: Rc<dyn DB>, operation_count: usize) {
    for _ in 0..operation_count {
        wl.do_transaction(db.clone());
    }
}

fn main() -> Result<()> {
    let opt = Opt::from_args();

    let raw_props = fs::read_to_string(&opt.workload)?;

    let props: Properties = toml::from_str(&raw_props)?;

    let props = Arc::new(props);

    let wl = Arc::new(CoreWorkload::new(&props));

    if opt.commands.is_empty() {
        bail!("no command specified");
    }

    let database = opt.database.clone();
    let thread_operation_count = props.operation_count as usize / opt.threads;
    for cmd in opt.commands {
        let start = Instant::now();
        let mut threads = vec![];
        for _ in 0..opt.threads {
            let database = database.clone();
            let wl = wl.clone();
            let cmd = cmd.clone();
            threads.push(thread::spawn(move || {
                let db = db::create_db(&database).unwrap();

                db.init().unwrap();

                match &cmd[..] {
                    "load" => load(wl.clone(), db, thread_operation_count as usize),
                    "run" => run(wl.clone(), db, thread_operation_count as usize),
                    cmd => panic!("invalid command: {}", cmd),
                };
            }));
        }
        for t in threads {
            let _ = t.join();
        }
        let runtime = start.elapsed().as_millis();
        println!("[OVERALL], ThreadCount, {}", opt.threads);
        println!("[OVERALL], RunTime(ms), {}", runtime);
        let throughput = props.operation_count as f64 / (runtime as f64 / 1000.0);
        println!("[OVERALL], Throughput(ops/sec), {}", throughput);
    }

    Ok(())
}

--------------------------------------------------------------------------------
/workloads/workloada.toml:
--------------------------------------------------------------------------------
# Copyright (c) 2010 Yahoo! Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License.
You 5 | # may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 12 | # implied. See the License for the specific language governing 13 | # permissions and limitations under the License. See accompanying 14 | # LICENSE file. 15 | 16 | # Yahoo! Cloud System Benchmark 17 | # Workload A: Update heavy workload 18 | # Application example: Session store recording recent actions 19 | # 20 | # Read/update ratio: 50/50 21 | # Default data size: 1 KB records (10 fields, 100 bytes each, plus key) 22 | # Request distribution: zipfian 23 | 24 | operationcount = 1000 25 | recordcount = 1000 26 | workload = "core" 27 | 28 | readallfields = true 29 | 30 | insertproportion = 0 31 | readproportion = 0.5 32 | scanproportion = 0 33 | updateproportion = 0.5 34 | 35 | requestdistribution = "uniform" 36 | -------------------------------------------------------------------------------- /src/generator/acknowledged_counter_generator.rs: -------------------------------------------------------------------------------- 1 | use super::{CounterGenerator, Generator}; 2 | use rand::prelude::*; 3 | use std::sync::{ 4 | atomic::{AtomicBool, AtomicU64, Ordering}, 5 | Mutex, 6 | }; 7 | 8 | const WINDOW_SIZE: u64 = 1 << 20; 9 | const WINDOW_MASK: u64 = WINDOW_SIZE - 1; 10 | 11 | pub struct AcknowledgedCounterGenerator { 12 | counter: CounterGenerator, 13 | window: Vec, 14 | limit: AtomicU64, 15 | core: Mutex<()>, 16 | } 17 | 18 | impl AcknowledgedCounterGenerator { 19 | pub fn new(count_start: u64) -> Self { 20 | let counter = CounterGenerator::new(count_start); 21 | let mut window = Vec::with_capacity(WINDOW_SIZE as usize); 22 | for _i in 0..WINDOW_SIZE { 23 | window.push(AtomicBool::new(false)); 24 | } 25 | Self { 26 | counter, 27 | window, 28 | limit: AtomicU64::new(count_start - 1), 29 | core: Mutex::new(()), 30 | } 31 | } 32 | 33 | pub fn acknowledge(&self, value: u64) { 34 | let current_slot = value & WINDOW_MASK; 35 | let slot = &self.window[current_slot as usize]; 36 | if slot.swap(true, Ordering::SeqCst) { 37 | panic!("too many unacknowledged requests"); 38 | } 39 | if let Ok(_lock) = self.core.try_lock() { 40 | let limit = self.limit.load(Ordering::SeqCst); 41 | let before_first_slot = limit & WINDOW_MASK; 42 | let mut index = limit + 1; 43 | let new_index = loop { 44 | if index != before_first_slot { 45 | let slot = (index & WINDOW_MASK) as usize; 46 | if !self.window[slot].load(Ordering::SeqCst) { 47 | break index; 48 | } 49 | self.window[slot].store(false, Ordering::SeqCst); 50 | } else { 51 | break index; 52 | } 53 | index += 1; 54 | }; 55 | self.limit.store(new_index - 1, Ordering::SeqCst); 56 | } 57 | } 58 | 59 | pub fn last_value(&self) -> u64 { 60 | self.limit.load(Ordering::SeqCst) 61 | } 62 | } 63 | 64 | impl Generator for AcknowledgedCounterGenerator { 65 | fn next_value(&self, rng: &mut SmallRng) -> u64 { 66 | self.counter.next_value(rng) 67 | } 68 | } 69 | 70 | #[cfg(test)] 71 | mod tests { 72 | use super::*; 73 | 74 | #[test] 75 | fn test_counter() { 76 | let generator = AcknowledgedCounterGenerator::new(1); 77 | let mut rng = SmallRng::from_entropy(); 78 | assert_eq!(generator.next_value(&mut rng), 1); 79 | assert_eq!(generator.last_value(), 0); 80 | assert_eq!(generator.next_value(&mut rng), 2); 81 | assert_eq!(generator.last_value(), 0); 82 | 
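        // last_value() lags next_value(): the limit only advances as the
        // issued values are acknowledged in order below.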
generator.acknowledge(1); 83 | assert_eq!(generator.last_value(), 1); 84 | generator.acknowledge(2); 85 | assert_eq!(generator.last_value(), 2); 86 | generator.acknowledge(1); 87 | assert_eq!(generator.last_value(), 2); 88 | } 89 | } 90 | -------------------------------------------------------------------------------- /workloads/workloadc.toml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2010 Yahoo! Inc. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); you 4 | # may not use this file except in compliance with the License. You 5 | # may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 12 | # implied. See the License for the specific language governing 13 | # permissions and limitations under the License. See accompanying 14 | # LICENSE file. 15 | 16 | # Yahoo! Cloud System Benchmark 17 | # Workload C: Read only 18 | # Application example: user profile cache, where profiles are constructed elsewhere (e.g., Hadoop) 19 | # 20 | # Read/update ratio: 100/0 21 | # Default data size: 1 KB records (10 fields, 100 bytes each, plus key) 22 | # Request distribution: zipfian 23 | 24 | operationcount = 1000 25 | recordcount = 1000 26 | workload = "core" 27 | 28 | readallfields = true 29 | 30 | insertproportion = 0 31 | readproportion = 1 32 | scanproportion = 0 33 | updateproportion = 0 34 | 35 | requestdistribution = "uniform" 36 | -------------------------------------------------------------------------------- /workloads/workloadb.toml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2010 Yahoo! Inc. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); you 4 | # may not use this file except in compliance with the License. You 5 | # may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 12 | # implied. See the License for the specific language governing 13 | # permissions and limitations under the License. See accompanying 14 | # LICENSE file. 15 | 16 | # Yahoo! Cloud System Benchmark 17 | # Workload B: Read mostly workload 18 | # Application example: photo tagging; add a tag is an update, but most operations are to read tags 19 | # 20 | # Read/update ratio: 95/5 21 | # Default data size: 1 KB records (10 fields, 100 bytes each, plus key) 22 | # Request distribution: zipfian 23 | 24 | operationcount = 1000 25 | recordcount = 1000 26 | workload = "core" 27 | 28 | readallfields = true 29 | 30 | insertproportion = 0 31 | readproportion = 0.95 32 | scanproportion = 0 33 | updateproportion = 0.05 34 | 35 | requestdistribution = "uniform" 36 | -------------------------------------------------------------------------------- /workloads/workloadf.toml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2010 Yahoo! Inc. All rights reserved. 
2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); you 4 | # may not use this file except in compliance with the License. You 5 | # may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 12 | # implied. See the License for the specific language governing 13 | # permissions and limitations under the License. See accompanying 14 | # LICENSE file. 15 | 16 | # Yahoo! Cloud System Benchmark 17 | # Workload F: Read-modify-write workload 18 | # Application example: user database, where user records are read and modified by the user or to record user activity. 19 | # 20 | # Read/read-modify-write ratio: 50/50 21 | # Default data size: 1 KB records (10 fields, 100 bytes each, plus key) 22 | # Request distribution: zipfian 23 | 24 | operationcount = 1000 25 | recordcount = 1000 26 | workload = "core" 27 | 28 | readallfields = true 29 | 30 | insertproportion = 0 31 | readmodifywriteproportion = 0.5 32 | readproportion = 0.5 33 | scanproportion = 0 34 | updateproportion = 0 35 | 36 | requestdistribution = "uniform" 37 | -------------------------------------------------------------------------------- /workloads/workloadd.toml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2010 Yahoo! Inc. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); you 4 | # may not use this file except in compliance with the License. You 5 | # may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 12 | # implied. See the License for the specific language governing 13 | # permissions and limitations under the License. See accompanying 14 | # LICENSE file. 15 | 16 | # Yahoo! Cloud System Benchmark 17 | # Workload D: Read latest workload 18 | # Application example: user status updates; people want to read the latest 19 | # 20 | # Read/update/insert ratio: 95/0/5 21 | # Default data size: 1 KB records (10 fields, 100 bytes each, plus key) 22 | # Request distribution: latest 23 | 24 | # The insert order for this is hashed, not ordered. The "latest" items may be 25 | # scattered around the keyspace if they are keyed by userid.timestamp. A workload 26 | # which orders items purely by time, and demands the latest, is very different than 27 | # workload here (which we believe is more typical of how people build systems.) 28 | 29 | operationcount = 1000 30 | recordcount = 1000 31 | workload = "core" 32 | 33 | readallfields = true 34 | 35 | insertproportion = 0.05 36 | readproportion = 0.95 37 | scanproportion = 0 38 | updateproportion = 0 39 | 40 | requestdistribution = "latest" 41 | -------------------------------------------------------------------------------- /workloads/workloade.toml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2010 Yahoo! Inc. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); you 4 | # may not use this file except in compliance with the License. 
You 5 | # may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 12 | # implied. See the License for the specific language governing 13 | # permissions and limitations under the License. See accompanying 14 | # LICENSE file. 15 | 16 | # Yahoo! Cloud System Benchmark 17 | # Workload E: Short ranges 18 | # Application example: threaded conversations, where each scan is for the posts in a given thread (assumed to be clustered by thread id) 19 | # 20 | # Scan/insert ratio: 95/5 21 | # Default data size: 1 KB records (10 fields, 100 bytes each, plus key) 22 | # Request distribution: zipfian 23 | 24 | # The insert order is hashed, not ordered. Although the scans are ordered, it does not necessarily 25 | # follow that the data is inserted in order. For example, posts for thread 342 may not be inserted contiguously, but 26 | # instead interspersed with posts from lots of other threads. The way the YCSB client works is that it will pick a start 27 | # key, and then request a number of records; this works fine even for hashed insertion. 28 | 29 | operationcount = 1000 30 | recordcount = 1000 31 | workload = "core" 32 | 33 | readallfields = true 34 | 35 | insertproportion = 0.05 36 | readproportion = 0 37 | scanproportion = 0.95 38 | updateproportion = 0 39 | 40 | requestdistribution = "uniform" 41 | 42 | maxscanlength = 1 43 | 44 | scanlengthdistribution = "uniform" 45 | -------------------------------------------------------------------------------- /src/generator/zipfian_generator.rs: -------------------------------------------------------------------------------- 1 | use super::{Generator, NumberGenerator}; 2 | use rand::prelude::*; 3 | 4 | pub const ZIPFIAN_CONSTANT: f64 = 0.99; 5 | 6 | #[allow(dead_code)] 7 | struct ZipfianParameters { 8 | alpha: f64, 9 | zetan: f64, 10 | eta: f64, 11 | theta: f64, 12 | zeta2theta: f64, 13 | } 14 | 15 | #[allow(dead_code)] 16 | pub struct ZipfianGenerator { 17 | items: u64, 18 | base: u64, 19 | zipfian_constant: f64, 20 | zipfian_parameters: ZipfianParameters, 21 | count_for_zeta: u64, 22 | allow_item_count_decrease: bool, 23 | } 24 | 25 | fn zeta_4(st: u64, n: u64, theta: f64, initial_sum: f64) -> f64 { 26 | let mut sum = initial_sum; 27 | for i in st..n { 28 | sum += 1.0 / (i as f64 + 1.0).powf(theta); 29 | } 30 | sum 31 | } 32 | 33 | fn zeta_2(n: u64, theta: f64) -> f64 { 34 | zeta_4(0, n, theta, 0.0) 35 | } 36 | 37 | impl ZipfianGenerator { 38 | pub fn from_items(items: u64) -> Self { 39 | Self::from_range(0, items - 1) 40 | } 41 | 42 | pub fn from_range(min: u64, max: u64) -> Self { 43 | Self::from_range_const(min, max, ZIPFIAN_CONSTANT) 44 | } 45 | 46 | pub fn from_range_const(min: u64, max: u64, zipfian_constant: f64) -> Self { 47 | Self::new( 48 | min, 49 | max, 50 | zipfian_constant, 51 | zeta_2(max - min + 1, zipfian_constant), 52 | ) 53 | } 54 | 55 | pub fn new(min: u64, max: u64, zipfian_constant: f64, zetan: f64) -> Self { 56 | let theta = zipfian_constant; 57 | let zeta2theta = zeta_2(2, theta); 58 | let items = max - min + 1; 59 | let zipfian_parameters = ZipfianParameters { 60 | alpha: 1.0 / (1.0 - theta), 61 | zetan, 62 | eta: (1.0 - (2.0 / items as f64).powf(1.0 - theta)) / (1.0 - zeta2theta / zetan), 63 | theta, 64 | zeta2theta, 65 | }; 66 | Self { 67 | items, 68 | base: min, 69 | 
zipfian_constant, 70 | zipfian_parameters, 71 | count_for_zeta: items, 72 | allow_item_count_decrease: false, 73 | } 74 | } 75 | 76 | fn next_long(&self, item_count: u64, rng: &mut SmallRng) -> u64 { 77 | if item_count != self.count_for_zeta { 78 | /* 79 | if item_count > self.count_for_zeta { 80 | warn!("incrementally recomputing Zipfian distribtion (increase)"); 81 | self.zipfian_parameters.zetan = zeta_4( 82 | self.count_for_zeta, 83 | item_count, 84 | self.zipfian_parameters.theta, 85 | self.zipfian_parameters.zetan, 86 | ); 87 | } 88 | if item_count < self.count_for_zeta && self.allow_item_count_decrease { 89 | warn!("incrementally recomputing Zipfian distribtion (decrease). This is slow and should be avoided."); 90 | self.zipfian_parameters.zetan = zeta_2(item_count, self.zipfian_parameters.theta); 91 | } 92 | 93 | self.count_for_zeta = item_count; 94 | self.zipfian_parameters.eta = (1.0 95 | - (2.0 / self.items as f64).powf(1.0 - self.zipfian_parameters.theta)) 96 | / (1.0 - self.zipfian_parameters.zeta2theta / self.zipfian_parameters.zetan); 97 | */ 98 | todo!("change item count after creating zipfian is not yet supported"); 99 | } 100 | 101 | let u = rng.gen::(); 102 | let uz = u * self.zipfian_parameters.zetan; 103 | 104 | if uz < 1.0 { 105 | return self.base; 106 | } 107 | 108 | if uz < 1.0 + (0.5_f64).powf(self.zipfian_parameters.theta) { 109 | return self.base + 1; 110 | } 111 | 112 | self.base 113 | + (item_count as f64 114 | * (self.zipfian_parameters.eta * u - self.zipfian_parameters.eta + 1.0) 115 | .powf(self.zipfian_parameters.alpha)) as u64 116 | } 117 | } 118 | 119 | impl Generator for ZipfianGenerator { 120 | fn next_value(&self, rng: &mut SmallRng) -> u64 { 121 | self.next_long(self.items, rng) 122 | } 123 | } 124 | 125 | impl NumberGenerator for ZipfianGenerator { 126 | fn mean(&self) -> u64 { 127 | todo!("implement ZipfianGenerator::mean") 128 | } 129 | } 130 | 131 | #[cfg(test)] 132 | mod tests { 133 | use super::*; 134 | 135 | #[test] 136 | fn test_min_and_max_parameter() { 137 | let min = 5; 138 | let max = 10; 139 | let zipfian = ZipfianGenerator::from_range(min, max); 140 | let mut result = std::collections::HashMap::new(); 141 | let mut rng = SmallRng::from_entropy(); 142 | for _i in 0..100000 { 143 | let val = zipfian.next_value(&mut rng); 144 | assert!(val >= min); 145 | assert!(val <= max); 146 | result.entry(val).and_modify(|x| *x += 1).or_insert(1); 147 | } 148 | println!("{:?}", result); 149 | } 150 | } 151 | -------------------------------------------------------------------------------- /src/workload/core_workload.rs: -------------------------------------------------------------------------------- 1 | use crate::db::DB; 2 | use crate::workload::Workload; 3 | use rand::distributions::{Alphanumeric, DistString}; 4 | use rand::rngs::SmallRng; 5 | use rand::SeedableRng; 6 | use std::collections::HashMap; 7 | use std::rc::Rc; 8 | use std::sync::Mutex; 9 | 10 | use crate::generator::{ 11 | AcknowledgedCounterGenerator, ConstantGenerator, CounterGenerator, DiscreteGenerator, 12 | Generator, UniformLongGenerator, WeightPair, ZipfianGenerator, 13 | }; 14 | use crate::properties::Properties; 15 | 16 | #[derive(Copy, Clone, Debug)] 17 | pub enum CoreOperation { 18 | Read, 19 | Update, 20 | Insert, 21 | Scan, 22 | ReadModifyWrite, 23 | } 24 | 25 | impl std::fmt::Display for CoreOperation { 26 | fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 27 | write!(f, "{:?}", self) 28 | } 29 | } 30 | 31 | #[allow(dead_code)] 32 | pub struct CoreWorkload 
{
    rng: Mutex<SmallRng>,
    table: String,
    field_count: u64,
    field_names: Vec<String>,
    field_length_generator: Mutex<Box<dyn Generator<u64> + Send>>,
    read_all_fields: bool,
    write_all_fields: bool,
    data_integrity: bool,
    key_sequence: Mutex<Box<dyn Generator<u64> + Send>>,
    operation_chooser: Mutex<DiscreteGenerator<CoreOperation>>,
    key_chooser: Mutex<Box<dyn Generator<u64> + Send>>,
    //field_chooser: Box<dyn Generator<u64>>,
    transaction_insert_key_sequence: Mutex<AcknowledgedCounterGenerator>,
    //scan_length: Box<dyn Generator<u64>>,
    ordered_inserts: bool,
    record_count: usize,
    zero_padding: usize,
    insertion_retry_limit: u64,
    insertion_retry_interval: u64,
}

impl CoreWorkload {
    pub fn new(prop: &Properties) -> Self {
        let rng = SmallRng::from_entropy();
        let field_name_prefix = "field";
        let field_count = 10;
        let mut field_names = vec![];
        for i in 0..field_count {
            field_names.push(format!("{}{}", field_name_prefix, i));
        }
        CoreWorkload {
            rng: Mutex::new(rng),
            table: String::from("usertable"),
            field_count,
            field_names,
            field_length_generator: Mutex::new(get_field_length_generator(prop)),
            read_all_fields: true,
            write_all_fields: true,
            data_integrity: true,
            key_sequence: Mutex::new(Box::new(CounterGenerator::new(prop.insert_start))),
            operation_chooser: Mutex::new(create_operation_generator(prop)),
            key_chooser: Mutex::new(get_key_chooser_generator(prop)),
            //field_chooser: Box<dyn Generator<u64>>,
            transaction_insert_key_sequence: Mutex::new(AcknowledgedCounterGenerator::new(1)),
            //scan_length: Box<dyn Generator<u64>>,
            ordered_inserts: true,
            record_count: 1,
            zero_padding: 1,
            insertion_retry_limit: 0,
            insertion_retry_interval: 0,
        }
    }

    fn do_transaction_read(&self, db: Rc<dyn DB>) {
        let keynum = self.next_key_num();
        let dbkey = format!("{}", fnvhash64(keynum));
        let mut result = HashMap::new();
        db.read(&self.table, &dbkey, &mut result).unwrap();
        // TODO: verify rows
    }

    fn next_key_num(&self) -> u64 {
        // FIXME: Handle case where keychooser is an ExponentialGenerator.
        // FIXME: Handle case where keynum is > transactioninsertkeysequence's last value
        self.key_chooser
            .lock()
            .unwrap()
            .next_value(&mut self.rng.lock().unwrap())
    }
}

impl Workload for CoreWorkload {
    fn do_insert(&self, db: Rc<dyn DB>) {
        let dbkey = self
            .key_sequence
            .lock()
            .unwrap()
            .next_value(&mut self.rng.lock().unwrap());
        let dbkey = format!("{}", fnvhash64(dbkey));
        let mut values = HashMap::new();
        for field_name in &self.field_names {
            let field_len = self
                .field_length_generator
                .lock()
                .unwrap()
                .next_value(&mut self.rng.lock().unwrap());
            let s = Alphanumeric
                .sample_string::<SmallRng>(&mut self.rng.lock().unwrap(), field_len as usize);
            values.insert(&field_name[..], s);
        }
        db.insert(&self.table, &dbkey, &values).unwrap();
    }

    fn do_transaction(&self, db: Rc<dyn DB>) {
        let op = self
            .operation_chooser
            .lock()
            .unwrap()
            .next_value(&mut self.rng.lock().unwrap());
        match op {
            CoreOperation::Read => {
                self.do_transaction_read(db);
            }
            _ => todo!(),
        }
    }
}

// http://en.wikipedia.org/wiki/Fowler_Noll_Vo_hash
fn fnvhash64(val: u64) -> u64 {
    let mut val = val;
    let prime = 0xcbf29ce484222325;
    let mut hashval = prime;
    for _ in 0..8 {
        let octet = val & 0x00ff;
        val >>= 8;
        hashval ^= octet;
        hashval = hashval.wrapping_mul(prime);
    }
    hashval
}

fn get_field_length_generator(prop: &Properties) -> Box<dyn Generator<u64> + Send> {
    match prop.field_length_distribution.to_lowercase().as_str() {
        "constant" => Box::new(ConstantGenerator::new(prop.field_length)),
        "uniform" => Box::new(UniformLongGenerator::new(1, prop.field_length)),
        "zipfian" => Box::new(ZipfianGenerator::from_range(1, prop.field_length)),
        "histogram" => unimplemented!(),
        _ => panic!(
            "unknown field length distribution {}",
            prop.field_length_distribution
        ),
    }
}

fn get_key_chooser_generator(prop: &Properties) -> Box<dyn Generator<u64> + Send> {
    let insert_count = if prop.insert_count > 1 {
        prop.insert_count
    } else {
        prop.record_count - prop.insert_start
    };
    assert!(insert_count > 1);
    match prop.request_distribution.to_lowercase().as_str() {
        "uniform" => Box::new(UniformLongGenerator::new(
            prop.insert_start,
            prop.insert_start + insert_count - 1,
        )),
        _ => todo!(),
    }
}

fn create_operation_generator(prop: &Properties) -> DiscreteGenerator<CoreOperation> {
    let mut pairs = vec![];
    if prop.read_proportion > 0.0 {
        pairs.push(WeightPair::new(prop.read_proportion, CoreOperation::Read));
    }
    if prop.update_proportion > 0.0 {
        pairs.push(WeightPair::new(
            prop.update_proportion,
            CoreOperation::Update,
        ));
    }
    if prop.insert_proportion > 0.0 {
        pairs.push(WeightPair::new(
            prop.insert_proportion,
            CoreOperation::Insert,
        ));
    }
    if prop.scan_proportion > 0.0 {
        pairs.push(WeightPair::new(prop.scan_proportion, CoreOperation::Scan));
    }
    if prop.read_modify_write_proportion > 0.0 {
        pairs.push(WeightPair::new(
            prop.read_modify_write_proportion,
            CoreOperation::ReadModifyWrite,
        ));
    }

    DiscreteGenerator::new(pairs)
}
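The core workload above drives everything through these generators; the same sampling pattern can be exercised on its own. A minimal sketch, assuming the generator module were exposed from a library target (the crate only builds a binary today, so the `ycsb::generator` import path is hypothetical); the proportions mirror workloadb.toml:

use rand::prelude::*;
use ycsb::generator::{DiscreteGenerator, Generator, WeightPair, ZipfianGenerator};

fn main() {
    let mut rng = SmallRng::from_entropy();
    // 95% reads, 5% updates, the workloadb.toml mix.
    let ops = DiscreteGenerator::<&str>::new(vec![
        WeightPair::new(0.95, "read"),
        WeightPair::new(0.05, "update"),
    ]);
    // Zipfian-skewed key numbers over a 1000-record keyspace.
    let keys = ZipfianGenerator::from_items(1000);
    for _ in 0..5 {
        println!("{} key {}", ops.next_value(&mut rng), keys.next_value(&mut rng));
    }
}

CoreWorkload additionally passes the chosen key number through fnvhash64 before using it as the database key, so the hottest Zipfian keys end up scattered across the key space rather than clustered at the low end.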
-------------------------------------------------------------------------------- /workloads/workload_template.toml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2012-2016 YCSB contributors. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); you 4 | # may not use this file except in compliance with the License. You 5 | # may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 12 | # implied. See the License for the specific language governing 13 | # permissions and limitations under the License. See accompanying 14 | # LICENSE file. 15 | 16 | # Yahoo! Cloud System Benchmark 17 | # Workload Template: Default Values 18 | # 19 | # File contains all properties that can be set to define a 20 | # YCSB session. All properties are set to their default 21 | # value if one exists. If not, the property is commented 22 | # out. When a property has a finite number of settings, 23 | # the default is enabled and the alternates are shown in 24 | # comments below it. 25 | # 26 | # Use of most explained through comments in Client.java or 27 | # CoreWorkload.java or on the YCSB wiki page: 28 | # https://github.com/brianfrankcooper/YCSB/wiki/Core-Properties 29 | 30 | # The name of the workload class to use 31 | workload = "core" 32 | 33 | # There is no default setting for recordcount but it is 34 | # required to be set. 35 | # The number of records in the table to be inserted in 36 | # the load phase or the number of records already in the 37 | # table before the run phase. 38 | recordcount = 1000000 39 | 40 | # There is no default setting for operationcount but it is 41 | # required to be set. 42 | # The number of operations to use during the run phase. 43 | operationcount = 3000000 44 | 45 | # The number of thread. 46 | threadcount = 500 47 | 48 | # The number of insertions to do, if different from recordcount. 49 | # Used with insertstart to grow an existing table. 
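# When insertcount is left unset, this Rust port's get_key_chooser_generator
# falls back to recordcount - insertstart.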
50 | #insertcount= 51 | 52 | # The offset of the first insertion 53 | insertstart = 0 54 | 55 | # The number of fields in a record 56 | fieldcount = 10 57 | 58 | # The size of each field (in bytes) 59 | fieldlength = 100 60 | 61 | # Should read all fields 62 | readallfields = true 63 | 64 | # Should write all fields on update 65 | writeallfields = false 66 | 67 | # The distribution used to choose the length of a field 68 | fieldlengthdistribution = "constant" 69 | #fieldlengthdistribution = "uniform" 70 | #fieldlengthdistribution = "zipfian" 71 | 72 | # What proportion of operations are reads 73 | readproportion = 0.95 74 | 75 | # What proportion of operations are updates 76 | updateproportion = 0.05 77 | 78 | # What proportion of operations are inserts 79 | insertproportion = 0 80 | 81 | # What proportion of operations read then modify a record 82 | readmodifywriteproportion = 0 83 | 84 | # What proportion of operations are scans 85 | scanproportion = 0 86 | 87 | # On a single scan, the maximum number of records to access 88 | maxscanlength = 1000 89 | 90 | # The distribution used to choose the number of records to access on a scan 91 | scanlengthdistribution = "uniform" 92 | #scanlengthdistribution = "zipfian" 93 | 94 | # Should records be inserted in order or pseudo-randomly 95 | insertorder = "hashed" 96 | #insertorder = "ordered" 97 | 98 | # The distribution of requests across the keyspace 99 | requestdistribution = "zipfian" 100 | #requestdistribution = "uniform" 101 | #requestdistribution = "latest" 102 | 103 | # Percentage of data items that constitute the hot set 104 | hotspotdatafraction = 0.2 105 | 106 | # Percentage of operations that access the hot set 107 | hotspotopnfraction = 0.8 108 | 109 | # Maximum execution time in seconds 110 | #maxexecutiontime= 111 | 112 | # The name of the database table to run queries against 113 | table = "usertable" 114 | 115 | # The column family of fields (required by some databases) 116 | #columnfamily= 117 | 118 | # How the latency measurements are presented 119 | measurementtype = "histogram" 120 | #measurementtype = "timeseries" 121 | #measurementtype = "raw" 122 | # When measurementtype is set to raw, measurements will be output 123 | # as RAW datapoints in the following csv format: 124 | # "operation, timestamp of the measurement, latency in us" 125 | # 126 | # Raw datapoints are collected in-memory while the test is running. Each 127 | # data point consumes about 50 bytes (including java object overhead). 128 | # For a typical run of 1 million to 10 million operations, this should 129 | # fit into memory most of the time. If you plan to do 100s of millions of 130 | # operations per run, consider provisioning a machine with larger RAM when using 131 | # the RAW measurement type, or split the run into multiple runs. 132 | # 133 | # Optionally, you can specify an output file to save raw datapoints. 134 | # Otherwise, raw datapoints will be written to stdout. 135 | # The output file will be appended to if it already exists, otherwise 136 | # a new output file will be created. 137 | #measurement.raw.output_file = /tmp/your_output_file_for_this_run 138 | 139 | # JVM Reporting. 140 | # 141 | # Measure JVM information over time including GC counts, max and min memory 142 | # used, max and min thread counts, max and min system load and others. This 143 | # setting must be enabled in conjunction with the "-s" flag to run the status 144 | # thread. 
Every "status.interval", the status thread will capture JVM 145 | # statistics and record the results. At the end of the run, max and mins will 146 | # be recorded. 147 | # measurement.trackjvm = false 148 | 149 | [histogram] 150 | # The range of latencies to track in the histogram (milliseconds) 151 | buckets = 1000 152 | 153 | [timeseries] 154 | # Granularity for time series (in milliseconds) 155 | granularity = 1000 156 | 157 | # Latency reporting. 158 | # 159 | # YCSB records latency of failed operations separately from successful ones. 160 | # Latency of all OK operations will be reported under their operation name, 161 | # such as [READ], [UPDATE], etc. 162 | # 163 | # For failed operations: 164 | # By default we don't track latency numbers of specific error status. 165 | # We just report latency of all failed operation under one measurement name 166 | # such as [READ-FAILED]. But optionally, user can configure to have either: 167 | # 1. Record and report latency for each and every error status code by 168 | # setting reportLatencyForEachError to true, or 169 | # 2. Record and report latency for a select set of error status codes by 170 | # providing a CSV list of Status codes via the "latencytrackederrors" 171 | # property. 172 | # reportlatencyforeacherror=false 173 | # latencytrackederrors="" 174 | 175 | # Insertion error retry for the core workload. 176 | # 177 | # By default, the YCSB core workload does not retry any operations. 178 | # However, during the load process, if any insertion fails, the entire 179 | # load process is terminated. 180 | # If a user desires to have more robust behavior during this phase, they can 181 | # enable retry for insertion by setting the following property to a positive 182 | # number. 183 | # core_workload_insertion_retry_limit = 0 184 | # 185 | # the following number controls the interval between retries (in seconds): 186 | # core_workload_insertion_retry_interval = 3 187 | 188 | # Distributed Tracing via Apache HTrace (http://htrace.incubator.apache.org/) 189 | # 190 | # Defaults to blank / no tracing 191 | # Below sends to a local file, sampling at 0.1% 192 | # 193 | # htrace.sampler.classes=ProbabilitySampler 194 | # htrace.sampler.fraction=0.001 195 | # htrace.span.receiver.classes=org.apache.htrace.core.LocalFileSpanReceiver 196 | # htrace.local.file.span.receiver.path=/some/path/to/local/file 197 | # 198 | # To capture all spans, use the AlwaysSampler 199 | # 200 | # htrace.sampler.classes=AlwaysSampler 201 | # 202 | # To send spans to an HTraced receiver, use the below and ensure 203 | # your classpath contains the htrace-htraced jar (i.e. when invoking the ycsb 204 | # command add -cp /path/to/htrace-htraced.jar) 205 | # 206 | # htrace.span.receiver.classes=org.apache.htrace.impl.HTracedSpanReceiver 207 | # htrace.htraced.receiver.address=example.com:9075 208 | # htrace.htraced.error.log.period.ms=10000 209 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 
14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 
134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 
193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /Cargo.lock: -------------------------------------------------------------------------------- 1 | # This file is automatically @generated by Cargo. 2 | # It is not intended for manual editing. 3 | version = 3 4 | 5 | [[package]] 6 | name = "ansi_term" 7 | version = "0.12.1" 8 | source = "registry+https://github.com/rust-lang/crates.io-index" 9 | checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" 10 | dependencies = [ 11 | "winapi", 12 | ] 13 | 14 | [[package]] 15 | name = "anyhow" 16 | version = "1.0.51" 17 | source = "registry+https://github.com/rust-lang/crates.io-index" 18 | checksum = "8b26702f315f53b6071259e15dd9d64528213b44d61de1ec926eca7715d62203" 19 | 20 | [[package]] 21 | name = "atty" 22 | version = "0.2.14" 23 | source = "registry+https://github.com/rust-lang/crates.io-index" 24 | checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" 25 | dependencies = [ 26 | "hermit-abi", 27 | "libc", 28 | "winapi", 29 | ] 30 | 31 | [[package]] 32 | name = "bitflags" 33 | version = "1.3.2" 34 | source = "registry+https://github.com/rust-lang/crates.io-index" 35 | checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" 36 | 37 | [[package]] 38 | name = "cc" 39 | version = "1.0.72" 40 | source = "registry+https://github.com/rust-lang/crates.io-index" 41 | checksum = "22a9137b95ea06864e018375b72adfb7db6e6f68cfc8df5a04d00288050485ee" 42 | 43 | [[package]] 44 | name = "cfg-if" 45 | version = "1.0.0" 46 | source = "registry+https://github.com/rust-lang/crates.io-index" 47 | checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" 48 | 49 | [[package]] 50 | name = "clap" 51 | version = "2.34.0" 52 | source = "registry+https://github.com/rust-lang/crates.io-index" 53 | checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" 54 | dependencies = [ 55 | "ansi_term", 56 | "atty", 57 | "bitflags", 58 | "strsim", 59 | "textwrap", 60 | "unicode-width", 61 | "vec_map", 62 | ] 63 | 64 | [[package]] 65 | name = "getrandom" 66 | version = "0.2.3" 67 | source = "registry+https://github.com/rust-lang/crates.io-index" 68 | checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" 69 | dependencies = [ 70 | "cfg-if", 71 | "libc", 72 | "wasi", 73 | ] 74 | 75 | [[package]] 76 | name = "heck" 77 | version = "0.3.3" 78 | source = "registry+https://github.com/rust-lang/crates.io-index" 79 | checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c" 80 | dependencies = [ 81 | "unicode-segmentation", 82 | ] 83 | 84 | [[package]] 85 | name = "hermit-abi" 86 | version = "0.1.19" 87 | source = "registry+https://github.com/rust-lang/crates.io-index" 88 | checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" 89 | dependencies = [ 90 | "libc", 91 | ] 92 | 93 | [[package]] 94 | name = "lazy_static" 95 | version = "1.4.0" 96 | source = "registry+https://github.com/rust-lang/crates.io-index" 97 | checksum = 
"e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" 98 | 99 | [[package]] 100 | name = "libc" 101 | version = "0.2.109" 102 | source = "registry+https://github.com/rust-lang/crates.io-index" 103 | checksum = "f98a04dce437184842841303488f70d0188c5f51437d2a834dc097eafa909a01" 104 | 105 | [[package]] 106 | name = "log" 107 | version = "0.4.14" 108 | source = "registry+https://github.com/rust-lang/crates.io-index" 109 | checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" 110 | dependencies = [ 111 | "cfg-if", 112 | ] 113 | 114 | [[package]] 115 | name = "pkg-config" 116 | version = "0.3.23" 117 | source = "registry+https://github.com/rust-lang/crates.io-index" 118 | checksum = "d1a3ea4f0dd7f1f3e512cf97bf100819aa547f36a6eccac8dbaae839eb92363e" 119 | 120 | [[package]] 121 | name = "ppv-lite86" 122 | version = "0.2.15" 123 | source = "registry+https://github.com/rust-lang/crates.io-index" 124 | checksum = "ed0cfbc8191465bed66e1718596ee0b0b35d5ee1f41c5df2189d0fe8bde535ba" 125 | 126 | [[package]] 127 | name = "proc-macro-error" 128 | version = "1.0.4" 129 | source = "registry+https://github.com/rust-lang/crates.io-index" 130 | checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" 131 | dependencies = [ 132 | "proc-macro-error-attr", 133 | "proc-macro2", 134 | "quote", 135 | "syn", 136 | "version_check", 137 | ] 138 | 139 | [[package]] 140 | name = "proc-macro-error-attr" 141 | version = "1.0.4" 142 | source = "registry+https://github.com/rust-lang/crates.io-index" 143 | checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" 144 | dependencies = [ 145 | "proc-macro2", 146 | "quote", 147 | "version_check", 148 | ] 149 | 150 | [[package]] 151 | name = "proc-macro2" 152 | version = "1.0.33" 153 | source = "registry+https://github.com/rust-lang/crates.io-index" 154 | checksum = "fb37d2df5df740e582f28f8560cf425f52bb267d872fe58358eadb554909f07a" 155 | dependencies = [ 156 | "unicode-xid", 157 | ] 158 | 159 | [[package]] 160 | name = "quote" 161 | version = "1.0.10" 162 | source = "registry+https://github.com/rust-lang/crates.io-index" 163 | checksum = "38bc8cc6a5f2e3655e0899c1b848643b2562f853f114bfec7be120678e3ace05" 164 | dependencies = [ 165 | "proc-macro2", 166 | ] 167 | 168 | [[package]] 169 | name = "rand" 170 | version = "0.8.4" 171 | source = "registry+https://github.com/rust-lang/crates.io-index" 172 | checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8" 173 | dependencies = [ 174 | "libc", 175 | "rand_chacha", 176 | "rand_core", 177 | "rand_hc", 178 | ] 179 | 180 | [[package]] 181 | name = "rand_chacha" 182 | version = "0.3.1" 183 | source = "registry+https://github.com/rust-lang/crates.io-index" 184 | checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" 185 | dependencies = [ 186 | "ppv-lite86", 187 | "rand_core", 188 | ] 189 | 190 | [[package]] 191 | name = "rand_core" 192 | version = "0.6.3" 193 | source = "registry+https://github.com/rust-lang/crates.io-index" 194 | checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" 195 | dependencies = [ 196 | "getrandom", 197 | ] 198 | 199 | [[package]] 200 | name = "rand_hc" 201 | version = "0.3.1" 202 | source = "registry+https://github.com/rust-lang/crates.io-index" 203 | checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7" 204 | dependencies = [ 205 | "rand_core", 206 | ] 207 | 208 | [[package]] 209 | name = "serde" 210 | version = "1.0.130" 211 | source = 
"registry+https://github.com/rust-lang/crates.io-index" 212 | checksum = "f12d06de37cf59146fbdecab66aa99f9fe4f78722e3607577a5375d66bd0c913" 213 | dependencies = [ 214 | "serde_derive", 215 | ] 216 | 217 | [[package]] 218 | name = "serde_derive" 219 | version = "1.0.130" 220 | source = "registry+https://github.com/rust-lang/crates.io-index" 221 | checksum = "d7bc1a1ab1961464eae040d96713baa5a724a8152c1222492465b54322ec508b" 222 | dependencies = [ 223 | "proc-macro2", 224 | "quote", 225 | "syn", 226 | ] 227 | 228 | [[package]] 229 | name = "sql-builder" 230 | version = "3.1.1" 231 | source = "registry+https://github.com/rust-lang/crates.io-index" 232 | checksum = "b1008d95d2ec2d062959352527be30e10fec42a1aa5e5a48d990a5ff0fb9bdc0" 233 | dependencies = [ 234 | "anyhow", 235 | "thiserror", 236 | ] 237 | 238 | [[package]] 239 | name = "sqlite" 240 | version = "0.26.0" 241 | source = "registry+https://github.com/rust-lang/crates.io-index" 242 | checksum = "3fb1a534c07ec276fbbe0e55a1c00814d8563da3a2f4d9d9d4c802bd1278db6a" 243 | dependencies = [ 244 | "libc", 245 | "sqlite3-sys", 246 | ] 247 | 248 | [[package]] 249 | name = "sqlite3-src" 250 | version = "0.3.0" 251 | source = "registry+https://github.com/rust-lang/crates.io-index" 252 | checksum = "a260b07ce75a0644c6f5891f34f46db9869e731838e95293469ab17999abcfa3" 253 | dependencies = [ 254 | "cc", 255 | "pkg-config", 256 | ] 257 | 258 | [[package]] 259 | name = "sqlite3-sys" 260 | version = "0.13.0" 261 | source = "registry+https://github.com/rust-lang/crates.io-index" 262 | checksum = "04d2f028faeb14352df7934b4771806f60d61ce61be1928ec92396d7492e2e54" 263 | dependencies = [ 264 | "libc", 265 | "sqlite3-src", 266 | ] 267 | 268 | [[package]] 269 | name = "strsim" 270 | version = "0.8.0" 271 | source = "registry+https://github.com/rust-lang/crates.io-index" 272 | checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" 273 | 274 | [[package]] 275 | name = "structopt" 276 | version = "0.3.25" 277 | source = "registry+https://github.com/rust-lang/crates.io-index" 278 | checksum = "40b9788f4202aa75c240ecc9c15c65185e6a39ccdeb0fd5d008b98825464c87c" 279 | dependencies = [ 280 | "clap", 281 | "lazy_static", 282 | "structopt-derive", 283 | ] 284 | 285 | [[package]] 286 | name = "structopt-derive" 287 | version = "0.4.18" 288 | source = "registry+https://github.com/rust-lang/crates.io-index" 289 | checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" 290 | dependencies = [ 291 | "heck", 292 | "proc-macro-error", 293 | "proc-macro2", 294 | "quote", 295 | "syn", 296 | ] 297 | 298 | [[package]] 299 | name = "syn" 300 | version = "1.0.82" 301 | source = "registry+https://github.com/rust-lang/crates.io-index" 302 | checksum = "8daf5dd0bb60cbd4137b1b587d2fc0ae729bc07cf01cd70b36a1ed5ade3b9d59" 303 | dependencies = [ 304 | "proc-macro2", 305 | "quote", 306 | "unicode-xid", 307 | ] 308 | 309 | [[package]] 310 | name = "textwrap" 311 | version = "0.11.0" 312 | source = "registry+https://github.com/rust-lang/crates.io-index" 313 | checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" 314 | dependencies = [ 315 | "unicode-width", 316 | ] 317 | 318 | [[package]] 319 | name = "thiserror" 320 | version = "1.0.30" 321 | source = "registry+https://github.com/rust-lang/crates.io-index" 322 | checksum = "854babe52e4df1653706b98fcfc05843010039b406875930a70e4d9644e5c417" 323 | dependencies = [ 324 | "thiserror-impl", 325 | ] 326 | 327 | [[package]] 328 | name = "thiserror-impl" 329 | version = "1.0.30" 330 | source 
= "registry+https://github.com/rust-lang/crates.io-index" 331 | checksum = "aa32fd3f627f367fe16f893e2597ae3c05020f8bba2666a4e6ea73d377e5714b" 332 | dependencies = [ 333 | "proc-macro2", 334 | "quote", 335 | "syn", 336 | ] 337 | 338 | [[package]] 339 | name = "toml" 340 | version = "0.5.8" 341 | source = "registry+https://github.com/rust-lang/crates.io-index" 342 | checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" 343 | dependencies = [ 344 | "serde", 345 | ] 346 | 347 | [[package]] 348 | name = "unicode-segmentation" 349 | version = "1.8.0" 350 | source = "registry+https://github.com/rust-lang/crates.io-index" 351 | checksum = "8895849a949e7845e06bd6dc1aa51731a103c42707010a5b591c0038fb73385b" 352 | 353 | [[package]] 354 | name = "unicode-width" 355 | version = "0.1.9" 356 | source = "registry+https://github.com/rust-lang/crates.io-index" 357 | checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973" 358 | 359 | [[package]] 360 | name = "unicode-xid" 361 | version = "0.2.2" 362 | source = "registry+https://github.com/rust-lang/crates.io-index" 363 | checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" 364 | 365 | [[package]] 366 | name = "vec_map" 367 | version = "0.8.2" 368 | source = "registry+https://github.com/rust-lang/crates.io-index" 369 | checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" 370 | 371 | [[package]] 372 | name = "version_check" 373 | version = "0.9.3" 374 | source = "registry+https://github.com/rust-lang/crates.io-index" 375 | checksum = "5fecdca9a5291cc2b8dcf7dc02453fee791a280f3743cb0905f8822ae463b3fe" 376 | 377 | [[package]] 378 | name = "wasi" 379 | version = "0.10.2+wasi-snapshot-preview1" 380 | source = "registry+https://github.com/rust-lang/crates.io-index" 381 | checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" 382 | 383 | [[package]] 384 | name = "winapi" 385 | version = "0.3.9" 386 | source = "registry+https://github.com/rust-lang/crates.io-index" 387 | checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" 388 | dependencies = [ 389 | "winapi-i686-pc-windows-gnu", 390 | "winapi-x86_64-pc-windows-gnu", 391 | ] 392 | 393 | [[package]] 394 | name = "winapi-i686-pc-windows-gnu" 395 | version = "0.4.0" 396 | source = "registry+https://github.com/rust-lang/crates.io-index" 397 | checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" 398 | 399 | [[package]] 400 | name = "winapi-x86_64-pc-windows-gnu" 401 | version = "0.4.0" 402 | source = "registry+https://github.com/rust-lang/crates.io-index" 403 | checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" 404 | 405 | [[package]] 406 | name = "ycsb" 407 | version = "0.0.0" 408 | dependencies = [ 409 | "anyhow", 410 | "log", 411 | "rand", 412 | "serde", 413 | "sql-builder", 414 | "sqlite", 415 | "structopt", 416 | "toml", 417 | ] 418 | --------------------------------------------------------------------------------