├── randread
│   ├── .gitignore
│   ├── maxprocs.txt
│   └── main.go
├── populate
│   ├── .gitignore
│   ├── record-heap.sh
│   ├── lmdb_txn_bench_test.go
│   └── main.go
├── rw-bench
│   ├── rw-bench
│   ├── README.md
│   └── bench.go
├── bench.go
├── .gitignore
├── rdb
│   ├── dynflag.go
│   ├── embedflag.go
│   ├── snapshot.go
│   ├── slice.go
│   ├── options.go
│   ├── options_write.go
│   ├── options_read.go
│   ├── cache.go
│   ├── write_batch.go
│   ├── util.go
│   ├── checkpoint.go
│   ├── LICENSE
│   ├── filter_policy.go
│   ├── options_block_based_table.go
│   ├── iterator.go
│   ├── db.go
│   ├── rdbc.h
│   └── rdbc.cc
├── extra
│   ├── write_test.go
│   ├── README.md
│   └── read_test.go
├── README.md
├── run.sh
├── store
│   ├── README.md
│   ├── store.go
│   └── store_test.go
├── LICENSE
├── bench_test.go
├── BENCH-rocks.txt
└── write_benchmarks

--------------------------------------------------------------------------------
/randread/.gitignore:
--------------------------------------------------------------------------------
/randread

--------------------------------------------------------------------------------
/populate/.gitignore:
--------------------------------------------------------------------------------
/populate
tmp*

--------------------------------------------------------------------------------
/rw-bench/rw-bench:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dgraph-io/badger-bench/HEAD/rw-bench/rw-bench

--------------------------------------------------------------------------------
/bench.go:
--------------------------------------------------------------------------------
package main

import (
	"fmt"
)

func main() {
	fmt.Println("badger-bench")
}

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
data
*.pprof
*.prof
*.svg
badger-bench
ramdisk
tmp
bench-tmp
badger-bench.test
logs/

--------------------------------------------------------------------------------
/rdb/dynflag.go:
--------------------------------------------------------------------------------
// +build !embed

package rdb

// #cgo CXXFLAGS: -std=c++11 -O2
// #cgo LDFLAGS: -L/usr/local/lib -lrocksdb -lstdc++ -lm -lz -lbz2 -lsnappy
// #include <stdio.h>
// #include <stdlib.h>
// #include "rdbc.h"
import "C"

--------------------------------------------------------------------------------
/extra/write_test.go:
--------------------------------------------------------------------------------
package extra

import (
	"io/ioutil"
	"os"
	"testing"

	"github.com/dgraph-io/badger/y"
)

// BenchmarkWrite gives us the raw write speed of the drive (or ramdisk)
// backing /tmp/ramdisk.
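// It assumes something is mounted there; a hedged setup sketch (not part
// of the original repo):
//
//	sudo mkdir -p /tmp/ramdisk
//	sudo mount -t tmpfs -o size=2g tmpfs /tmp/ramdisk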
func BenchmarkWrite(b *testing.B) {
	f, err := ioutil.TempFile("/tmp/ramdisk", "table_")
	y.Check(err)
	defer f.Close()
	defer os.Remove(f.Name())

	buf := make([]byte, b.N*1000)
	b.ResetTimer()
	_, err = f.Write(buf)
	y.Check(err)
}

--------------------------------------------------------------------------------
/populate/record-heap.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
biggest=0
while true
do
	in_use=$(curl -sS -X GET http://localhost:8081/debug/vars | tr "," "\n" | grep HeapInuse | awk -F: '{print $2}')
	echo $in_use, $biggest
	if [ "$in_use" -gt "$biggest" ]
	then
		echo RECORD
		biggest=$in_use
		curl -sS -X GET http://localhost:8081/debug/pprof/heap > ${biggest}.prof
	fi
	sleep 10
done

--------------------------------------------------------------------------------
/rdb/embedflag.go:
--------------------------------------------------------------------------------
// +build embed

package rdb

// #cgo CXXFLAGS: -std=c++11
// #cgo CPPFLAGS: -I${SRCDIR}/../vendor/github.com/cockroachdb/c-lz4/internal/lib
// #cgo CPPFLAGS: -I${SRCDIR}/../vendor/github.com/cockroachdb/c-rocksdb/internal/include
// #cgo CPPFLAGS: -I${SRCDIR}/../vendor/github.com/cockroachdb/c-snappy/internal
// #cgo LDFLAGS: -lstdc++
// #cgo darwin LDFLAGS: -Wl,-undefined -Wl,dynamic_lookup
// #cgo !darwin LDFLAGS: -Wl,-unresolved-symbols=ignore-all -lrt
import "C"

import (
	_ "github.com/cockroachdb/c-lz4"
	_ "github.com/cockroachdb/c-rocksdb"
	_ "github.com/cockroachdb/c-snappy"
)

--------------------------------------------------------------------------------
/rw-bench/README.md:
--------------------------------------------------------------------------------
To run: go test -v

=== RUN TestPutAndIterate
Value size: 1024
Num unique keys: 2855231
rocks iteration time: 1.717578587s
rocks time: 13.540254938s
Num unique keys: 2855231
badger iteration time: 1.231884332s
badger time: 16.91380382s

=== RUN TestPutAndIterate
Value size: 128
Num unique keys: 2855316
rocks iteration time: 1.641643682s
rocks time: 9.72966573s
Num unique keys: 2855316
badger iteration time: 1.207451184s
badger time: 13.3816506s

=== RUN TestPutAndIterate
Value size: 0
Num unique keys: 2855279
rocks iteration time: 1.652722679s
rocks time: 8.782233328s
Num unique keys: 2855279
badger iteration time: 2.109307851s
badger time: 15.168629622s

--------------------------------------------------------------------------------
/rdb/snapshot.go:
--------------------------------------------------------------------------------
package rdb

// #include <stdio.h>
// #include <stdlib.h>
// #include "rdbc.h"
import "C"

// Snapshot provides a consistent view of read operations in a DB.
type Snapshot struct {
	c   *C.rdb_snapshot_t
	cDb *C.rdb_t
}

// NewNativeSnapshot creates a Snapshot object.
func NewNativeSnapshot(c *C.rdb_snapshot_t, cDb *C.rdb_t) *Snapshot {
	return &Snapshot{c, cDb}
}

// Release removes the snapshot from the database's list of snapshots.
func (s *Snapshot) Release() {
	C.rdb_release_snapshot(s.cDb, s.c)
	s.c, s.cDb = nil, nil
}

// NewSnapshot creates a new snapshot of the database.
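// A usage sketch (all names from this package), in the style of the
// example in iterator.go:
//
//	snap := db.NewSnapshot()
//	defer snap.Release()
//	ro := NewDefaultReadOptions()
//	defer ro.Destroy()
//	ro.SetSnapshot(snap)
//	// Reads through ro now observe the frozen view.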
func (db *DB) NewSnapshot() *Snapshot {
	cSnap := C.rdb_create_snapshot(db.c)
	return NewNativeSnapshot(cSnap, db.c)
}

--------------------------------------------------------------------------------
/rdb/slice.go:
--------------------------------------------------------------------------------
package rdb

// #include <stdio.h>
// #include <stdlib.h>
// #include "rdbc.h"
import "C"
import "unsafe"

// Slice is a wrapper for C-allocated values, used to avoid copying.
type Slice struct {
	data  *C.char
	size  C.size_t
	freed bool
}

// NewSlice returns a slice with the given data.
func NewSlice(data *C.char, size C.size_t) *Slice {
	return &Slice{data, size, false}
}

// Data returns the data of the slice.
func (s *Slice) Data() []byte {
	return charToByte(s.data, s.size)
}

// Size returns the size of the data.
func (s *Slice) Size() int {
	return int(s.size)
}

// Free frees the slice data.
func (s *Slice) Free() {
	if !s.freed {
		C.free(unsafe.Pointer(s.data))
		s.freed = true
	}
}

--------------------------------------------------------------------------------
/rdb/options.go:
--------------------------------------------------------------------------------
package rdb

// #include <stdio.h>
// #include <stdlib.h>
// #include "rdbc.h"
import "C"

// Options represents all of the available options when opening a database
// with OpenDb.
type Options struct {
	c    *C.rdb_options_t
	bbto *BlockBasedTableOptions
}

// NewDefaultOptions creates the default Options.
func NewDefaultOptions() *Options {
	return NewNativeOptions(C.rdb_options_create())
}

// NewNativeOptions creates an Options object.
func NewNativeOptions(c *C.rdb_options_t) *Options {
	return &Options{c: c}
}

// SetCreateIfMissing specifies whether the database
// should be created if it is missing.
// Default: false
func (opts *Options) SetCreateIfMissing(value bool) {
	C.rdb_options_set_create_if_missing(opts.c, boolToChar(value))
}

// SetBlockBasedTableFactory sets the block-based table factory.
func (opts *Options) SetBlockBasedTableFactory(value *BlockBasedTableOptions) {
	opts.bbto = value
	C.rdb_options_set_block_based_table_factory(opts.c, value.c)
}

--------------------------------------------------------------------------------
/rdb/options_write.go:
--------------------------------------------------------------------------------
package rdb

// #include <stdio.h>
// #include <stdlib.h>
// #include "rdbc.h"
import "C"

// WriteOptions represents all of the available options when writing to a
// database.
type WriteOptions struct {
	c *C.rdb_writeoptions_t
}

// NewDefaultWriteOptions creates a default WriteOptions object.
func NewDefaultWriteOptions() *WriteOptions {
	return NewNativeWriteOptions(C.rdb_writeoptions_create())
}

// NewNativeWriteOptions creates a WriteOptions object.
func NewNativeWriteOptions(c *C.rdb_writeoptions_t) *WriteOptions {
	return &WriteOptions{c}
}

// SetSync sets the sync mode. If true, the write will be flushed
// from the operating system buffer cache before the write is considered
// complete. If this flag is true, writes will be slower.
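// (Standard RocksDB semantics, noted here as an aside: with sync=false a
// crash of the process alone loses no writes, since the data is already
// in the OS buffer cache, but a machine or OS crash can lose the most
// recent writes.)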
// Default: false
func (opts *WriteOptions) SetSync(value bool) {
	C.rdb_writeoptions_set_sync(opts.c, boolToChar(value))
}

// Destroy deallocates the WriteOptions object.
func (opts *WriteOptions) Destroy() {
	C.rdb_writeoptions_destroy(opts.c)
	opts.c = nil
}

--------------------------------------------------------------------------------
/extra/README.md:
--------------------------------------------------------------------------------
```
$ go test -bench Read -count 3

BenchmarkRead/mode=2,m=4-8    5   311218250 ns/op
BenchmarkRead/mode=2,m=4-8    5   317086338 ns/op
BenchmarkRead/mode=2,m=4-8    5   308295082 ns/op
BenchmarkRead/mode=2,m=16-8   5   223310483 ns/op
BenchmarkRead/mode=2,m=16-8   5   231452472 ns/op
BenchmarkRead/mode=2,m=16-8   5   242802455 ns/op
BenchmarkRead/mode=2,m=64-8   10  210723926 ns/op
BenchmarkRead/mode=2,m=64-8   5   777420932 ns/op
BenchmarkRead/mode=2,m=64-8   5   200855156 ns/op
BenchmarkRead/mode=3,m=4-8    5   906213188 ns/op
BenchmarkRead/mode=3,m=4-8    5  1105820741 ns/op
BenchmarkRead/mode=3,m=4-8    2   527907341 ns/op
BenchmarkRead/mode=3,m=16-8   3   393121910 ns/op
BenchmarkRead/mode=3,m=16-8   3   378802854 ns/op
BenchmarkRead/mode=3,m=16-8   3   398507217 ns/op
BenchmarkRead/mode=3,m=64-8   3   379417432 ns/op
BenchmarkRead/mode=3,m=64-8   3   373250323 ns/op
BenchmarkRead/mode=3,m=64-8   3   392327897 ns/op

```

Loading into RAM has no visible advantage over mmap. Variance seems high.
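One likely reason, inferred from `read_test.go` in this directory: the mmap
path uses `MAP_POPULATE`, which prefaults the whole file into memory before
the timed copy loop, so both modes effectively stream from RAM.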
--------------------------------------------------------------------------------
/rdb/options_read.go:
--------------------------------------------------------------------------------
package rdb

// #include <stdio.h>
// #include <stdlib.h>
// #include "rdbc.h"
import "C"

// ReadOptions represents all of the available options when reading from a
// database.
type ReadOptions struct {
	c *C.rdb_readoptions_t
}

// NewDefaultReadOptions creates a default ReadOptions object.
func NewDefaultReadOptions() *ReadOptions {
	return NewNativeReadOptions(C.rdb_readoptions_create())
}

// NewNativeReadOptions creates a ReadOptions object.
func NewNativeReadOptions(c *C.rdb_readoptions_t) *ReadOptions {
	return &ReadOptions{c}
}

// Destroy deallocates the ReadOptions object.
func (opts *ReadOptions) Destroy() {
	C.rdb_readoptions_destroy(opts.c)
	opts.c = nil
}

// SetFillCache specifies whether the "data block"/"index block"/"filter
// block" read for this iteration should be cached in memory.
// Callers may wish to set this field to false for bulk scans.
// Default: true
func (opts *ReadOptions) SetFillCache(value bool) {
	C.rdb_readoptions_set_fill_cache(opts.c, boolToChar(value))
}

// SetSnapshot updates the default read options to use the given snapshot.
func (opts *ReadOptions) SetSnapshot(snapshot *Snapshot) {
	if snapshot == nil {
		C.rdb_readoptions_set_snapshot(opts.c, nil)
		return
	}
	C.rdb_readoptions_set_snapshot(opts.c, snapshot.c)
}

--------------------------------------------------------------------------------
/rdb/cache.go:
--------------------------------------------------------------------------------
/*
 * Copyright (C) 2017 Dgraph Labs, Inc. and Contributors
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

package rdb

// #include <stdio.h>
// #include <stdlib.h>
// #include "rdbc.h"
import "C"

// Cache is an in-memory cache for data read from disk.
type Cache struct {
	c *C.rdb_cache_t
}

// NewLRUCache creates a new LRU Cache object with the given capacity.
func NewLRUCache(capacity int) *Cache {
	return NewNativeCache(C.rdb_cache_create_lru(C.size_t(capacity)))
}

// NewNativeCache creates a Cache object.
func NewNativeCache(c *C.rdb_cache_t) *Cache {
	return &Cache{c}
}

// Destroy deallocates the Cache object.
func (c *Cache) Destroy() {
	C.rdb_cache_destroy(c.c)
	c.c = nil
}

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Benchmarks for BadgerDB
This repo contains the code for benchmarking [BadgerDB], along with detailed logs from previous benchmarking runs.

[BadgerDB]: https://github.com/dgraph-io/badger

## Setting Up
- Install rocksdb using the steps here: https://github.com/facebook/rocksdb/blob/master/INSTALL.md

```
$ sudo apt-get update && sudo apt-get install libgflags-dev libsnappy-dev zlib1g-dev libbz2-dev liblz4-dev libzstd-dev
$ wget https://github.com/facebook/rocksdb/archive/v5.1.4.tar.gz
$ tar -xzvf v5.1.4.tar.gz
$ cd rocksdb-5.1.4
$ export USE_RTTI=1 && make shared_lib
$ sudo make install-shared
$ ldconfig # to update ld.so.cache
```

- Install badger-bench

```
$ go get github.com/dgraph-io/badger-bench/...
```

- Run `go test -c` and make sure everything compiles. Refer to the benchmarking logs below for commands to run individual benchmarks.
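For example, a single random-read benchmark can be run with flags like these
(taken from `run.sh`; adjust the sizes and data directory to your setup):

```
$ go test --bench BenchmarkReadRandomBadger --keys_mil 250 --valsz 128 --dir bench-data-128 --benchtime 3m -v
```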
## Benchmarking Logs and Blog Posts
We have performed comprehensive benchmarks against RocksDB, BoltDB and LMDB.
Detailed logs of all the steps are made available in this repo. Refer to the
blog posts for graphs and other information.

* [Benchmarking log for RocksDB](https://github.com/dgraph-io/badger-bench/blob/master/BENCH-rocks.txt) (link to [blog post](https://blog.dgraph.io/post/badger/))
* [Benchmarking log for BoltDB and LMDB](https://github.com/dgraph-io/badger-bench/blob/master/BENCH-lmdb-bolt.md) (link to [blog post](https://blog.dgraph.io/post/badger-lmdb-boltdb/))

--------------------------------------------------------------------------------
/rdb/write_batch.go:
--------------------------------------------------------------------------------
package rdb

// #include <stdio.h>
// #include <stdlib.h>
// #include "rdbc.h"
import "C"

// WriteBatch is a batching of Puts, Merges and Deletes.
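// A usage sketch (assuming an open *DB; the batch is applied atomically,
// see Store.WriteBatch in store/store.go):
//
//	wb := NewWriteBatch()
//	defer wb.Destroy()
//	wb.Put([]byte("k1"), []byte("v1"))
//	wb.Delete([]byte("k2"))
//	// ...then hand wb to DB.Write with some WriteOptions.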
type WriteBatch struct {
	c *C.rdb_writebatch_t
}

// NewWriteBatch creates a WriteBatch object.
func NewWriteBatch() *WriteBatch {
	return NewNativeWriteBatch(C.rdb_writebatch_create())
}

// NewNativeWriteBatch creates a WriteBatch object.
func NewNativeWriteBatch(c *C.rdb_writebatch_t) *WriteBatch {
	return &WriteBatch{c}
}

// WriteBatchFrom creates a write batch from a serialized WriteBatch.
func WriteBatchFrom(data []byte) *WriteBatch {
	return NewNativeWriteBatch(C.rdb_writebatch_create_from(byteToChar(data), C.size_t(len(data))))
}

// Put queues a key-value pair.
func (wb *WriteBatch) Put(key, value []byte) {
	cKey := byteToChar(key)
	cValue := byteToChar(value)
	C.rdb_writebatch_put(wb.c, cKey, C.size_t(len(key)), cValue, C.size_t(len(value)))
}

// Delete queues a deletion of the data at key.
func (wb *WriteBatch) Delete(key []byte) {
	cKey := byteToChar(key)
	C.rdb_writebatch_delete(wb.c, cKey, C.size_t(len(key)))
}

// Count returns the number of updates in the batch.
func (wb *WriteBatch) Count() int {
	return int(C.rdb_writebatch_count(wb.c))
}

// Clear removes all the enqueued Puts and Deletes.
func (wb *WriteBatch) Clear() {
	C.rdb_writebatch_clear(wb.c)
}

// Destroy deallocates the WriteBatch object.
func (wb *WriteBatch) Destroy() {
	C.rdb_writebatch_destroy(wb.c)
	wb.c = nil
}

--------------------------------------------------------------------------------
/run.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
#set -x

export LD_LIBRARY_PATH=/usr/local/lib
#keysMil=(250 75 5 1000)
keysMil=(250)
#valueSizes=(128 1024 16384 16)
valueSizes=(128)

for i in "${!keysMil[@]}"; do
	keyMil=${keysMil[$i]}
	valueSz=${valueSizes[$i]}
	echo "keyMil:$keyMil, valueSz:$valueSz"

	DATADIR=bench-data-$valueSz
	if [ ! -d "$DATADIR" ]; then
		mkdir "$DATADIR"
	fi

	populate --kv rocksdb --valsz $valueSz --keys_mil $keyMil --dir=$DATADIR | tee logs/populate-rocksdb-$valueSz.log
	populate --kv badger --valsz $valueSz --keys_mil $keyMil --dir=$DATADIR | tee logs/populate-badger-$valueSz.log

	echo "cleaning caches"
	echo 3 | sudo tee /proc/sys/vm/drop_caches  # 3 = free pagecache, dentries and inodes
	sudo blockdev --flushbufs /dev/nvme0n1
	echo "benchmark random read"

	go test --bench BenchmarkReadRandomRocks --keys_mil $keyMil --valsz $valueSz --dir $DATADIR --timeout 10m --benchtime 3m -v | tee logs/randomread-rocksdb-$valueSz.log
	go test --bench BenchmarkReadRandomBadger --keys_mil $keyMil --valsz $valueSz --dir $DATADIR --timeout 10m --benchtime 3m -v | tee logs/randomread-badger-$valueSz.log
	go test --bench BenchmarkIterateRocks --keys_mil $keyMil --valsz $valueSz --dir $DATADIR --timeout 10m --cpuprofile logs/iterate-rocks-cpu-$valueSz.out -v | tee logs/iterate-rocks-$valueSz.log
	go test --bench BenchmarkIterateBadgerOnly --keys_mil $keyMil --valsz $valueSz --dir $DATADIR --timeout 10m --cpuprofile logs/iterate-badger-cpu-$valueSz.out -v | tee logs/iterate-badger-$valueSz.log
	go test --bench BenchmarkIterateBadgerWithValues --keys_mil $keyMil --valsz $valueSz --dir $DATADIR --timeout 10m --cpuprofile logs/iterate-badger-with-values-cpu-$valueSz.out -v | tee logs/iterate-badger-with-values-$valueSz.log
done

--------------------------------------------------------------------------------
/rdb/util.go:
--------------------------------------------------------------------------------
package rdb

import "C"
import (
	"reflect"
	"unsafe"
)

// btoi converts a bool value to int.
func btoi(b bool) int {
	if b {
		return 1
	}
	return 0
}

// boolToChar converts a bool value to C.uchar.
func boolToChar(b bool) C.uchar {
	if b {
		return 1
	}
	return 0
}

// charToByte converts a *C.char to a byte slice. The returned slice
// aliases the C memory, so it must not be used after that memory is freed.
func charToByte(data *C.char, len C.size_t) []byte {
	var value []byte
	sH := (*reflect.SliceHeader)(unsafe.Pointer(&value))
	sH.Cap, sH.Len, sH.Data = int(len), int(len), uintptr(unsafe.Pointer(data))
	return value
}

// byteToChar returns *C.char from a byte slice.
func byteToChar(b []byte) *C.char {
	var c *C.char
	if len(b) > 0 {
		c = (*C.char)(unsafe.Pointer(&b[0]))
	}
	return c
}

// Go []byte to C string.
// The C string is allocated in the C heap using malloc.
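// Note (added): the (*[1 << 24]byte) conversion below caps the copy at
// 16 MiB; slicing it with a larger len(b) would panic at runtime. That is
// ample for the key and filter payloads this package handles.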
func cByteSlice(b []byte) *C.char {
	var c *C.char
	if len(b) > 0 {
		cData := C.malloc(C.size_t(len(b)))
		copy((*[1 << 24]byte)(cData)[0:len(b)], b)
		c = (*C.char)(cData)
	}
	return c
}

// stringToChar returns *C.char from a string.
func stringToChar(s string) *C.char {
	ptrStr := (*reflect.StringHeader)(unsafe.Pointer(&s))
	return (*C.char)(unsafe.Pointer(ptrStr.Data))
}

// charSlice converts a C array of *char to a []*C.char.
func charSlice(data **C.char, len C.int) []*C.char {
	var value []*C.char
	sH := (*reflect.SliceHeader)(unsafe.Pointer(&value))
	sH.Cap, sH.Len, sH.Data = int(len), int(len), uintptr(unsafe.Pointer(data))
	return value
}

// sizeSlice converts a C array of size_t to a []C.size_t.
func sizeSlice(data *C.size_t, len C.int) []C.size_t {
	var value []C.size_t
	sH := (*reflect.SliceHeader)(unsafe.Pointer(&value))
	sH.Cap, sH.Len, sH.Data = int(len), int(len), uintptr(unsafe.Pointer(data))
	return value
}

--------------------------------------------------------------------------------
/rdb/checkpoint.go:
--------------------------------------------------------------------------------
/*
 * Copyright (C) 2017 Dgraph Labs, Inc. and Contributors
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

package rdb

// #include <stdio.h>
// #include <stdlib.h>
// #include "rdbc.h"
import "C"
import (
	"errors"
	"unsafe"
)

// Checkpoint can be used to create openable snapshots.
type Checkpoint struct {
	c   *C.rdb_checkpoint_t
	cDb *C.rdb_t
}

// Destroy deallocates the Checkpoint object.
func (s *Checkpoint) Destroy() {
	C.rdb_destroy_checkpoint(s.c)
	s.c, s.cDb = nil, nil
}

// NewCheckpoint creates a new checkpoint object for the database.
func (db *DB) NewCheckpoint() (*Checkpoint, error) {
	var cErr *C.char
	cCheck := C.rdb_create_checkpoint(db.c, &cErr)
	if cErr != nil {
		defer C.free(unsafe.Pointer(cErr))
		return nil, errors.New(C.GoString(cErr))
	}
	return &Checkpoint{
		c:   cCheck,
		cDb: db.c,
	}, nil
}

// Save builds an openable snapshot of RocksDB on disk.
// CAUTION: checkpointDir must not already exist; if it does, no checkpoint
// is written there.
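// A usage sketch (hypothetical target directory):
//
//	cp, err := db.NewCheckpoint()
//	if err != nil { /* handle */ }
//	defer cp.Destroy()
//	if err := cp.Save("/path/to/checkpoint-dir"); err != nil { /* handle */ }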
func (s *Checkpoint) Save(checkpointDir string) error {
	var (
		cErr *C.char
		cDir = C.CString(checkpointDir)
	)
	defer C.free(unsafe.Pointer(cDir))
	C.rdb_open_checkpoint(s.c, cDir, &cErr)
	if cErr != nil {
		defer C.free(unsafe.Pointer(cErr))
		return errors.New(C.GoString(cErr))
	}
	return nil
}

--------------------------------------------------------------------------------
/extra/read_test.go:
--------------------------------------------------------------------------------
package extra

import (
	"bytes"
	"flag"
	"fmt"
	"io"
	// "io/ioutil"
	"math/rand"
	"os"
	"syscall"
	"testing"

	"github.com/dgraph-io/badger/y"
)

func createFile(writeBuf []byte) string {
	filename := fmt.Sprintf("/tmp/rwbench_%16x", rand.Int63())
	f, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE|syscall.O_DSYNC, 0666)
	y.Check(err)
	defer f.Close()
	_, err = f.Write(writeBuf)
	y.Check(err)
	return filename
}

const (
	modeControl = iota
	modeDisk
	modeMmap
	modeRAM
)

func BenchmarkRead(b *testing.B) {
	n := 64 << 20
	writeBuf := make([]byte, n)

	// for _, mode := range []int{modeControl, modeDisk, modeMmap, modeRAM} {
	for _, mode := range []int{modeMmap, modeRAM} {
		var mmap []byte
		if mode == modeRAM {
			mmap = make([]byte, n) // Don't count the time to make this.
		}
		for _, m := range []int{1 << 2, 1 << 4, 1 << 6} {
			b.Run(fmt.Sprintf("mode=%d,m=%d", mode, m), func(b *testing.B) {
				y.AssertTruef((n%m) == 0, "%d %d", n, m)

				b.ResetTimer()
				for j := 0; j < b.N; j++ {
					func() {
						filename := createFile(writeBuf)
						defer os.Remove(filename)
						if mode == modeControl {
							return
						}

						// Measure time to open and read the whole file.
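						// (Added note: MAP_POPULATE below prefaults the
						// whole mapping, so the copy loop runs from the
						// page cache, which is why extra/README.md sees
						// no advantage for modeRAM over modeMmap.)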
						f, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE|syscall.O_DSYNC, 0666)
						y.Check(err)
						defer f.Close()

						if mode == modeMmap {
							mmap, err = syscall.Mmap(int(f.Fd()), 0, n,
								syscall.PROT_READ, syscall.MAP_PRIVATE|syscall.MAP_POPULATE)
							y.Check(err)
						} else if mode == modeRAM {
							if _, rerr := f.ReadAt(mmap, 0); rerr != nil && rerr != io.EOF {
								y.Check(rerr)
							}
						}

						readBuf := make([]byte, m)
						numIters := n / m
						var read int
						if mode == modeDisk {
							for i := 0; i < numIters; i++ {
								k, err := f.Read(readBuf)
								y.Check(err)
								read += k
							}
						} else {
							in := bytes.NewBuffer(mmap)
							for i := 0; i < numIters; i++ {
								k, err := in.Read(readBuf)
								y.Check(err)
								read += k
							}
						}
						y.AssertTruef(read == n, "%d %d", read, n)

						if mode == modeMmap {
							y.Check(syscall.Munmap(mmap))
						}
					}()
				}

			})
		}
	}

}

func TestMain(m *testing.M) {
	flag.Parse()
	os.Exit(m.Run())
}

--------------------------------------------------------------------------------
/rdb/LICENSE:
--------------------------------------------------------------------------------
For gorocksdb software

Copyright (C) 2016 Thomas Adam

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is furnished
to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

---------------------------------------------------------------------

BSD License

For rocksdb software

Copyright (c) 2011-present, Facebook, Inc.
All rights reserved.

---------------------------------------------------------------------

Copyright (c) 2011 The LevelDB Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

  * Redistributions of source code must retain the above copyright
    notice, this list of conditions and the following disclaimer.
  * Redistributions in binary form must reproduce the above
    copyright notice, this list of conditions and the following disclaimer
    in the documentation and/or other materials provided with the
    distribution.
  * Neither the name of Google Inc. nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

--------------------------------------------------------------------------------
/populate/lmdb_txn_bench_test.go:
--------------------------------------------------------------------------------
// Benchmark batched writing to lmdb using sub-txns against writing without
// them.
//
// This is needed because lmdb does not have support for batched writes,
// and we are trying to simulate it using a sub-txn, going by a hint in
// the Node bindings for lmdb: https://github.com/rvagg/lmdb/blob/master/src/database.cc#L208
package main

import (
	"flag"
	"os"
	"testing"

	"github.com/bmatsuo/lmdb-go/lmdb"
	"github.com/dgraph-io/badger"
	"github.com/dgraph-io/badger/y"
)

func getLmdbEnv() *lmdb.Env {
	os.RemoveAll(*dir + "/lmdb")
	os.MkdirAll(*dir+"/lmdb", 0777)

	var err error
	lmdbEnv, err = lmdb.NewEnv()
	y.Check(err)
	err = lmdbEnv.SetMaxDBs(1)
	y.Check(err)
	err = lmdbEnv.SetMapSize(1 << 36) // ~68GB
	y.Check(err)

	err = lmdbEnv.Open(*dir+"/lmdb", 0, 0777)
	y.Check(err)
	return lmdbEnv
}

// update wraps env.Update and sends the resulting error over a channel.
// env.Update locks the wrapped function to an OS thread (it calls
// runtime.LockOSThread) so the write transaction is issued safely.
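// (Added run note; the --dir and --valsz flags are assumed to be the ones
// defined in main.go of this package:
// go test --bench BenchmarkLmdbBatch --dir /tmp/bench --valsz 128)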
var update = func(res chan<- error, op lmdb.TxnOp) {
	res <- lmdbEnv.Update(op)
}

func writeEntries(dbi lmdb.DBI, txn *lmdb.Txn, entries []*badger.Entry) error {
	for _, e := range entries {
		err := txn.Put(dbi, e.Key, e.Value, 0)
		if err != nil {
			return err
		}
	}
	return nil
}

func writeSimpleBatched(entries []*badger.Entry, dbi lmdb.DBI) {
	err := lmdbEnv.Update(func(txn *lmdb.Txn) error {
		return writeEntries(dbi, txn, entries)
	})
	y.Check(err)
}

func writeTxnBatched(entries []*badger.Entry, dbi lmdb.DBI) {
	err := lmdbEnv.Update(func(txn *lmdb.Txn) error {
		return txn.Sub(func(txn *lmdb.Txn) error {
			return writeEntries(dbi, txn, entries)
		})
	})
	y.Check(err)
}

func BenchmarkLmdbBatch(b *testing.B) {
	entries := make([]*badger.Entry, 1000)
	for i := 0; i < len(entries); i++ {
		e := new(badger.Entry)
		e.Key = make([]byte, 22)
		e.Value = make([]byte, *valueSize)
		entries[i] = e
	}

	lmdbEnv := getLmdbEnv()
	defer lmdbEnv.Close()

	var dbi lmdb.DBI
	err := lmdbEnv.Update(func(txn *lmdb.Txn) error {
		var err error
		dbi, err = txn.CreateDBI("bench")
		return err
	})
	y.Check(err)

	b.Run("SimpleBatched", func(b *testing.B) {
		// Do a batched write without a sub-txn.
		for i := 0; i < b.N; i++ {
			writeSimpleBatched(entries, dbi)
		}
	})

	b.Run("TxnBatched", func(b *testing.B) {
		// Do a batched write with an explicit sub-txn.
		for i := 0; i < b.N; i++ {
			writeTxnBatched(entries, dbi)
		}
	})
}

func TestMain(m *testing.M) {
	flag.Parse()
	os.Exit(m.Run())
}

--------------------------------------------------------------------------------
/store/README.md:
--------------------------------------------------------------------------------
Results of benchmarking
------------------------

Using RocksDB

So, reading times are on the order of single-digit microseconds, while writing
times with `Sync` set to true are ~30 milliseconds.

```
$ go test -run BenchmarkSet -v -bench .
PASS
BenchmarkGet_valsize100-6     500000      2850 ns/op
--- BENCH: BenchmarkGet_valsize100-6
	store_test.go:85: Wrote 100 keys.
	store_test.go:85: Wrote 100 keys.
	store_test.go:85: Wrote 100 keys.
	store_test.go:85: Wrote 100 keys.
BenchmarkGet_valsize1000-6    500000      3565 ns/op
--- BENCH: BenchmarkGet_valsize1000-6
	store_test.go:85: Wrote 100 keys.
	store_test.go:85: Wrote 100 keys.
	store_test.go:85: Wrote 100 keys.
	store_test.go:85: Wrote 100 keys.
BenchmarkGet_valsize10000-6   200000      8541 ns/op
--- BENCH: BenchmarkGet_valsize10000-6
	store_test.go:85: Wrote 100 keys.
	store_test.go:85: Wrote 100 keys.
	store_test.go:85: Wrote 100 keys.
	store_test.go:85: Wrote 100 keys.
	store_test.go:85: Wrote 100 keys.
BenchmarkSet_valsize100-6         50  32932578 ns/op
BenchmarkSet_valsize1000-6        50  28066678 ns/op
BenchmarkSet_valsize10000-6       50  28736228 ns/op
ok  github.com/dgraph-io/dgraph/store 48.393s
```

Also based on dgraph-io/experiments/db:

## BoltDB

Without copying the resulting byte slice from Bolt. **Unsafe**
```
$ go test -bench BenchmarkRead .
testing: warning: no tests to run
PASS
BenchmarkReadBolt_1024    500000      3858 ns/op
BenchmarkReadBolt_10KB    500000      3738 ns/op
BenchmarkReadBolt_500KB  1000000      3141 ns/op
BenchmarkReadBolt_1MB    1000000      3026 ns/op
ok  github.com/dgraph-io/experiments/db 102.513s
```

Copying the resulting byte slice. **Safe**
```
$ go test -bench BenchmarkRead .
testing: warning: no tests to run
PASS
BenchmarkReadBolt_1024    200000      6760 ns/op
BenchmarkReadBolt_10KB    100000     21249 ns/op
BenchmarkReadBolt_500KB    10000    214449 ns/op
BenchmarkReadBolt_1MB       3000    350712 ns/op
ok  github.com/dgraph-io/experiments/db 80.890s
```

## RocksDB

```
$ go test -bench BenchmarkGet .
PASS
BenchmarkGet_valsize1024   300000      5715 ns/op
BenchmarkGet_valsize10KB    50000     27619 ns/op
BenchmarkGet_valsize500KB    2000    604185 ns/op
BenchmarkGet_valsize1MB      2000   1064685 ns/op
ok  github.com/dgraph-io/dgraph/store 55.029s
```

### Thoughts
Dgraph uses an append-only commit log to sync new mutations to disk before
returning. Every time a posting list gets initialized, it checks for both the
stored posting list and the mutations committed after the posting list was
written. Hence, our access pattern from the store is largely read-only, with
fewer writes. This is true irrespective of how many writes get committed by the
end user.

Hence, BoltDB is a better choice. It performs better for reads/seeks, despite
Dgraph needing a value copy. Writes are somewhat slower, but that shouldn't be a
problem because of the above mentioned reasons.

**Update**: Just realized that BoltDB only allows a SINGLE writer at any point
in time. This is equivalent to a global mutex lock. That'd essentially kill
Dgraph's performance. So, BoltDB is out!

--------------------------------------------------------------------------------
/rdb/filter_policy.go:
--------------------------------------------------------------------------------
package rdb

// #include <stdio.h>
// #include <stdlib.h>
// #include "rdbc.h"
import "C"

// FilterPolicy is a factory type that allows the RocksDB database to create a
// filter, such as a bloom filter, which will be used to reduce reads.
type FilterPolicy interface {
	// CreateFilter is given a list of keys (potentially with duplicates)
	// that are ordered according to the user supplied comparator.
	CreateFilter(keys [][]byte) []byte

	// KeyMayMatch is given "filter", the data appended by a preceding call
	// to CreateFilter(). This method must return true if the key was in
	// the list of keys passed to CreateFilter(). It may return true or
	// false if the key was not on the list, but it should aim to return
	// false with a high probability.
	KeyMayMatch(key []byte, filter []byte) bool

	// Name returns the name of this policy.
	Name() string
}

// NewNativeFilterPolicy creates a FilterPolicy object.
func NewNativeFilterPolicy(c *C.rdb_filterpolicy_t) FilterPolicy {
	return nativeFilterPolicy{c}
}

type nativeFilterPolicy struct {
	c *C.rdb_filterpolicy_t
}

func (fp nativeFilterPolicy) CreateFilter(keys [][]byte) []byte          { return nil }
func (fp nativeFilterPolicy) KeyMayMatch(key []byte, filter []byte) bool { return false }
func (fp nativeFilterPolicy) Name() string                               { return "" }

// NewBloomFilter returns a new filter policy that uses a bloom filter with
// approximately the specified number of bits per key. A good value for
// bits_per_key is 10, which yields a filter with ~1% false positive rate.
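//
// (Aside, standard Bloom-filter math: with b bits per key and the optimal
// k = b*ln2 hash functions, the false-positive rate is roughly
// (1/2)^k = 0.6185^b; for b = 10 that is about 0.8%, matching the ~1%
// figure above.)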
//
// Note: if you are using a custom comparator that ignores some parts
// of the keys being compared, you must not use NewBloomFilter()
// and must provide your own FilterPolicy that also ignores the
// corresponding parts of the keys. For example, if the comparator
// ignores trailing spaces, it would be incorrect to use a
// FilterPolicy (like NewBloomFilter) that does not ignore
// trailing spaces in keys.
func NewBloomFilter(bitsPerKey int) FilterPolicy {
	return NewNativeFilterPolicy(C.rdb_filterpolicy_create_bloom(C.int(bitsPerKey)))
}

// Hold references to filter policies.
var filterPolicies []FilterPolicy

func registerFilterPolicy(fp FilterPolicy) int {
	filterPolicies = append(filterPolicies, fp)
	return len(filterPolicies) - 1
}

//export rdbc_filterpolicy_create_filter
func rdbc_filterpolicy_create_filter(idx int, cKeys **C.char, cKeysLen *C.size_t, cNumKeys C.int, cDstLen *C.size_t) *C.char {
	rawKeys := charSlice(cKeys, cNumKeys)
	keysLen := sizeSlice(cKeysLen, cNumKeys)
	keys := make([][]byte, int(cNumKeys))
	for i, len := range keysLen {
		keys[i] = charToByte(rawKeys[i], len)
	}

	dst := filterPolicies[idx].CreateFilter(keys)
	*cDstLen = C.size_t(len(dst))
	return cByteSlice(dst)
}

//export rdbc_filterpolicy_key_may_match
func rdbc_filterpolicy_key_may_match(idx int, cKey *C.char, cKeyLen C.size_t, cFilter *C.char, cFilterLen C.size_t) C.uchar {
	key := charToByte(cKey, cKeyLen)
	filter := charToByte(cFilter, cFilterLen)
	return boolToChar(filterPolicies[idx].KeyMayMatch(key, filter))
}

//export rdbc_filterpolicy_name
func rdbc_filterpolicy_name(idx int) *C.char {
	return stringToChar(filterPolicies[idx].Name())
}

--------------------------------------------------------------------------------
/rdb/options_block_based_table.go:
--------------------------------------------------------------------------------
package rdb

// #include <stdio.h>
// #include <stdlib.h>
// #include "rdbc.h"
import "C"

// BlockBasedTableOptions represents block-based table options.
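// Typical wiring, mirroring setOpts in store/store.go of this repo:
//
//	bbto := NewDefaultBlockBasedTableOptions()
//	bbto.SetFilterPolicy(NewBloomFilter(16))
//	opts := NewDefaultOptions()
//	opts.SetCreateIfMissing(true)
//	opts.SetBlockBasedTableFactory(bbto)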
type BlockBasedTableOptions struct {
	c *C.rdb_block_based_table_options_t

	// Hold references for GC.
	cache     *Cache
	compCache *Cache

	// We keep these so we can free their memory in Destroy.
	cFp *C.rdb_filterpolicy_t
}

// NewDefaultBlockBasedTableOptions creates a default BlockBasedTableOptions object.
func NewDefaultBlockBasedTableOptions() *BlockBasedTableOptions {
	return NewNativeBlockBasedTableOptions(C.rdb_block_based_options_create())
}

// NewNativeBlockBasedTableOptions creates a BlockBasedTableOptions object.
func NewNativeBlockBasedTableOptions(c *C.rdb_block_based_table_options_t) *BlockBasedTableOptions {
	return &BlockBasedTableOptions{c: c}
}

// Destroy deallocates the BlockBasedTableOptions object.
func (opts *BlockBasedTableOptions) Destroy() {
	C.rdb_block_based_options_destroy(opts.c)
	opts.c = nil
	opts.cache = nil
	opts.compCache = nil
}

// SetBlockSize sets the approximate size of user data packed per block.
// Note that the block size specified here corresponds to uncompressed data.
// The actual size of the unit read from disk may be smaller if
// compression is enabled. This parameter can be changed dynamically.
// Default: 4K
func (opts *BlockBasedTableOptions) SetBlockSize(blockSize int) {
	C.rdb_block_based_options_set_block_size(opts.c, C.size_t(blockSize))
}

// SetFilterPolicy sets the filter policy to reduce disk reads.
// Many applications will benefit from passing the result of
// NewBloomFilter() here.
// Default: nil
func (opts *BlockBasedTableOptions) SetFilterPolicy(fp FilterPolicy) {
	if nfp, ok := fp.(nativeFilterPolicy); ok {
		opts.cFp = nfp.c
	} else {
		idx := registerFilterPolicy(fp)
		opts.cFp = C.rdbc_filterpolicy_create(C.uintptr_t(idx))
	}
	C.rdb_block_based_options_set_filter_policy(opts.c, opts.cFp)
}

// SetNoBlockCache specifies whether the block cache should be used or not.
// Default: false
func (opts *BlockBasedTableOptions) SetNoBlockCache(value bool) {
	C.rdb_block_based_options_set_no_block_cache(opts.c, boolToChar(value))
}

// SetBlockCache sets the cache for blocks (user data is stored in a set of
// blocks, and a block is the unit of reading from disk).
//
// If set, use the specified cache for blocks.
// If nil, rocksdb will automatically create and use an 8MB internal cache.
// Default: nil
func (opts *BlockBasedTableOptions) SetBlockCache(cache *Cache) {
	opts.cache = cache
	C.rdb_block_based_options_set_block_cache(opts.c, cache.c)
}

// SetBlockCacheCompressed sets the cache for compressed blocks.
// If nil, rocksdb will not use a compressed block cache.
// Default: nil
func (opts *BlockBasedTableOptions) SetBlockCacheCompressed(cache *Cache) {
	opts.compCache = cache
	C.rdb_block_based_options_set_block_cache_compressed(opts.c, cache.c)
}

// SetWholeKeyFiltering specifies whether whole keys (not just prefixes)
// should be placed in the filter.
// This must generally be true for gets to be efficient.
// Default: true
func (opts *BlockBasedTableOptions) SetWholeKeyFiltering(value bool) {
	C.rdb_block_based_options_set_whole_key_filtering(opts.c, boolToChar(value))
}

--------------------------------------------------------------------------------
/rdb/iterator.go:
--------------------------------------------------------------------------------
package rdb

// #include <stdio.h>
// #include <stdlib.h>
// #include "rdbc.h"
import "C"
import (
	"bytes"
	"errors"
	"unsafe"
)

// Iterator provides a way to seek to specific keys and iterate through
// the keyspace from that point, as well as access the values of those keys.
//
// For example:
//
//	it := db.NewIterator(readOpts)
//	defer it.Close()
//
//	it.Seek([]byte("foo"))
//	for ; it.Valid(); it.Next() {
//		fmt.Printf("Key: %v Value: %v\n", it.Key().Data(), it.Value().Data())
//	}
//
//	if err := it.Err(); err != nil {
//		return err
//	}
type Iterator struct {
	c *C.rdb_iterator_t
}

// NewNativeIterator creates an Iterator object.
func NewNativeIterator(c unsafe.Pointer) *Iterator {
	return &Iterator{(*C.rdb_iterator_t)(c)}
}

// Valid returns false only when an Iterator has iterated past either the
// first or the last key in the database.
func (iter *Iterator) Valid() bool {
	return C.rdb_iter_valid(iter.c) != 0
}

// ValidForPrefix returns false only when an Iterator has iterated past the
// first or the last key in the database or the specified prefix.
func (iter *Iterator) ValidForPrefix(prefix []byte) bool {
	return C.rdb_iter_valid(iter.c) != 0 && bytes.HasPrefix(iter.Key().Data(), prefix)
}

// Key returns the key the iterator currently holds.
func (iter *Iterator) Key() *Slice {
	var cLen C.size_t
	cKey := C.rdb_iter_key(iter.c, &cLen)
	if cKey == nil {
		return nil
	}
	return &Slice{cKey, cLen, true}
}

// Value returns the value in the database the iterator currently holds.
func (iter *Iterator) Value() *Slice {
	var cLen C.size_t
	cVal := C.rdb_iter_value(iter.c, &cLen)
	if cVal == nil {
		return nil
	}
	return &Slice{cVal, cLen, true}
}

// Next moves the iterator to the next sequential key in the database.
func (iter *Iterator) Next() {
	C.rdb_iter_next(iter.c)
}

// Prev moves the iterator to the previous sequential key in the database.
func (iter *Iterator) Prev() {
	C.rdb_iter_prev(iter.c)
}

// SeekToFirst moves the iterator to the first key in the database.
func (iter *Iterator) SeekToFirst() {
	C.rdb_iter_seek_to_first(iter.c)
}

// SeekToLast moves the iterator to the last key in the database.
func (iter *Iterator) SeekToLast() {
	C.rdb_iter_seek_to_last(iter.c)
}

// Seek moves the iterator to the position greater than or equal to the key.
func (iter *Iterator) Seek(key []byte) {
	cKey := byteToChar(key)
	C.rdb_iter_seek(iter.c, cKey, C.size_t(len(key)))
}

// SeekForPrev moves the iterator to the position less than or equal to the key.
func (iter *Iterator) SeekForPrev(key []byte) {
	cKey := byteToChar(key)
	C.rdb_iter_seek_for_prev(iter.c, cKey, C.size_t(len(key)))
}

// Err returns nil if no errors happened during iteration, or the actual
// error otherwise.
func (iter *Iterator) Err() error {
	var cErr *C.char
	C.rdb_iter_get_error(iter.c, &cErr)
	if cErr != nil {
		defer C.free(unsafe.Pointer(cErr))
		return errors.New(C.GoString(cErr))
	}
	return nil
}

// Close closes the iterator.
func (iter *Iterator) Close() {
	C.rdb_iter_destroy(iter.c)
	iter.c = nil
}

--------------------------------------------------------------------------------
/randread/maxprocs.txt:
--------------------------------------------------------------------------------
With fio
$ fio --name=randread --ioengine=libaio --iodepth=32 --rw=randread --bs=4k --direct=0 --size=2G --numjobs=16 --runtime=240 --group_reporting

Average:      DEV        tps   rd_sec/s  wr_sec/s  avgrq-sz  avgqu-sz  await  svctm  %util
Average:      xvda      0.09       4.57      0.71     59.20      0.00   0.00   0.00    0.00
Average:   nvme0n1 118063.07  944503.71      0.86      8.00     12.75   0.11   0.01  100.36

With Go (default GOMAXPROCS; should be 4 on this 4-core machine)
Average:      DEV        tps   rd_sec/s  wr_sec/s  avgrq-sz  avgqu-sz  await  svctm  %util
Average:      xvda      1.27      12.00      4.76     13.19      0.00   0.21   0.21    0.03
Average:   nvme0n1  57501.74  548921.95      0.43      9.55      6.43   0.11   0.02   99.76

With Go, GOMAXPROCS=64
Average:      DEV        tps   rd_sec/s  wr_sec/s  avgrq-sz  avgqu-sz  await  svctm  %util
Average:      xvda      0.11       0.17      3.30     32.00      0.00   0.80   0.80    0.01
Average:   nvme0n1 104680.17  981817.39      0.00      9.38     12.82   0.12   0.01  100.04

With Go, GOMAXPROCS=128
Average:      DEV        tps   rd_sec/s  wr_sec/s  avgrq-sz  avgqu-sz  await  svctm  %util
Average:      xvda      0.40       0.32      5.92     15.60      0.00   0.20   0.20    0.01
Average:   nvme0n1 105601.16  989440.35      0.00      9.37     12.79   0.12   0.01  100.04

With GOMAXPROCS=32
	Command being timed: "./randread --dir /mnt/data/fio --mode 1 --seconds 60"
	User time (seconds): 23.34
	System time (seconds): 100.91
	Percent of CPU this job got: 207%
	Elapsed (wall clock) time (h:mm:ss or m:ss): 1:00.00
	Average shared text size (kbytes): 0
	Average unshared data size (kbytes): 0
	Average stack size (kbytes): 0
	Average total size (kbytes): 0
	Maximum resident set size (kbytes): 3820
	Average resident set size (kbytes): 0
	Major (requiring I/O) page faults: 9
	Minor (reclaiming a frame) page faults: 416
	Voluntary context switches: 2958129
	Involuntary context switches: 2525
	Swaps: 0
	File system inputs: 59343840
	File system outputs: 0
	Socket messages sent: 0
	Socket messages received: 0
	Signals delivered: 0
	Page size (bytes): 4096
	Exit status: 0

With GOMAXPROCS=128
	Command being timed: "./randread --dir /mnt/data/fio --mode 1 --seconds 60"
	User time (seconds): 21.59
	System time (seconds): 104.34
	Percent of CPU this job got: 209%
	Elapsed (wall clock) time (h:mm:ss or m:ss): 1:00.00
	Average shared text size (kbytes): 0
	Average unshared data size (kbytes): 0
	Average stack size (kbytes): 0
	Average total size (kbytes): 0
	Maximum resident set size (kbytes): 3968
	Average resident set size (kbytes): 0
	Major (requiring I/O) page faults: 11
	Minor (reclaiming a frame) page faults: 590
	Voluntary context switches: 2956871
	Involuntary context switches: 2591
	Swaps: 0
	File system inputs: 59264616
	File system outputs: 0
	Socket messages sent: 0
	Socket messages received: 0
	Signals delivered: 0
	Page size (bytes): 4096
	Exit status: 0

--------------------------------------------------------------------------------
/rw-bench/bench.go:
--------------------------------------------------------------------------------
package main

import (
	"flag"
	"fmt"
	"log"
	"math/rand"
	"os"
	"runtime"
	"runtime/pprof"
	"time"

	"github.com/dgraph-io/badger"
	"github.com/dgraph-io/badger-bench/rdb"
	"github.com/dgraph-io/badger-bench/store"
	"github.com/dgraph-io/badger/y"
)

var (
	numKeys    = flag.Int("keys_mil", 1, "How many million keys to write.")
	valueSize  = flag.Int("valsz", 0, "Value size in bytes.")
	mil        = 1000000
	cpuprofile = flag.String("cpuprofile", "", "write cpu profile to `file`")
	memprofile = flag.String("memprofile", "", "write memory profile to `file`")
)

type entry struct {
	Key   []byte
	Value []byte
	Meta  byte
}

// fillEntry picks a random key from a space 10x larger than the number of
// entries written, and fills e with that key and a random value.
"runtime/pprof" 11 | "time" 12 | 13 | "github.com/dgraph-io/badger" 14 | "github.com/dgraph-io/badger-bench/rdb" 15 | "github.com/dgraph-io/badger-bench/store" 16 | "github.com/dgraph-io/badger/y" 17 | ) 18 | 19 | var ( 20 | numKeys = flag.Int("keys_mil", 1, "How many million keys to write.") 21 | valueSize = flag.Int("valsz", 0, "Value size in bytes.") 22 | mil = 1000000 23 | cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`") 24 | memprofile = flag.String("memprofile", "", "write memory profile to `file`") 25 | ) 26 | 27 | type entry struct { 28 | Key []byte 29 | Value []byte 30 | Meta byte 31 | } 32 | 33 | func fillEntry(e *entry) { 34 | k := rand.Intn(*numKeys * mil * 10) 35 | key := fmt.Sprintf("vsz=%05d-k=%010d", *valueSize, k) // 22 bytes. 36 | if cap(e.Key) < len(key) { 37 | e.Key = make([]byte, 2*len(key)) 38 | } 39 | e.Key = e.Key[:len(key)] 40 | copy(e.Key, key) 41 | 42 | rand.Read(e.Value) 43 | e.Meta = 0 44 | } 45 | 46 | var bdg *badger.DB 47 | var rocks *store.Store 48 | 49 | func createEntries(entries []*entry) *rdb.WriteBatch { 50 | rb := rocks.NewWriteBatch() 51 | for _, e := range entries { 52 | fillEntry(e) 53 | rb.Put(e.Key, e.Value) 54 | } 55 | return rb 56 | } 57 | 58 | func main() { 59 | flag.Parse() 60 | if *cpuprofile != "" { 61 | f, err := os.Create(*cpuprofile) 62 | if err != nil { 63 | log.Fatal("could not create CPU profile: ", err) 64 | } 65 | if err := pprof.StartCPUProfile(f); err != nil { 66 | log.Fatal("could not start CPU profile: ", err) 67 | } 68 | defer pprof.StopCPUProfile() 69 | } 70 | 71 | rand.Seed(time.Now().Unix()) 72 | opt := badger.DefaultOptions("tmp/badger") 73 | // opt.MapTablesTo = table.Nothing 74 | opt.SyncWrites = false 75 | 76 | var err error 77 | y.Check(os.RemoveAll("tmp/badger")) 78 | os.MkdirAll("tmp/badger", 0777) 79 | bdg, err = badger.Open(opt) 80 | y.Check(err) 81 | 82 | y.Check(os.RemoveAll("tmp/rocks")) 83 | os.MkdirAll("tmp/rocks", 0777) 84 | rocks, err = store.NewStore("tmp/rocks") 85 | y.Check(err) 86 | 87 | entries := make([]*entry, *numKeys*1000000) 88 | for i := 0; i < len(entries); i++ { 89 | e := new(entry) 90 | e.Key = make([]byte, 22) 91 | e.Value = make([]byte, *valueSize) 92 | entries[i] = e 93 | } 94 | rb := createEntries(entries) 95 | txn := bdg.NewTransaction(true) 96 | for _, e := range entries { 97 | y.Check(txn.Set(e.Key, e.Value)) 98 | } 99 | 100 | fmt.Println("Value size:", *valueSize) 101 | fmt.Println("RocksDB:") 102 | rstart := time.Now() 103 | y.Check(rocks.WriteBatch(rb)) 104 | var count int 105 | ritr := rocks.NewIterator() 106 | ristart := time.Now() 107 | for ritr.SeekToFirst(); ritr.Valid(); ritr.Next() { 108 | _ = ritr.Key() 109 | count++ 110 | } 111 | fmt.Println("Num unique keys:", count) 112 | fmt.Println("Iteration time: ", time.Since(ristart)) 113 | fmt.Println("Total time: ", time.Since(rstart)) 114 | rb.Destroy() 115 | rocks.Close() 116 | 117 | fmt.Println("Badger:") 118 | bstart := time.Now() 119 | y.Check(txn.Commit()) 120 | iopt := badger.IteratorOptions{} 121 | bistart := time.Now() 122 | iopt.PrefetchValues = false 123 | iopt.PrefetchSize = 1000 124 | txn = bdg.NewTransaction(false) 125 | bitr := txn.NewIterator(iopt) 126 | count = 0 127 | for bitr.Rewind(); bitr.Valid(); bitr.Next() { 128 | _ = bitr.Item().Key() 129 | count++ 130 | } 131 | fmt.Println("Num unique keys:", count) 132 | fmt.Println("Iteration time: ", time.Since(bistart)) 133 | fmt.Println("Total time: ", time.Since(bstart)) 134 | if *memprofile != "" { 135 | f, err := os.Create(*memprofile) 136 | if 
var bdg *badger.DB
var rocks *store.Store

func createEntries(entries []*entry) *rdb.WriteBatch {
	rb := rocks.NewWriteBatch()
	for _, e := range entries {
		fillEntry(e)
		rb.Put(e.Key, e.Value)
	}
	return rb
}

func main() {
	flag.Parse()
	if *cpuprofile != "" {
		f, err := os.Create(*cpuprofile)
		if err != nil {
			log.Fatal("could not create CPU profile: ", err)
		}
		if err := pprof.StartCPUProfile(f); err != nil {
			log.Fatal("could not start CPU profile: ", err)
		}
		defer pprof.StopCPUProfile()
	}

	rand.Seed(time.Now().Unix())
	opt := badger.DefaultOptions("tmp/badger")
	// opt.MapTablesTo = table.Nothing
	opt.SyncWrites = false

	var err error
	y.Check(os.RemoveAll("tmp/badger"))
	os.MkdirAll("tmp/badger", 0777)
	bdg, err = badger.Open(opt)
	y.Check(err)

	y.Check(os.RemoveAll("tmp/rocks"))
	os.MkdirAll("tmp/rocks", 0777)
	rocks, err = store.NewStore("tmp/rocks")
	y.Check(err)

	entries := make([]*entry, *numKeys*mil)
	for i := 0; i < len(entries); i++ {
		e := new(entry)
		e.Key = make([]byte, 22)
		e.Value = make([]byte, *valueSize)
		entries[i] = e
	}
	rb := createEntries(entries)
	txn := bdg.NewTransaction(true)
	for _, e := range entries {
		y.Check(txn.Set(e.Key, e.Value))
	}

	fmt.Println("Value size:", *valueSize)
	fmt.Println("RocksDB:")
	rstart := time.Now()
	y.Check(rocks.WriteBatch(rb))
	var count int
	ritr := rocks.NewIterator()
	ristart := time.Now()
	for ritr.SeekToFirst(); ritr.Valid(); ritr.Next() {
		_ = ritr.Key()
		count++
	}
	fmt.Println("Num unique keys:", count)
	fmt.Println("Iteration time: ", time.Since(ristart))
	fmt.Println("Total time: ", time.Since(rstart))
	rb.Destroy()
	rocks.Close()

	fmt.Println("Badger:")
	bstart := time.Now()
	y.Check(txn.Commit())
	iopt := badger.IteratorOptions{}
	bistart := time.Now()
	iopt.PrefetchValues = false
	iopt.PrefetchSize = 1000
	txn = bdg.NewTransaction(false)
	bitr := txn.NewIterator(iopt)
	count = 0
	for bitr.Rewind(); bitr.Valid(); bitr.Next() {
		_ = bitr.Item().Key()
		count++
	}
	fmt.Println("Num unique keys:", count)
	fmt.Println("Iteration time: ", time.Since(bistart))
	fmt.Println("Total time: ", time.Since(bstart))
	if *memprofile != "" {
		f, err := os.Create(*memprofile)
		if err != nil {
			log.Fatal("could not create memory profile: ", err)
		}
		runtime.GC() // get up-to-date statistics
		if err := pprof.WriteHeapProfile(f); err != nil {
			log.Fatal("could not write memory profile: ", err)
		}
		f.Close()
	}
	bdg.Close()
}

--------------------------------------------------------------------------------
/store/store.go:
--------------------------------------------------------------------------------
/*
 * Copyright (C) 2017 Dgraph Labs, Inc. and Contributors
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

package store

import (
	"strconv"

	"github.com/dgraph-io/badger-bench/rdb"
)

// Store contains some handles to RocksDB.
type Store struct {
	db       *rdb.DB
	opt      *rdb.Options // Contains blockopt.
	blockopt *rdb.BlockBasedTableOptions
	ropt     *rdb.ReadOptions
	wopt     *rdb.WriteOptions
}

func (s *Store) setOpts() {
	s.opt = rdb.NewDefaultOptions()
	s.blockopt = rdb.NewDefaultBlockBasedTableOptions()
	s.opt.SetBlockBasedTableFactory(s.blockopt)

	// If you want to access blockopt.blockCache, you need to grab handles to them
	// as well. Otherwise, they will be nil. However, for now, we do not really need
	// to do this.
	// s.blockopt.SetBlockCache(rocksdb.NewLRUCache(blockCacheSize))
	// s.blockopt.SetBlockCacheCompressed(rocksdb.NewLRUCache(blockCacheSize))

	s.opt.SetCreateIfMissing(true)
	fp := rdb.NewBloomFilter(16)
	s.blockopt.SetFilterPolicy(fp)

	s.ropt = rdb.NewDefaultReadOptions()
	s.wopt = rdb.NewDefaultWriteOptions()
	s.wopt.SetSync(false) // We don't need to do synchronous writes.
}

// NewStore constructs a Store object at filepath, given some options.
func NewStore(filepath string) (*Store, error) {
	s := &Store{}
	s.setOpts()
	var err error
	s.db, err = rdb.OpenDb(s.opt, filepath)
	return s, err
}
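// A minimal end-to-end sketch of this wrapper (hypothetical path):
//
//	s, err := NewStore("/tmp/rocks-bench")
//	if err != nil { /* handle */ }
//	defer s.Close()
//	if err := s.SetOne([]byte("k"), []byte("v")); err != nil { /* handle */ }
//	val, err := s.Get([]byte("k"))
//	if err != nil { /* handle */ }
//	defer val.Free()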
93 | func (s *Store) SetOne(k []byte, val []byte) error { return s.db.Put(s.wopt, k, val) } 94 | 95 | // Delete deletes a key from the data store. 96 | func (s *Store) Delete(k []byte) error { return s.db.Delete(s.wopt, k) } 97 | 98 | // NewIterator initializes a new iterator and returns it. 99 | func (s *Store) NewIterator() *rdb.Iterator { 100 | ro := rdb.NewDefaultReadOptions() 101 | // SetFillCache should be set to false for bulk reads to avoid caching data 102 | // while doing bulk scans. 103 | ro.SetFillCache(false) 104 | return s.db.NewIterator(ro) 105 | } 106 | 107 | // Close closes our data store. 108 | func (s *Store) Close() { s.db.Close() } 109 | 110 | // MemtableSize returns the current memtable size. 111 | func (s *Store) MemtableSize() uint64 { 112 | memTableSize, _ := strconv.ParseUint(s.db.GetProperty("rocksdb.cur-size-all-mem-tables"), 10, 64) 113 | return memTableSize 114 | } 115 | 116 | // IndexFilterblockSize returns the filter block size. 117 | func (s *Store) IndexFilterblockSize() uint64 { 118 | blockSize, _ := strconv.ParseUint(s.db.GetProperty("rocksdb.estimate-table-readers-mem"), 10, 64) 119 | return blockSize 120 | } 121 | 122 | // NewWriteBatch creates a new WriteBatch object and returns a pointer to it. 123 | func (s *Store) NewWriteBatch() *rdb.WriteBatch { return rdb.NewWriteBatch() } 124 | 125 | // WriteBatch does a batch write to RocksDB from the data in the WriteBatch object. 126 | func (s *Store) WriteBatch(wb *rdb.WriteBatch) error { 127 | return s.db.Write(s.wopt, wb) 128 | } 129 | 130 | // NewCheckpoint creates a new checkpoint from the current store. 131 | func (s *Store) NewCheckpoint() (*rdb.Checkpoint, error) { return s.db.NewCheckpoint() } 132 | 133 | // NewSnapshot creates a new snapshot of the current store. 134 | func (s *Store) NewSnapshot() *rdb.Snapshot { return s.db.NewSnapshot() } 135 | 136 | // SetSnapshot updates default read options to use the given snapshot. 137 | func (s *Store) SetSnapshot(snapshot *rdb.Snapshot) { s.ropt.SetSnapshot(snapshot) } 138 | 139 | // GetStats returns stats of our data store. 140 | func (s *Store) GetStats() string { return s.db.GetStats() } 141 | -------------------------------------------------------------------------------- /rdb/db.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2017 Dgraph Labs, Inc. and Contributors 3 | * 4 | * This program is free software: you can redistribute it and/or modify 5 | * it under the terms of the GNU Affero General Public License as published by 6 | * the Free Software Foundation, either version 3 of the License, or 7 | * (at your option) any later version. 8 | * 9 | * This program is distributed in the hope that it will be useful, 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | * GNU Affero General Public License for more details. 13 | * 14 | * You should have received a copy of the GNU Affero General Public License 15 | * along with this program. If not, see <http://www.gnu.org/licenses/>. 16 | */ 17 | 18 | package rdb 19 | 20 | // #include <stdlib.h> 21 | // #include <string.h> 22 | // #include "rdbc.h" 23 | import "C" 24 | import ( 25 | "errors" 26 | "unsafe" 27 | ) 28 | 29 | // Range is a range of keys in the database. A GetApproximateSizes call with a 30 | // Range covers the keys from Start up to, but not including, Limit. 31 | type Range struct { 32 | Start []byte 33 | Limit []byte 34 | } 35 | 36 | // DB is a reusable handle to a RocksDB database on disk, created by OpenDb.
37 | type DB struct { 38 | c *C.rdb_t 39 | name string 40 | opts *Options 41 | } 42 | 43 | // OpenDb opens a database with the specified options. 44 | func OpenDb(opts *Options, name string) (*DB, error) { 45 | var ( 46 | cErr *C.char 47 | cName = C.CString(name) 48 | ) 49 | defer C.free(unsafe.Pointer(cName)) 50 | db := C.rdb_open(opts.c, cName, &cErr) 51 | if cErr != nil { 52 | defer C.free(unsafe.Pointer(cErr)) 53 | return nil, errors.New(C.GoString(cErr)) 54 | } 55 | return &DB{ 56 | name: name, 57 | c: db, 58 | opts: opts, 59 | }, nil 60 | } 61 | 62 | // OpenDbForReadOnly opens a database with the specified options for readonly usage. 63 | func OpenDbForReadOnly(opts *Options, name string, errorIfLogFileExist bool) (*DB, error) { 64 | var ( 65 | cErr *C.char 66 | cName = C.CString(name) 67 | ) 68 | defer C.free(unsafe.Pointer(cName)) 69 | db := C.rdb_open_for_read_only(opts.c, cName, boolToChar(errorIfLogFileExist), &cErr) 70 | if cErr != nil { 71 | defer C.free(unsafe.Pointer(cErr)) 72 | return nil, errors.New(C.GoString(cErr)) 73 | } 74 | return &DB{ 75 | name: name, 76 | c: db, 77 | opts: opts, 78 | }, nil 79 | } 80 | 81 | // Close closes the database. 82 | func (db *DB) Close() { 83 | C.rdb_close(db.c) 84 | } 85 | 86 | // Get returns the data associated with the key from the database. Remember 87 | // to deallocate the returned Slice. 88 | func (db *DB) Get(opts *ReadOptions, key []byte) (*Slice, error) { 89 | var ( 90 | cErr *C.char 91 | cValLen C.size_t 92 | cKey = byteToChar(key) 93 | ) 94 | cValue := C.rdb_get(db.c, opts.c, cKey, C.size_t(len(key)), &cValLen, &cErr) 95 | if cErr != nil { 96 | defer C.free(unsafe.Pointer(cErr)) 97 | return nil, errors.New(C.GoString(cErr)) 98 | } 99 | return NewSlice(cValue, cValLen), nil 100 | } 101 | 102 | // GetBytes is like Get but returns a copy of the data. 103 | func (db *DB) GetBytes(opts *ReadOptions, key []byte) ([]byte, error) { 104 | var ( 105 | cErr *C.char 106 | cValLen C.size_t 107 | cKey = byteToChar(key) 108 | ) 109 | cValue := C.rdb_get(db.c, opts.c, cKey, C.size_t(len(key)), &cValLen, &cErr) 110 | if cErr != nil { 111 | defer C.free(unsafe.Pointer(cErr)) 112 | return nil, errors.New(C.GoString(cErr)) 113 | } 114 | if cValue == nil { 115 | return nil, nil 116 | } 117 | defer C.free(unsafe.Pointer(cValue)) 118 | return C.GoBytes(unsafe.Pointer(cValue), C.int(cValLen)), nil 119 | } 120 | 121 | // Put writes data associated with a key to the database. 122 | func (db *DB) Put(opts *WriteOptions, key, value []byte) error { 123 | var ( 124 | cErr *C.char 125 | cKey = byteToChar(key) 126 | cValue = byteToChar(value) 127 | ) 128 | C.rdb_put(db.c, opts.c, cKey, C.size_t(len(key)), cValue, C.size_t(len(value)), &cErr) 129 | if cErr != nil { 130 | defer C.free(unsafe.Pointer(cErr)) 131 | return errors.New(C.GoString(cErr)) 132 | } 133 | return nil 134 | } 135 | 136 | // Delete removes the data associated with the key from the database. 
137 | func (db *DB) Delete(opts *WriteOptions, key []byte) error { 138 | var ( 139 | cErr *C.char 140 | cKey = byteToChar(key) 141 | ) 142 | C.rdb_delete(db.c, opts.c, cKey, C.size_t(len(key)), &cErr) 143 | if cErr != nil { 144 | defer C.free(unsafe.Pointer(cErr)) 145 | return errors.New(C.GoString(cErr)) 146 | } 147 | return nil 148 | } 149 | 150 | // Write writes a WriteBatch to the database. 151 | func (db *DB) Write(opts *WriteOptions, batch *WriteBatch) error { 152 | var cErr *C.char 153 | C.rdb_write(db.c, opts.c, batch.c, &cErr) 154 | if cErr != nil { 155 | defer C.free(unsafe.Pointer(cErr)) 156 | return errors.New(C.GoString(cErr)) 157 | } 158 | return nil 159 | } 160 | 161 | // NewIterator returns an Iterator over the database that uses the 162 | // ReadOptions given. 163 | func (db *DB) NewIterator(opts *ReadOptions) *Iterator { 164 | cIter := C.rdb_create_iterator(db.c, opts.c) 165 | return NewNativeIterator(unsafe.Pointer(cIter)) 166 | } 167 | 168 | // GetProperty returns the value of a database property. 169 | func (db *DB) GetProperty(propName string) string { 170 | cprop := C.CString(propName) 171 | defer C.free(unsafe.Pointer(cprop)) 172 | cValue := C.rdb_property_value(db.c, cprop) 173 | defer C.free(unsafe.Pointer(cValue)) 174 | return C.GoString(cValue) 175 | } 176 | 177 | // GetStats returns stats of our data store. 178 | func (db *DB) GetStats() string { return db.GetProperty("rocksdb.stats") } 179 | -------------------------------------------------------------------------------- /store/store_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (C) 2017 Dgraph Labs, Inc. and Contributors 3 | * 4 | * This program is free software: you can redistribute it and/or modify 5 | * it under the terms of the GNU Affero General Public License as published by 6 | * the Free Software Foundation, either version 3 of the License, or 7 | * (at your option) any later version. 8 | * 9 | * This program is distributed in the hope that it will be useful, 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 | * GNU Affero General Public License for more details. 13 | * 14 | * You should have received a copy of the GNU Affero General Public License 15 | * along with this program. If not, see <http://www.gnu.org/licenses/>. 16 | */ 17 | 18 | package store 19 | 20 | import ( 21 | "fmt" 22 | "io/ioutil" 23 | "math/rand" 24 | "os" 25 | "path" 26 | "testing" 27 | 28 | "github.com/stretchr/testify/require" 29 | ) 30 | 31 | func TestGet(t *testing.T) { 32 | path, err := ioutil.TempDir("", "storetest_") 33 | require.NoError(t, err) 34 | defer os.RemoveAll(path) 35 | 36 | s, err := NewStore(path) 37 | require.NoError(t, err) 38 | 39 | k := []byte("mykey") 40 | require.NoError(t, s.SetOne(k, []byte("neo"))) 41 | 42 | val, err := s.Get(k) 43 | require.NoError(t, err) 44 | require.EqualValues(t, val.Data(), "neo") 45 | 46 | require.NoError(t, s.SetOne(k, []byte("the one"))) 47 | val, err = s.Get(k) 48 | require.NoError(t, err) 49 | require.EqualValues(t, val.Data(), "the one") 50 | } 51 | 52 | func TestSnapshot(t *testing.T) { 53 | path, err := ioutil.TempDir("", "storetest_") 54 | require.NoError(t, err) 55 | defer os.RemoveAll(path) 56 | 57 | s, err := NewStore(path) 58 | require.NoError(t, err) 59 | 60 | k := []byte("mykey") 61 | require.NoError(t, s.SetOne(k, []byte("neo"))) 62 | 63 | snapshot := s.NewSnapshot() // Snapshot will contain neo, not trinity.
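// The snapshot pins the store's state at this point: once it is installed
// through SetSnapshot below, reads made via the default read options stop
// seeing writes that land after this line.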
64 | require.NoError(t, s.SetOne(k, []byte("trinity"))) 65 | 66 | // Before setting snapshot, do a get. Expect to get trinity. 67 | val, err := s.Get(k) 68 | require.NoError(t, err) 69 | require.EqualValues(t, val.Data(), "trinity") 70 | 71 | s.SetSnapshot(snapshot) 72 | // After setting snapshot, we expect to get neo. 73 | val, err = s.Get(k) 74 | require.NoError(t, err) 75 | require.EqualValues(t, val.Data(), "neo") 76 | 77 | s.SetSnapshot(nil) 78 | // After clearing snapshot, we expect to get trinity again. 79 | val, err = s.Get(k) 80 | require.NoError(t, err) 81 | require.EqualValues(t, val.Data(), "trinity") 82 | } 83 | 84 | func TestCheckpoint(t *testing.T) { 85 | dbPath, err := ioutil.TempDir("", "storetest_") 86 | require.NoError(t, err) 87 | defer os.RemoveAll(dbPath) 88 | 89 | s, err := NewStore(dbPath) 90 | require.NoError(t, err) 91 | 92 | key := []byte("mykey") 93 | require.NoError(t, s.SetOne(key, []byte("neo"))) 94 | 95 | // Make sure neo did get written as we expect. 96 | val, err := s.Get(key) 97 | require.NoError(t, err) 98 | require.EqualValues(t, val.Data(), "neo") 99 | 100 | // Do checkpointing. Checkpoint should contain neo. 101 | checkpoint, err := s.NewCheckpoint() 102 | require.NoError(t, err) 103 | 104 | pathCheckpoint := path.Join(dbPath, "checkpoint") // Do not mkdir yet. 105 | checkpoint.Save(pathCheckpoint) 106 | checkpoint.Destroy() 107 | 108 | // Update original store to contain trinity. 109 | require.NoError(t, s.SetOne(key, []byte("trinity"))) 110 | 111 | // Original store should contain trinity. 112 | val, err = s.Get(key) 113 | require.NoError(t, err) 114 | require.EqualValues(t, val.Data(), "trinity") 115 | 116 | // Open checkpoint and check that it contains neo, not trinity. 117 | s2, err := NewStore(pathCheckpoint) 118 | require.NoError(t, err) 119 | 120 | val, err = s2.Get(key) 121 | require.NoError(t, err) 122 | require.EqualValues(t, val.Data(), "neo") 123 | } 124 | 125 | func benchmarkGet(valSize int, b *testing.B) { 126 | path, err := ioutil.TempDir("", "storetest_") 127 | if err != nil { 128 | b.Error(err) 129 | return 130 | } 131 | defer os.RemoveAll(path) 132 | 133 | s, err := NewStore(path) 134 | if err != nil { 135 | b.Error(err) 136 | return 137 | } 138 | buf := make([]byte, valSize) 139 | 140 | nkeys := 100 141 | for i := 0; i < nkeys; i++ { 142 | key := []byte(fmt.Sprintf("key_%d", i)) 143 | if err := s.SetOne(key, buf); err != nil { 144 | b.Error(err) 145 | return 146 | } 147 | } 148 | 149 | b.ResetTimer() 150 | for i := 0; i < b.N; i++ { 151 | k := rand.Int() % nkeys 152 | key := []byte(fmt.Sprintf("key_%d", k)) 153 | valSlice, err := s.Get(key) 154 | if err != nil { 155 | b.Error(err) 156 | continue 157 | } 158 | if valSlice == nil { 159 | b.Error("Missing value") 160 | } else if len(valSlice.Data()) != valSize { 161 | b.Errorf("Value size expected: %d.
Found: %d", valSize, len(valSlice.Data())) 162 | } 163 | } 164 | b.StopTimer() 165 | } 166 | 167 | func BenchmarkGet_valsize1024(b *testing.B) { benchmarkGet(1024, b) } 168 | func BenchmarkGet_valsize10KB(b *testing.B) { benchmarkGet(10240, b) } 169 | func BenchmarkGet_valsize500KB(b *testing.B) { benchmarkGet(1<<19, b) } 170 | func BenchmarkGet_valsize1MB(b *testing.B) { benchmarkGet(1<<20, b) } 171 | 172 | func benchmarkSet(valSize int, b *testing.B) { 173 | path, err := ioutil.TempDir("", "storetest_") 174 | if err != nil { 175 | b.Error(err) 176 | return 177 | } 178 | defer os.RemoveAll(path) 179 | 180 | s, err := NewStore(path) 181 | if err != nil { 182 | b.Error(err) 183 | return 184 | } 185 | buf := make([]byte, valSize) 186 | 187 | b.ResetTimer() 188 | for i := 0; i < b.N; i++ { 189 | key := []byte(fmt.Sprintf("key_%d", i)) 190 | if err := s.SetOne(key, buf); err != nil { 191 | b.Error(err) 192 | return 193 | } 194 | } 195 | b.StopTimer() 196 | } 197 | 198 | func BenchmarkSet_valsize1024(b *testing.B) { benchmarkSet(1024, b) } 199 | func BenchmarkSet_valsize10KB(b *testing.B) { benchmarkSet(10240, b) } 200 | func BenchmarkSet_valsize500KB(b *testing.B) { benchmarkSet(1<<19, b) } 201 | func BenchmarkSet_valsize1MB(b *testing.B) { benchmarkSet(1<<20, b) } 202 | -------------------------------------------------------------------------------- /rdb/rdbc.h: -------------------------------------------------------------------------------- 1 | // This file is a subset of the C API from RocksDB. It should remain consistent. 2 | // There will be another file which contains some extra routines that we find 3 | // useful. 4 | #ifndef __DGROCKSDBC__ 5 | #define __DGROCKSDBC__ 6 | 7 | #ifdef __cplusplus 8 | extern "C" { 9 | #endif 10 | 11 | typedef struct rdb_t rdb_t; 12 | typedef struct rdb_options_t rdb_options_t; 13 | typedef struct rdb_readoptions_t rdb_readoptions_t; 14 | typedef struct rdb_writeoptions_t rdb_writeoptions_t; 15 | typedef struct rdb_writebatch_t rdb_writebatch_t; 16 | typedef struct rdb_iterator_t rdb_iterator_t; 17 | typedef struct rdb_filterpolicy_t rdb_filterpolicy_t; 18 | typedef struct rdb_cache_t rdb_cache_t; 19 | typedef struct rdb_block_based_table_options_t rdb_block_based_table_options_t; 20 | typedef struct rdb_snapshot_t rdb_snapshot_t; 21 | typedef struct rdb_checkpoint_t rdb_checkpoint_t; 22 | 23 | //////////////////////////// rdb_t 24 | rdb_t* rdb_open( 25 | const rdb_options_t* options, 26 | const char* name, 27 | char** errptr); 28 | rdb_t* rdb_open_for_read_only( 29 | const rdb_options_t* options, 30 | const char* name, 31 | unsigned char error_if_log_file_exist, 32 | char** errptr); 33 | void rdb_close(rdb_t* db); 34 | char* rdb_get( 35 | rdb_t* db, 36 | const rdb_readoptions_t* options, 37 | const char* key, size_t keylen, 38 | size_t* vallen, 39 | char** errptr); 40 | void rdb_put( 41 | rdb_t* db, 42 | const rdb_writeoptions_t* options, 43 | const char* key, size_t keylen, 44 | const char* val, size_t vallen, 45 | char** errptr); 46 | void rdb_delete( 47 | rdb_t* db, 48 | const rdb_writeoptions_t* options, 49 | const char* key, size_t keylen, 50 | char** errptr); 51 | char* rdb_property_value( 52 | rdb_t* db, 53 | const char* propname); 54 | 55 | //////////////////////////// rdb_writebatch_t 56 | rdb_writebatch_t* rdb_writebatch_create(); 57 | rdb_writebatch_t* rdb_writebatch_create_from(const char* rep, size_t size); 58 | void rdb_writebatch_destroy(rdb_writebatch_t* b); 59 | void rdb_writebatch_clear(rdb_writebatch_t* b); 60 | int 
rdb_writebatch_count(rdb_writebatch_t* b); 61 | void rdb_writebatch_put( 62 | rdb_writebatch_t* b, 63 | const char* key, size_t klen, 64 | const char* val, size_t vlen); 65 | void rdb_writebatch_delete( 66 | rdb_writebatch_t* b, 67 | const char* key, size_t klen); 68 | void rdb_write( 69 | rdb_t* db, 70 | const rdb_writeoptions_t* options, 71 | rdb_writebatch_t* batch, 72 | char** errptr); 73 | 74 | //////////////////////////// rdb_options_t 75 | rdb_options_t* rdb_options_create(); 76 | void rdb_options_set_create_if_missing( 77 | rdb_options_t* opt, unsigned char v); 78 | void rdb_options_set_block_based_table_factory( 79 | rdb_options_t *opt, 80 | rdb_block_based_table_options_t* table_options); 81 | 82 | //////////////////////////// rdb_readoptions_t 83 | rdb_readoptions_t* rdb_readoptions_create(); 84 | void rdb_readoptions_destroy(rdb_readoptions_t* opt); 85 | void rdb_readoptions_set_fill_cache( 86 | rdb_readoptions_t* opt, unsigned char v); 87 | void rdb_readoptions_set_snapshot( 88 | rdb_readoptions_t* opt, 89 | const rdb_snapshot_t* snap); 90 | 91 | //////////////////////////// rdb_writeoptions_t 92 | rdb_writeoptions_t* rdb_writeoptions_create(); 93 | void rdb_writeoptions_destroy(rdb_writeoptions_t* opt); 94 | void rdb_writeoptions_set_sync( 95 | rdb_writeoptions_t* opt, unsigned char v); 96 | 97 | //////////////////////////// rdb_iterator_t 98 | rdb_iterator_t* rdb_create_iterator( 99 | rdb_t* db, 100 | const rdb_readoptions_t* options); 101 | void rdb_iter_destroy(rdb_iterator_t* iter); 102 | unsigned char rdb_iter_valid(const rdb_iterator_t* iter); 103 | void rdb_iter_seek_to_first(rdb_iterator_t* iter); 104 | void rdb_iter_seek_to_last(rdb_iterator_t* iter); 105 | void rdb_iter_seek(rdb_iterator_t* iter, const char* k, size_t klen); 106 | void rdb_iter_seek_for_prev(rdb_iterator_t* iter, const char* k, size_t klen); 107 | void rdb_iter_next(rdb_iterator_t* iter); 108 | void rdb_iter_prev(rdb_iterator_t* iter); 109 | const char* rdb_iter_key(const rdb_iterator_t* iter, size_t* klen); 110 | const char* rdb_iter_value(const rdb_iterator_t* iter, size_t* vlen); 111 | void rdb_iter_get_error(const rdb_iterator_t* iter, char** errptr); 112 | 113 | //////////////////////////// rdb_filterpolicy_t 114 | rdb_filterpolicy_t* rdb_filterpolicy_create( 115 | void* state, 116 | void (*destructor)(void*), 117 | char* (*create_filter)( 118 | void*, 119 | const char* const* key_array, const size_t* key_length_array, 120 | int num_keys, 121 | size_t* filter_length), 122 | unsigned char (*key_may_match)( 123 | void*, 124 | const char* key, size_t length, 125 | const char* filter, size_t filter_length), 126 | void (*delete_filter)( 127 | void*, 128 | const char* filter, size_t filter_length), 129 | const char* (*name)(void*)); 130 | rdb_filterpolicy_t* rdbc_filterpolicy_create(uintptr_t idx); 131 | rdb_filterpolicy_t* rdb_filterpolicy_create_bloom(int bits_per_key); 132 | 133 | //////////////////////////// rdb_cache_t 134 | rdb_cache_t* rdb_cache_create_lru(size_t capacity); 135 | void rdb_cache_destroy(rdb_cache_t* cache); 136 | void rdb_cache_set_capacity(rdb_cache_t* cache, size_t capacity); 137 | 138 | //////////////////////////// rdb_block_based_table_options_t 139 | rdb_block_based_table_options_t* 140 | rdb_block_based_options_create(); 141 | void rdb_block_based_options_destroy( 142 | rdb_block_based_table_options_t* options); 143 | void rdb_block_based_options_set_block_size( 144 | rdb_block_based_table_options_t* options, size_t block_size); 145 | void 
rdb_block_based_options_set_filter_policy( 146 | rdb_block_based_table_options_t* options, 147 | rdb_filterpolicy_t* filter_policy); 148 | void rdb_block_based_options_set_no_block_cache( 149 | rdb_block_based_table_options_t* options, 150 | unsigned char no_block_cache); 151 | void rdb_block_based_options_set_block_cache( 152 | rdb_block_based_table_options_t* options, 153 | rdb_cache_t* block_cache); 154 | void rdb_block_based_options_set_block_cache_compressed( 155 | rdb_block_based_table_options_t* options, 156 | rdb_cache_t* block_cache_compressed); 157 | void rdb_block_based_options_set_whole_key_filtering( 158 | rdb_block_based_table_options_t* options, unsigned char v); 159 | 160 | //////////////////////////// rdb_snapshot_t 161 | const rdb_snapshot_t* rdb_create_snapshot( 162 | rdb_t* db); 163 | void rdb_release_snapshot( 164 | rdb_t* db, 165 | const rdb_snapshot_t* snapshot); 166 | 167 | //////////////////////////// rdb_checkpoint_t 168 | rdb_checkpoint_t* rdb_create_checkpoint(rdb_t* db, char** errptr); 169 | void rdb_open_checkpoint( 170 | rdb_checkpoint_t* checkpoint, 171 | const char* checkpoint_dir, 172 | char** errptr); 173 | void rdb_destroy_checkpoint(rdb_checkpoint_t* checkpoint); 174 | 175 | #ifdef __cplusplus 176 | } /* end extern "C" */ 177 | #endif 178 | 179 | #endif // __DGROCKSDBC__ 180 | -------------------------------------------------------------------------------- /randread/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | "log" 7 | "math/rand" 8 | "os" 9 | "path/filepath" 10 | "sync" 11 | "sync/atomic" 12 | "time" 13 | 14 | "github.com/codahale/hdrhistogram" 15 | "github.com/dgraph-io/badger/y" 16 | "github.com/pkg/profile" 17 | "github.com/traetox/goaio" 18 | ) 19 | 20 | var ( 21 | dir = flag.String("dir", "datafiles", "Directory of data files to read from") 22 | mode = flag.Int("mode", 1, "0 = serial, 1 = parallel, 2 = parallel via AIO") 23 | duration = flag.Int64("seconds", 30, "Number of seconds to run for") 24 | numGoroutines = flag.Int("jobs", 8, "Number of Goroutines") 25 | profilemode = flag.String("profile.mode", "", "Enable profiling mode, one of [cpu, mem, mutex, block, trace]") 26 | ) 27 | var done int32 28 | var readSize int64 = 1 << 10 29 | 30 | func getIndices(r *rand.Rand, flist []*os.File, maxFileSize int64) (*os.File, int64) { 31 | fidx := r.Intn(len(flist)) 32 | iidx := r.Int63n(maxFileSize - readSize) 33 | return flist[fidx], iidx 34 | } 35 | 36 | func getAIOIndices(r *rand.Rand, aios []*goaio.AIO, maxFileSize int64) (*goaio.AIO, int64) { 37 | fidx := r.Intn(len(aios)) 38 | iidx := r.Int63n(maxFileSize - readSize) 39 | return aios[fidx], iidx 40 | } 41 | 42 | func Serial(fList []*os.File, maxFileSize int64) { 43 | startT := time.Now() 44 | var count int64 = 0 45 | b := make([]byte, int(readSize)) 46 | hist := hdrhistogram.New(1, 1000000, 4) // in microseconds.
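// Latencies are recorded in microseconds; each sample is clamped to the
// histogram's 1s upper bound before RecordValue, which would otherwise
// return an error for out-of-range values.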
47 | 48 | r := rand.New(rand.NewSource(time.Now().UnixNano())) 49 | for { 50 | if atomic.LoadInt32(&done) == 1 { 51 | break 52 | } 53 | count++ 54 | fd, offset := getIndices(r, fList, maxFileSize) 55 | 56 | start := time.Now() 57 | _, err := fd.ReadAt(b, offset) 58 | if err != nil { 59 | log.Fatalf("Error reading file: %v", err) 60 | } 61 | 62 | dur := time.Since(start).Nanoseconds() / 1000 63 | if dur > 1000000 { 64 | dur = 1000000 65 | } 66 | if err = hist.RecordValue(dur); err != nil { 67 | log.Fatalf("Unable to record hist: %v", err) 68 | } 69 | if count%10000 == 0 { 70 | fmt.Printf("Serial: Number of random reads per second: %f\r", 71 | float64(hist.TotalCount())/time.Since(startT).Seconds()) 72 | } 73 | } 74 | fmt.Println("Serial: Number of random reads per second: ", 75 | float64(count)/time.Since(startT).Seconds()) 76 | fmt.Println("Serial: Time Taken: ", time.Since(startT)) 77 | } 78 | 79 | func Conc2(fList []*os.File, maxFileSize int64) { 80 | startT := time.Now() 81 | var wg sync.WaitGroup 82 | hist := hdrhistogram.New(1, 1000000, 4) // in microseconds. 83 | 84 | for k := 0; k < *numGoroutines; k++ { 85 | wg.Add(1) 86 | go func() { 87 | b := make([]byte, int(readSize)) 88 | r := rand.New(rand.NewSource(time.Now().UnixNano())) 89 | for { 90 | if atomic.LoadInt32(&done) == 1 { 91 | break 92 | } 93 | fd, offset := getIndices(r, fList, maxFileSize) 94 | start := time.Now() 95 | _, err := fd.ReadAt(b, offset) 96 | if err != nil { 97 | log.Fatalf("Error reading file: %v", err) 98 | } 99 | dur := time.Since(start).Nanoseconds() / 1000 100 | if dur > 1000000 { 101 | dur = 1000000 102 | } 103 | if err = hist.RecordValue(dur); err != nil { 104 | log.Fatalf("Unable to record hist: %v", err) 105 | } 106 | } 107 | wg.Done() 108 | }() 109 | } 110 | go func() { 111 | d := time.NewTicker(time.Second) 112 | for range d.C { 113 | fmt.Printf("Concurrent 2: Number of random reads per second: %f\r", 114 | float64(hist.TotalCount())/time.Since(startT).Seconds()) 115 | if atomic.LoadInt32(&done) == 1 { 116 | fmt.Println() 117 | fmt.Println("Concurrent 2: Time Taken: ", time.Since(startT)) 118 | fmt.Println("Total count by hist", hist.TotalCount()) 119 | break 120 | } 121 | } 122 | }() 123 | 124 | wg.Wait() 125 | for _, b := range hist.CumulativeDistribution() { 126 | fmt.Printf("[%f] %d\n", b.Quantile, b.ValueAt) 127 | } 128 | fmt.Printf("=> [0.9] %d\n", hist.ValueAtQuantile(90.0)) 129 | } 130 | 131 | type aioReq struct { 132 | aio *goaio.AIO 133 | offset int64 134 | start time.Time 135 | } 136 | 137 | func ConcAio(fList []*os.File, maxFileSize int64) { 138 | startT := time.Now() 139 | var wg sync.WaitGroup 140 | hist := hdrhistogram.New(1, 1000000, 4) // in microseconds. 
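// NOTE: the per-request latency bookkeeping in this AIO path is commented
// out below, so this histogram stays empty and the reads-per-second figure
// derived from hist.TotalCount() at the end prints as zero.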
141 | 142 | aioCfg := goaio.AIOExtConfig{ 143 | QueueDepth: 32, 144 | } 145 | aioList := make([]*goaio.AIO, len(fList)) 146 | for i := range fList { 147 | aio, err := goaio.NewAIOExt(fList[i].Name(), aioCfg, os.O_RDONLY, 0755) 148 | if err != nil { 149 | fmt.Println("Failed to create new AIO for", fList[i].Name(), err) 150 | return 151 | } 152 | aioList[i] = aio 153 | } 154 | 155 | for k := 0; k < *numGoroutines; k++ { 156 | wg.Add(1) 157 | go func() { 158 | b := make([]byte, int(readSize)) 159 | r := rand.New(rand.NewSource(time.Now().UnixNano())) 160 | ids := make([]goaio.RequestId, 32) 161 | for { 162 | if atomic.LoadInt32(&done) == 1 { 163 | break 164 | } 165 | aio, offset := getAIOIndices(r, aioList, maxFileSize) 166 | // if !aio.Ready() { 167 | // if _, err := aio.WaitAny(ids); err != nil { 168 | // log.Fatalf("Failed to collect requests", err) 169 | // } 170 | // } 171 | // r.start = time.Now() 172 | if _, err := aio.ReadAt(b, offset); err != nil { 173 | log.Fatalf("Unable to read: %v", err) 174 | } 175 | } 176 | 177 | for _, aio := range aioList { 178 | for { 179 | n, err := aio.WaitAny(ids) 180 | if err != nil { 181 | log.Fatalf("Failed to collect reqs: %v", err) 182 | } 183 | if n == 0 { 184 | break 185 | } 186 | } 187 | } 188 | // dur := time.Since(start).Nanoseconds() / 1000 189 | // if dur > 1000000 { 190 | // dur = 1000000 191 | // } 192 | // if err = hist.RecordValue(dur); err != nil { 193 | // log.Fatalf("Unable to record hist: %v", err) 194 | // } 195 | wg.Done() 196 | }() 197 | } 198 | wg.Wait() 199 | fmt.Println("Concurrent AIO: Number of random reads per second: ", 200 | float64(hist.TotalCount())/time.Since(startT).Seconds()) 201 | fmt.Println("Concurrent AIO: Time Taken: ", time.Since(startT)) 202 | total := float64(hist.TotalCount()) 203 | fmt.Println("Total count by hist", total) 204 | for _, b := range hist.CumulativeDistribution() { 205 | fmt.Printf("[%f] %d\n", b.Quantile, b.ValueAt) 206 | } 207 | fmt.Printf("=> [0.9] %d\n", hist.ValueAtQuantile(90.0)) 208 | } 209 | 210 | func main() { 211 | flag.Parse() 212 | 213 | var flist []*os.File 214 | var maxFileSize int64 215 | getFile := func(path string, info os.FileInfo, err error) error { 216 | if err != nil { 217 | log.Print(err) 218 | return nil 219 | } 220 | 221 | if !info.IsDir() { 222 | f, err := os.Open(path) 223 | y.AssertTruef(err == nil, "Error opening file: %v", path) 224 | flist = append(flist, f) 225 | log.Println("Opened file:", path, "Size:", info.Size()/(1<<20), "MB") 226 | maxFileSize = info.Size() 227 | } 228 | return nil 229 | } 230 | 231 | err := filepath.Walk(*dir, getFile) 232 | if err != nil { 233 | log.Fatalf("%v", err) 234 | } 235 | if len(flist) == 0 { 236 | log.Fatalf("Must have files already created") 237 | } 238 | 239 | switch *profilemode { 240 | case "cpu": 241 | defer profile.Start(profile.CPUProfile).Stop() 242 | case "mem": 243 | defer profile.Start(profile.MemProfile).Stop() 244 | case "mutex": 245 | defer profile.Start(profile.MutexProfile).Stop() 246 | case "block": 247 | defer profile.Start(profile.BlockProfile).Stop() 248 | case "trace": 249 | defer profile.Start(profile.TraceProfile).Stop() 250 | } 251 | 252 | done = 0 253 | go func() { 254 | time.Sleep(time.Duration(*duration) * time.Second) 255 | atomic.StoreInt32(&done, 1) 256 | }() 257 | 258 | switch *mode { 259 | case 0: 260 | Serial(flist, maxFileSize) 261 | case 1: 262 | Conc2(flist, maxFileSize) 263 | case 2: 264 | ConcAio(flist, maxFileSize) 265 | default: 266 | log.Fatalf("Unknown mode") 267 | } 268 | } 269 |
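Serial and Conc2 above repeat the same clamp-and-record sequence around every ReadAt call. As a side note, that shared pattern could be factored into a helper along these lines. This is only a sketch, and recordLatency is a hypothetical name, but it uses only the codahale/hdrhistogram API already imported by this file (RecordValue fails on values outside the histogram's range, hence the clamp):

package main

import (
    "log"
    "time"

    "github.com/codahale/hdrhistogram"
)

// recordLatency measures the time elapsed since start in microseconds,
// clamps it to the histogram's 1s upper bound, and records it.
func recordLatency(hist *hdrhistogram.Histogram, start time.Time) {
    us := time.Since(start).Nanoseconds() / 1000
    if us > 1000000 {
        us = 1000000
    }
    if err := hist.RecordValue(us); err != nil {
        log.Fatalf("Unable to record hist: %v", err)
    }
}

With it, each read loop body reduces to: start := time.Now(); fd.ReadAt(b, offset); recordLatency(hist, start).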
-------------------------------------------------------------------------------- /populate/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "flag" 6 | "fmt" 7 | "log" 8 | "math/rand" 9 | "net/http" 10 | _ "net/http/pprof" 11 | "os" 12 | "sync" 13 | "sync/atomic" 14 | "time" 15 | 16 | "golang.org/x/net/trace" 17 | 18 | "github.com/bmatsuo/lmdb-go/lmdb" 19 | "github.com/boltdb/bolt" 20 | "github.com/dgraph-io/badger" 21 | "github.com/dgraph-io/badger-bench/store" 22 | "github.com/dgraph-io/badger/options" 23 | "github.com/dgraph-io/badger/y" 24 | "github.com/paulbellamy/ratecounter" 25 | "github.com/pkg/profile" 26 | "github.com/syndtr/goleveldb/leveldb" 27 | "github.com/syndtr/goleveldb/leveldb/opt" 28 | ) 29 | 30 | const mil float64 = 1000000 31 | 32 | var ( 33 | which = flag.String("kv", "badger", "Which KV store to use. Options: badger, rocksdb, bolt, leveldb") 34 | numKeys = flag.Float64("keys_mil", 10.0, "How many million keys to write.") 35 | valueSize = flag.Int("valsz", 128, "Value size in bytes.") 36 | dir = flag.String("dir", "", "Base dir for writes.") 37 | mode = flag.String("profile.mode", "", "enable profiling mode, one of [cpu, mem, mutex, block]") 38 | ) 39 | 40 | type entry struct { 41 | Key []byte 42 | Value []byte 43 | Meta byte 44 | } 45 | 46 | func fillEntry(e *entry) { 47 | k := rand.Int() % int(*numKeys*mil) 48 | key := fmt.Sprintf("vsz=%05d-k=%010d", *valueSize, k) // 22 bytes. 49 | if cap(e.Key) < len(key) { 50 | e.Key = make([]byte, 2*len(key)) 51 | } 52 | e.Key = e.Key[:len(key)] 53 | copy(e.Key, key) 54 | 55 | rand.Read(e.Value) 56 | e.Meta = 0 57 | } 58 | 59 | var bdb *badger.DB 60 | 61 | var rdb *store.Store 62 | var boltdb *bolt.DB 63 | var ldb *leveldb.DB 64 | var lmdbEnv *lmdb.Env 65 | var lmdbDBI lmdb.DBI 66 | 67 | func writeBatch(entries []*entry) int { 68 | for _, e := range entries { 69 | fillEntry(e) 70 | } 71 | 72 | if bdb != nil { 73 | txn := bdb.NewTransaction(true) 74 | 75 | for _, e := range entries { 76 | y.Check(txn.Set(e.Key, e.Value)) 77 | } 78 | y.Check(txn.Commit()) 79 | } 80 | 81 | if ldb != nil { 82 | batch := new(leveldb.Batch) 83 | for _, e := range entries { 84 | batch.Put(e.Key, e.Value) 85 | } 86 | wopt := &opt.WriteOptions{} 87 | wopt.Sync = true 88 | y.Check(ldb.Write(batch, wopt)) 89 | } 90 | 91 | if rdb != nil { 92 | rb := rdb.NewWriteBatch() 93 | defer rb.Destroy() 94 | for _, e := range entries { 95 | rb.Put(e.Key, e.Value) 96 | } 97 | y.Check(rdb.WriteBatch(rb)) 98 | } 99 | 100 | if boltdb != nil { 101 | err := boltdb.Batch(func(txn *bolt.Tx) error { 102 | boltBkt := txn.Bucket([]byte("bench")) 103 | y.AssertTrue(boltBkt != nil) 104 | for _, e := range entries { 105 | if err := boltBkt.Put(e.Key, e.Value); err != nil { 106 | return err 107 | } 108 | } 109 | return nil 110 | }) 111 | y.Check(err) 112 | } 113 | 114 | if lmdbEnv != nil { 115 | err := lmdbEnv.Update(func(txn *lmdb.Txn) error { 116 | for _, e := range entries { 117 | err := txn.Put(lmdbDBI, e.Key, e.Value, 0) 118 | if err != nil { 119 | return err 120 | } 121 | } 122 | return nil 123 | }) 124 | y.Check(err) 125 | } 126 | 127 | return len(entries) 128 | } 129 | 130 | func humanize(n int64) string { 131 | if n >= 1000000 { 132 | return fmt.Sprintf("%6.2fM", float64(n)/1000000.0) 133 | } 134 | if n >= 1000 { 135 | return fmt.Sprintf("%6.2fK", float64(n)/1000.0) 136 | } 137 | return fmt.Sprintf("%5.2f", float64(n)) 138 | } 139 | 140 | func main() { 141 | flag.Parse() 142 | switch 
*mode { 143 | case "cpu": 144 | defer profile.Start(profile.CPUProfile).Stop() 145 | case "mem": 146 | defer profile.Start(profile.MemProfile).Stop() 147 | case "mutex": 148 | defer profile.Start(profile.MutexProfile).Stop() 149 | case "block": 150 | defer profile.Start(profile.BlockProfile).Stop() 151 | default: 152 | // do nothing 153 | } 154 | 155 | trace.AuthRequest = func(req *http.Request) (any, sensitive bool) { 156 | return true, true 157 | } 158 | 159 | nw := *numKeys * mil 160 | fmt.Printf("TOTAL KEYS TO WRITE: %s\n", humanize(int64(nw))) 161 | opt := badger.DefaultOptions(*dir + "/badger") 162 | opt.TableLoadingMode = options.MemoryMap 163 | opt.SyncWrites = true 164 | 165 | var err error 166 | 167 | var init bool 168 | 169 | if *which == "badger" { 170 | init = true 171 | fmt.Println("Init Badger") 172 | y.Check(os.RemoveAll(*dir + "/badger")) 173 | os.MkdirAll(*dir+"/badger", 0777) 174 | bdb, err = badger.Open(opt) 175 | if err != nil { 176 | log.Fatalf("while opening badger: %v", err) 177 | } 178 | } else if *which == "rocksdb" { 179 | init = true 180 | fmt.Println("Init Rocks") 181 | os.RemoveAll(*dir + "/rocks") 182 | os.MkdirAll(*dir+"/rocks", 0777) 183 | rdb, err = store.NewStore(*dir + "/rocks") 184 | y.Check(err) 185 | } else if *which == "bolt" { 186 | init = true 187 | fmt.Println("Init BoltDB") 188 | os.RemoveAll(*dir + "/bolt") 189 | os.MkdirAll(*dir+"/bolt", 0777) 190 | boltdb, err = bolt.Open(*dir+"/bolt/bolt.db", 0777, bolt.DefaultOptions) 191 | y.Check(err) 192 | boltdb.NoSync = false // Set this to speed up writes 193 | err = boltdb.Update(func(txn *bolt.Tx) error { 194 | var err error 195 | _, err = txn.CreateBucketIfNotExists([]byte("bench")) 196 | return err 197 | }) 198 | y.Check(err) 199 | 200 | } else if *which == "leveldb" { 201 | init = true 202 | fmt.Println("Init LevelDB") 203 | os.RemoveAll(*dir + "/level") 204 | os.MkdirAll(*dir+"/level", 0777) 205 | ldb, err = leveldb.OpenFile(*dir+"/level/l.db", nil) 206 | y.Check(err) 207 | 208 | } else if *which == "lmdb" { 209 | init = true 210 | fmt.Println("Init lmdb") 211 | os.RemoveAll(*dir + "/lmdb") 212 | os.MkdirAll(*dir+"/lmdb", 0777) 213 | 214 | lmdbEnv, err = lmdb.NewEnv() 215 | y.Check(err) 216 | err = lmdbEnv.SetMaxDBs(1) 217 | y.Check(err) 218 | err = lmdbEnv.SetMapSize(1 << 38) // ~273Gb 219 | y.Check(err) 220 | 221 | err = lmdbEnv.Open(*dir+"/lmdb", 0, 0777) 222 | y.Check(err) 223 | 224 | // Acquire handle 225 | err := lmdbEnv.Update(func(txn *lmdb.Txn) error { 226 | var err error 227 | lmdbDBI, err = txn.CreateDBI("bench") 228 | return err 229 | }) 230 | y.Check(err) 231 | } else { 232 | log.Fatalf("Invalid value for option kv: '%s'", *which) 233 | } 234 | 235 | if !init { 236 | log.Fatalf("Invalid arguments. Unable to init any store.") 237 | } 238 | 239 | rc := ratecounter.NewRateCounter(time.Minute) 240 | var counter int64 241 | ctx, cancel := context.WithCancel(context.Background()) 242 | go func() { 243 | var count int64 244 | t := time.NewTicker(time.Second) 245 | defer t.Stop() 246 | for { 247 | select { 248 | case <-t.C: 249 | fmt.Printf("[%04d] Write key rate per minute: %s. Total: %s\n", 250 | count, 251 | humanize(rc.Rate()), 252 | humanize(atomic.LoadInt64(&counter))) 253 | count++ 254 | case <-ctx.Done(): 255 | return 256 | } 257 | } 258 | }() 259 | go func() { 260 | if err := http.ListenAndServe("0.0.0.0:8081", nil); err != nil { 261 | log.Fatalf("While opening http. 
Error: %v", err) 262 | } 263 | }() 264 | 265 | N := 32 266 | var wg sync.WaitGroup 267 | for i := 0; i < N; i++ { 268 | wg.Add(1) 269 | go func(proc int) { 270 | entries := make([]*entry, 1000) 271 | for i := 0; i < len(entries); i++ { 272 | e := new(entry) 273 | e.Key = make([]byte, 22) 274 | e.Value = make([]byte, *valueSize) 275 | entries[i] = e 276 | } 277 | 278 | var written float64 279 | for written < nw/float64(N) { 280 | wrote := float64(writeBatch(entries)) 281 | 282 | wi := int64(wrote) 283 | atomic.AddInt64(&counter, wi) 284 | rc.Incr(wi) 285 | 286 | written += wrote 287 | } 288 | wg.Done() 289 | }(i) 290 | } 291 | // wg.Add(1) // Block 292 | wg.Wait() 293 | cancel() 294 | 295 | if bdb != nil { 296 | fmt.Println("closing badger") 297 | bdb.Close() 298 | } 299 | 300 | if rdb != nil { 301 | fmt.Println("closing rocks") 302 | rdb.Close() 303 | } 304 | 305 | if ldb != nil { 306 | fmt.Println("closing leveldb") 307 | ldb.Close() 308 | } 309 | 310 | if boltdb != nil { 311 | fmt.Println("closing bolt") 312 | boltdb.Close() 313 | } 314 | 315 | if lmdbEnv != nil { 316 | fmt.Println("closing lmdb") 317 | lmdbEnv.CloseDBI(lmdbDBI) 318 | lmdbEnv.Close() 319 | } 320 | 321 | fmt.Printf("\nWROTE %d KEYS\n", atomic.LoadInt64(&counter)) 322 | } 323 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. 
For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /bench_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "flag" 6 | "fmt" 7 | "math" 8 | "math/rand" 9 | "net/http" 10 | "os" 11 | "runtime" 12 | "sync/atomic" 13 | "testing" 14 | 15 | "github.com/bmatsuo/lmdb-go/lmdb" 16 | "github.com/boltdb/bolt" 17 | "github.com/dgraph-io/badger" 18 | "github.com/dgraph-io/badger-bench/store" 19 | "github.com/dgraph-io/badger/options" 20 | "github.com/dgraph-io/badger/y" 21 | "github.com/syndtr/goleveldb/leveldb" 22 | ) 23 | 24 | var ( 25 | ctx = context.Background() 26 | numKeys = flag.Float64("keys_mil", 10.0, "How many million keys to write.") 27 | flagDir = flag.String("dir", "bench-tmp", "Where data is temporarily stored.") 28 | flagValueSize = flag.Int("valsz", 128, "Size of each value.") 29 | ) 30 | 31 | const Mi int = 1000000 32 | const Mf float64 = 1000000 33 | 34 | func getBadger() (*badger.DB, error) { 35 | opt := badger.DefaultOptions(*flagDir + "/badger") 36 | opt.TableLoadingMode = options.LoadToRAM 37 | opt.ReadOnly = true 38 | return badger.Open(opt) 39 | } 40 | 41 | func getRocks() *store.Store { 42 | rdb, err := store.NewReadOnlyStore(*flagDir + "/rocks") 43 | y.Check(err) 44 | return rdb 45 | } 46 | 47 | func getLevelDB() *leveldb.DB { 48 | ldb, err := leveldb.OpenFile(*flagDir+"/level/l.db", nil) 49 | y.Check(err) 50 | return ldb 51 | } 52 | 53 | func getBoltDB() *bolt.DB { 54 | opts := bolt.DefaultOptions 55 | opts.ReadOnly = true 56 | boltdb, err := bolt.Open(*flagDir+"/bolt/bolt.db", 0777, opts) 57 | y.Check(err) 58 | return boltdb 59 | } 60 | 61 | func getLmdb() *lmdb.Env { 62 | lmdbEnv, err := lmdb.NewEnv() 63 | y.Check(err) 64 | err = lmdbEnv.SetMaxReaders(math.MaxInt64) 65 | y.Check(err) 66 | err = lmdbEnv.SetMaxDBs(1) 67 | y.Check(err) 68 | err = lmdbEnv.SetMapSize(1 << 38) // ~273Gb 69 | y.Check(err) 70 | 71 | err = lmdbEnv.Open(*flagDir+"/lmdb", lmdb.Readonly|lmdb.NoReadahead, 0777) 72 | y.Check(err) 73 | return lmdbEnv 74 | } 75 | 76 | func newKey() []byte { 77 | k := rand.Int() % int(*numKeys*Mf) 78 | key := fmt.Sprintf("vsz=%05d-k=%010d", *flagValueSize, k) // 22 bytes. 79 | return []byte(key) 80 | } 81 | 82 | func print(count int) { 83 | if count%100000 == 0 { 84 | fmt.Printf(".") 85 | } else if count%Mi == 0 { 86 | fmt.Printf("-") 87 | } 88 | } 89 | 90 | type hitCounter struct { 91 | found uint64 92 | notFound uint64 93 | errored uint64 94 | } 95 | 96 | func (h *hitCounter) Reset() { 97 | h.found, h.notFound, h.errored = 0, 0, 0 98 | } 99 | 100 | func (h *hitCounter) Update(c *hitCounter) { 101 | atomic.AddUint64(&h.found, c.found) 102 | atomic.AddUint64(&h.notFound, c.notFound) 103 | atomic.AddUint64(&h.errored, c.errored) 104 | } 105 | 106 | func (h *hitCounter) Print(storeName string, b *testing.B) { 107 | b.Logf("%s: %d keys had valid values.", storeName, h.found) 108 | b.Logf("%s: %d keys had no values", storeName, h.notFound) 109 | b.Logf("%s: %d keys had errors", storeName, h.errored) 110 | b.Logf("%s: %d total keys looked at", storeName, h.found+h.notFound+h.errored) 111 | b.Logf("%s: hit rate : %.2f", storeName, float64(h.found)/float64(h.found+h.notFound+h.errored)) 112 | } 113 | 114 | // A generic read benchmark that runs the doBench func for a specific key value store, 115 | // aggregates the hit counts and prints them out. 
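// Each RunParallel worker below tallies into its own private hitCounter and
// merges it into the shared counter through hitCounter.Update, whose atomic
// adds keep the hot read loop free of locks.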
116 | func runRandomReadBenchmark(b *testing.B, storeName string, doBench func(*hitCounter, *testing.PB)) { 117 | counter := &hitCounter{} 118 | b.Run("read-random"+storeName, func(b *testing.B) { 119 | counter.Reset() 120 | b.RunParallel(func(pb *testing.PB) { 121 | c := &hitCounter{} 122 | doBench(c, pb) 123 | counter.Update(c) 124 | }) 125 | }) 126 | counter.Print(storeName, b) 127 | } 128 | 129 | func BenchmarkReadRandomBadger(b *testing.B) { 130 | bdb, err := getBadger() 131 | y.Check(err) 132 | defer bdb.Close() 133 | 134 | read := func(txn *badger.Txn, key []byte) error { 135 | item, err := txn.Get(key) 136 | if err != nil { 137 | return err 138 | } 139 | val, err := item.ValueCopy(nil) 140 | if err != nil { 141 | return err 142 | } 143 | y.AssertTruef(len(val) == *flagValueSize, 144 | "Assertion failed. value size is %d, expected %d", len(val), *flagValueSize) 145 | return nil 146 | } 147 | 148 | runRandomReadBenchmark(b, "badger", func(c *hitCounter, pb *testing.PB) { 149 | err := bdb.View(func(txn *badger.Txn) error { 150 | for pb.Next() { 151 | key := newKey() 152 | err := read(txn, key) 153 | if err == badger.ErrKeyNotFound { 154 | c.notFound++ 155 | } else if err != nil { 156 | c.errored++ 157 | } else { 158 | c.found++ 159 | } 160 | } 161 | return nil 162 | }) 163 | y.Check(err) 164 | }) 165 | } 166 | 167 | func BenchmarkReadRandomRocks(b *testing.B) { 168 | rdb := getRocks() 169 | defer rdb.Close() 170 | runRandomReadBenchmark(b, "rocksdb", func(c *hitCounter, pb *testing.PB) { 171 | for pb.Next() { 172 | key := newKey() 173 | rdb_slice, err := rdb.Get(key) 174 | if err != nil { 175 | c.errored++ 176 | } else if rdb_slice.Size() > 0 { 177 | c.found++ 178 | } else { 179 | c.notFound++ 180 | } 181 | } 182 | }) 183 | } 184 | 185 | func BenchmarkReadRandomLevel(b *testing.B) { 186 | ldb := getLevelDB() 187 | defer ldb.Close() 188 | 189 | runRandomReadBenchmark(b, "leveldb", func(c *hitCounter, pb *testing.PB) { 190 | for pb.Next() { 191 | key := newKey() 192 | v, err := ldb.Get(key, nil) 193 | if err == leveldb.ErrNotFound { 194 | c.notFound++ 195 | } else if err != nil { 196 | c.errored++ 197 | } else { 198 | y.AssertTruef(len(v) == *flagValueSize, 199 | "Assertion failed. value size is %d, expected %d", len(v), *flagValueSize) 200 | c.found++ 201 | } 202 | } 203 | }) 204 | } 205 | 206 | func BenchmarkReadRandomBolt(b *testing.B) { 207 | boltdb := getBoltDB() 208 | defer boltdb.Close() 209 | 210 | runRandomReadBenchmark(b, "bolt", func(c *hitCounter, pb *testing.PB) { 211 | err := boltdb.View(func(txn *bolt.Tx) error { 212 | boltBkt := txn.Bucket([]byte("bench")) 213 | y.AssertTrue(boltBkt != nil) 214 | for pb.Next() { 215 | key := newKey() 216 | v := boltBkt.Get(key) 217 | if v == nil { 218 | c.notFound++ 219 | continue 220 | } 221 | y.AssertTruef(len(v) == *flagValueSize, 222 | "Assertion failed. 
value size is %d, expected %d", len(v), *flagValueSize) 223 | c.found++ 224 | } 225 | return nil 226 | }) 227 | y.Check(err) 228 | }) 229 | } 230 | 231 | func BenchmarkReadRandomLmdb(b *testing.B) { 232 | lmdbEnv := getLmdb() 233 | defer lmdbEnv.Close() 234 | 235 | var lmdbDBI lmdb.DBI 236 | // Acquire handle 237 | err := lmdbEnv.View(func(txn *lmdb.Txn) error { 238 | var err error 239 | lmdbDBI, err = txn.OpenDBI("bench", 0) 240 | return err 241 | }) 242 | y.Check(err) 243 | defer lmdbEnv.CloseDBI(lmdbDBI) 244 | 245 | runRandomReadBenchmark(b, "lmdb", func(c *hitCounter, pb *testing.PB) { 246 | err := lmdbEnv.View(func(txn *lmdb.Txn) error { 247 | txn.RawRead = true 248 | for pb.Next() { 249 | key := newKey() 250 | v, err := txn.Get(lmdbDBI, key) 251 | if lmdb.IsNotFound(err) { 252 | c.notFound++ 253 | continue 254 | } else if err != nil { 255 | c.errored++ 256 | continue 257 | } 258 | y.AssertTruef(len(v) == *flagValueSize, "Assertion failed. value size is %d, expected %d", len(v), *flagValueSize) 259 | c.found++ 260 | } 261 | return nil 262 | }) 263 | if err != nil { 264 | y.Check(err) 265 | } 266 | }) 267 | } 268 | 269 | func safecopy(dst []byte, src []byte) []byte { 270 | if cap(dst) < len(src) { 271 | dst = make([]byte, len(src)) 272 | } 273 | dst = dst[0:len(src)] 274 | copy(dst, src) 275 | return dst 276 | } 277 | 278 | func BenchmarkIterateRocks(b *testing.B) { 279 | rdb := getRocks() 280 | defer rdb.Close() 281 | k := make([]byte, 1024) 282 | v := make([]byte, Mi) 283 | b.ResetTimer() 284 | b.Run("rocksdb-iterate", func(b *testing.B) { 285 | for j := 0; j < b.N; j++ { 286 | itr := rdb.NewIterator() 287 | var count int 288 | for itr.SeekToFirst(); itr.Valid(); itr.Next() { 289 | { 290 | // do some processing. 291 | k = safecopy(k, itr.Key().Data()) 292 | v = safecopy(v, itr.Value().Data()) 293 | } 294 | count++ 295 | print(count) 296 | if count >= 2*Mi { 297 | break 298 | } 299 | } 300 | b.Logf("[%d] Counted %d keys\n", j, count) 301 | } 302 | }) 303 | } 304 | 305 | func BenchmarkIterateBolt(b *testing.B) { 306 | boltdb := getBoltDB() 307 | defer boltdb.Close() 308 | 309 | k := make([]byte, 1024) 310 | v := make([]byte, Mi) 311 | b.ResetTimer() 312 | 313 | b.Run("boltdb-iterate", func(b *testing.B) { 314 | for j := 0; j < b.N; j++ { 315 | var count int 316 | err := boltdb.View(func(txn *bolt.Tx) error { 317 | boltBkt := txn.Bucket([]byte("bench")) 318 | y.AssertTrue(boltBkt != nil) 319 | cur := boltBkt.Cursor() 320 | for k1, v1 := cur.First(); k1 != nil; k1, v1 = cur.Next() { 321 | y.AssertTruef(len(v1) == *flagValueSize, "Assertion failed. value size is %d, expected %d", len(v1), *flagValueSize) 322 | 323 | // do some processing. 
324 |         k = safecopy(k, k1)
325 |         v = safecopy(v, v1)
326 | 
327 |         count++
328 |         print(count)
329 |         if count >= 2*Mi {
330 |           break
331 |         }
332 |       }
333 |       return nil
334 |     })
335 |     y.Check(err)
336 |     b.Logf("[%d] Counted %d keys\n", j, count)
337 |   }
338 |   })
339 | }
340 | 
341 | func BenchmarkIterateLmdb(b *testing.B) {
342 |   lmdbEnv := getLmdb()
343 |   defer lmdbEnv.Close()
344 | 
345 |   var lmdbDBI lmdb.DBI
346 |   // Acquire handle
347 |   err := lmdbEnv.View(func(txn *lmdb.Txn) error {
348 |     var err error
349 |     lmdbDBI, err = txn.OpenDBI("bench", 0)
350 |     return err
351 |   })
352 |   y.Check(err)
353 |   defer lmdbEnv.CloseDBI(lmdbDBI)
354 | 
355 |   k := make([]byte, 1024)
356 |   v := make([]byte, Mi)
357 |   b.ResetTimer()
358 | 
359 |   b.Run("lmdb-iterate", func(b *testing.B) {
360 |     for j := 0; j < b.N; j++ {
361 |       var count int
362 |       err = lmdbEnv.View(func(txn *lmdb.Txn) error {
363 |         txn.RawRead = true
364 |         cur, err := txn.OpenCursor(lmdbDBI)
365 |         if err != nil {
366 |           return err
367 |         }
368 |         defer cur.Close()
369 | 
370 |         for {
371 |           k1, v1, err := cur.Get(nil, nil, lmdb.Next)
372 |           if lmdb.IsNotFound(err) {
373 |             return nil
374 |           }
375 |           if err != nil {
376 |             return err
377 |           }
378 | 
379 |           y.AssertTruef(len(v1) == *flagValueSize, "Assertion failed. value size is %d, expected %d", len(v1), *flagValueSize)
380 | 
381 |           // do some processing.
382 |           k = safecopy(k, k1)
383 |           v = safecopy(v, v1)
384 | 
385 |           count++
386 |           print(count)
387 |           if count >= 2*Mi {
388 |             break
389 |           }
390 |         }
391 |         return nil
392 |       })
393 |       y.Check(err)
394 |       b.Logf("[%d] Counted %d keys\n", j, count)
395 |     }
396 |   })
397 | }
398 | 
399 | func BenchmarkIterateBadgerOnlyKeys(b *testing.B) {
400 |   bdb, err := getBadger()
401 |   y.Check(err)
402 |   defer bdb.Close()
403 |   k := make([]byte, 1024)
404 |   b.ResetTimer()
405 | 
406 |   b.Run("badger-iterate-onlykeys", func(b *testing.B) {
407 |     for j := 0; j < b.N; j++ {
408 |       var count int
409 |       // Keys only: PrefetchValues is left at its zero value (false), so values are never read.
410 |       opt := badger.IteratorOptions{}
411 |       opt.PrefetchSize = 256
412 |       txn := bdb.NewTransaction(false)
413 |       itr := txn.NewIterator(opt)
414 |       for itr.Rewind(); itr.Valid(); itr.Next() {
415 |         item := itr.Item()
416 |         {
417 |           // do some processing.
418 |           k = safecopy(k, item.Key())
419 |         }
420 |         count++
421 |         print(count)
422 |         if count >= 2*Mi {
423 |           break
424 |         }
425 |       }
426 |       b.Logf("[%d] Counted %d keys\n", j, count)
427 |     }
428 |   })
429 | }
430 | 
431 | func BenchmarkIterateBadgerWithValues(b *testing.B) {
432 |   bdb, err := getBadger()
433 |   y.Check(err)
434 |   defer bdb.Close()
435 |   k := make([]byte, 1024)
436 |   v := make([]byte, Mi)
437 |   b.ResetTimer()
438 | 
439 |   b.Run("badger-iterate-withvals", func(b *testing.B) {
440 |     for j := 0; j < b.N; j++ {
441 |       var count int
442 |       opt := badger.IteratorOptions{}
443 |       opt.PrefetchSize = 256
444 |       opt.PrefetchValues = true
445 |       txn := bdb.NewTransaction(false)
446 |       itr := txn.NewIterator(opt)
447 |       for itr.Rewind(); itr.Valid(); itr.Next() {
448 |         item := itr.Item()
449 |         val, err := item.ValueCopy(nil)
450 |         y.Check(err)
451 | 
452 |         vsz := len(val)
453 |         y.AssertTruef(vsz == *flagValueSize,
454 |           "Assertion failed. value size is %d, expected %d", vsz, *flagValueSize)
455 |         // do some processing. 
456 |         k = safecopy(k, item.Key())
457 |         v = safecopy(v, val)
458 |         count++
459 |         print(count)
460 |         if count >= 2*Mi {
461 |           break
462 |         }
463 |       }
464 |       b.Logf("[%d] Counted %d keys\n", j, count)
465 |     }
466 |   })
467 | }
468 | 
469 | func TestMain(m *testing.M) {
470 |   flag.Parse()
471 |   runtime.GOMAXPROCS(128)
472 |   // Expose the default HTTP mux so profiling endpoints are reachable during runs.
473 |   go http.ListenAndServe(":8080", nil)
474 |   os.Exit(m.Run())
475 | }
476 | 
--------------------------------------------------------------------------------
/rdb/rdbc.cc:
--------------------------------------------------------------------------------
1 | // This file is a subset of the C API from RocksDB. It should remain consistent
2 | // with the upstream C API. There will be another file which contains some
3 | // extra routines that we find useful.
4 | #include <assert.h>
5 | #include <stdlib.h>
6 | #include <string.h>
7 | #include <vector>
8 | 
9 | #include "rocksdb/cache.h"
10 | #include "rocksdb/db.h"
11 | #include "rocksdb/filter_policy.h"
12 | #include "rocksdb/iterator.h"
13 | #include "rocksdb/options.h"
14 | #include "rocksdb/snapshot.h"
15 | #include "rocksdb/status.h"
16 | #include "rocksdb/table.h"
17 | #include "rocksdb/write_batch.h"
18 | #include "rocksdb/utilities/checkpoint.h"
19 | 
20 | #include "rdbc.h"
21 | #include "_cgo_export.h"
22 | 
23 | using rocksdb::DB;
24 | using rocksdb::Options;
25 | using rocksdb::Status;
26 | using rocksdb::ReadOptions;
27 | using rocksdb::Slice;
28 | using rocksdb::WriteOptions;
29 | using rocksdb::WriteBatch;
30 | using rocksdb::Iterator;
31 | using rocksdb::FilterPolicy;
32 | using rocksdb::NewBloomFilterPolicy;
33 | using rocksdb::Cache;
34 | using rocksdb::NewLRUCache;
35 | using rocksdb::BlockBasedTableOptions;
36 | using rocksdb::Snapshot;
37 | using rocksdb::Checkpoint;
38 | 
39 | struct rdb_t { DB* rep; };
40 | struct rdb_options_t { Options rep; };
41 | struct rdb_readoptions_t {
42 |   ReadOptions rep;
43 |   Slice upper_bound; // stack variable to set pointer to in ReadOptions
44 | };
45 | struct rdb_writeoptions_t { WriteOptions rep; };
46 | struct rdb_writebatch_t { WriteBatch rep; };
47 | struct rdb_iterator_t { Iterator* rep; };
48 | struct rdb_cache_t { std::shared_ptr<Cache> rep; };
49 | struct rdb_block_based_table_options_t { BlockBasedTableOptions rep; };
50 | struct rdb_snapshot_t { const Snapshot* rep; };
51 | struct rdb_checkpoint_t { Checkpoint* rep; };
52 | 
53 | bool SaveError(char** errptr, const Status& s) {
54 |   assert(errptr != nullptr);
55 |   if (s.ok()) {
56 |     return false;
57 |   } else if (*errptr == nullptr) {
58 |     *errptr = strdup(s.ToString().c_str());
59 |   } else {
60 |     // TODO(sanjay): Merge with existing error? 
61 |     // This is a bug if *errptr is not created by malloc()
62 |     free(*errptr);
63 |     *errptr = strdup(s.ToString().c_str());
64 |   }
65 |   return true;
66 | }
67 | 
68 | static char* CopyString(const std::string& str) {
69 |   char* result = reinterpret_cast<char*>(malloc(sizeof(char) * str.size()));
70 |   memcpy(result, str.data(), sizeof(char) * str.size());
71 |   return result;
72 | }
73 | 
74 | //////////////////////////// rdb_t
75 | rdb_t* rdb_open(
76 |     const rdb_options_t* options,
77 |     const char* name,
78 |     char** errptr) {
79 |   DB* db;
80 |   if (SaveError(errptr, DB::Open(options->rep, std::string(name), &db))) {
81 |     return nullptr;
82 |   }
83 |   rdb_t* result = new rdb_t;
84 |   result->rep = db;
85 |   return result;
86 | }
87 | 
88 | rdb_t* rdb_open_for_read_only(
89 |     const rdb_options_t* options,
90 |     const char* name,
91 |     unsigned char error_if_log_file_exist,
92 |     char** errptr) {
93 |   DB* db;
94 |   if (SaveError(errptr, DB::OpenForReadOnly(options->rep, std::string(name), &db, error_if_log_file_exist))) {
95 |     return nullptr;
96 |   }
97 |   rdb_t* result = new rdb_t;
98 |   result->rep = db;
99 |   return result;
100 | }
101 | 
102 | void rdb_close(rdb_t* db) {
103 |   delete db->rep;
104 |   delete db;
105 | }
106 | 
107 | char* rdb_get(
108 |     rdb_t* db,
109 |     const rdb_readoptions_t* options,
110 |     const char* key, size_t keylen,
111 |     size_t* vallen,
112 |     char** errptr) {
113 |   char* result = nullptr;
114 |   std::string tmp;
115 |   Status s = db->rep->Get(options->rep, Slice(key, keylen), &tmp);
116 |   if (s.ok()) {
117 |     *vallen = tmp.size();
118 |     result = CopyString(tmp);
119 |   } else {
120 |     *vallen = 0;
121 |     if (!s.IsNotFound()) {
122 |       SaveError(errptr, s);
123 |     }
124 |   }
125 |   return result;
126 | }
127 | 
128 | void rdb_put(
129 |     rdb_t* db,
130 |     const rdb_writeoptions_t* options,
131 |     const char* key, size_t keylen,
132 |     const char* val, size_t vallen,
133 |     char** errptr) {
134 |   SaveError(errptr,
135 |             db->rep->Put(options->rep, Slice(key, keylen), Slice(val, vallen)));
136 | }
137 | 
138 | void rdb_delete(
139 |     rdb_t* db,
140 |     const rdb_writeoptions_t* options,
141 |     const char* key, size_t keylen,
142 |     char** errptr) {
143 |   SaveError(errptr, db->rep->Delete(options->rep, Slice(key, keylen)));
144 | }
145 | 
146 | char* rdb_property_value(
147 |     rdb_t* db,
148 |     const char* propname) {
149 |   std::string tmp;
150 |   if (db->rep->GetProperty(Slice(propname), &tmp)) {
151 |     // We use strdup() since we expect human readable output. 
152 | return strdup(tmp.c_str()); 153 | } else { 154 | return nullptr; 155 | } 156 | } 157 | 158 | //////////////////////////// rdb_writebatch_t 159 | rdb_writebatch_t* rdb_writebatch_create() { 160 | return new rdb_writebatch_t; 161 | } 162 | 163 | rdb_writebatch_t* rdb_writebatch_create_from(const char* rep, 164 | size_t size) { 165 | rdb_writebatch_t* b = new rdb_writebatch_t; 166 | b->rep = WriteBatch(std::string(rep, size)); 167 | return b; 168 | } 169 | 170 | void rdb_writebatch_destroy(rdb_writebatch_t* b) { 171 | delete b; 172 | } 173 | 174 | void rdb_writebatch_clear(rdb_writebatch_t* b) { 175 | b->rep.Clear(); 176 | } 177 | 178 | int rdb_writebatch_count(rdb_writebatch_t* b) { 179 | return b->rep.Count(); 180 | } 181 | 182 | void rdb_writebatch_put( 183 | rdb_writebatch_t* b, 184 | const char* key, size_t klen, 185 | const char* val, size_t vlen) { 186 | b->rep.Put(Slice(key, klen), Slice(val, vlen)); 187 | } 188 | 189 | void rdb_writebatch_delete( 190 | rdb_writebatch_t* b, 191 | const char* key, size_t klen) { 192 | b->rep.Delete(Slice(key, klen)); 193 | } 194 | 195 | void rdb_write( 196 | rdb_t* db, 197 | const rdb_writeoptions_t* options, 198 | rdb_writebatch_t* batch, 199 | char** errptr) { 200 | SaveError(errptr, db->rep->Write(options->rep, &batch->rep)); 201 | } 202 | 203 | //////////////////////////// rdb_options_t 204 | rdb_options_t* rdb_options_create() { 205 | return new rdb_options_t; 206 | } 207 | 208 | void rdb_options_set_create_if_missing( 209 | rdb_options_t* opt, unsigned char v) { 210 | opt->rep.create_if_missing = v; 211 | } 212 | 213 | void rdb_options_set_block_based_table_factory( 214 | rdb_options_t *opt, 215 | rdb_block_based_table_options_t* table_options) { 216 | if (table_options) { 217 | opt->rep.table_factory.reset( 218 | rocksdb::NewBlockBasedTableFactory(table_options->rep)); 219 | } 220 | } 221 | 222 | //////////////////////////// rdb_readoptions_t 223 | rdb_readoptions_t* rdb_readoptions_create() { 224 | return new rdb_readoptions_t; 225 | } 226 | 227 | void rdb_readoptions_destroy(rdb_readoptions_t* opt) { 228 | delete opt; 229 | } 230 | 231 | void rdb_readoptions_set_fill_cache( 232 | rdb_readoptions_t* opt, unsigned char v) { 233 | opt->rep.fill_cache = v; 234 | } 235 | 236 | void rdb_readoptions_set_snapshot( 237 | rdb_readoptions_t* opt, 238 | const rdb_snapshot_t* snap) { 239 | opt->rep.snapshot = (snap ? 
snap->rep : nullptr);
240 | }
241 | 
242 | //////////////////////////// rdb_writeoptions_t
243 | rdb_writeoptions_t* rdb_writeoptions_create() {
244 |   return new rdb_writeoptions_t;
245 | }
246 | 
247 | void rdb_writeoptions_destroy(rdb_writeoptions_t* opt) {
248 |   delete opt;
249 | }
250 | 
251 | void rdb_writeoptions_set_sync(
252 |     rdb_writeoptions_t* opt, unsigned char v) {
253 |   opt->rep.sync = v;
254 | }
255 | 
256 | //////////////////////////// rdb_iterator_t
257 | rdb_iterator_t* rdb_create_iterator(
258 |     rdb_t* db,
259 |     const rdb_readoptions_t* options) {
260 |   rdb_iterator_t* result = new rdb_iterator_t;
261 |   result->rep = db->rep->NewIterator(options->rep);
262 |   return result;
263 | }
264 | 
265 | void rdb_iter_destroy(rdb_iterator_t* iter) {
266 |   delete iter->rep;
267 |   delete iter;
268 | }
269 | 
270 | unsigned char rdb_iter_valid(const rdb_iterator_t* iter) {
271 |   return iter->rep->Valid();
272 | }
273 | 
274 | void rdb_iter_seek_to_first(rdb_iterator_t* iter) {
275 |   iter->rep->SeekToFirst();
276 | }
277 | 
278 | void rdb_iter_seek_to_last(rdb_iterator_t* iter) {
279 |   iter->rep->SeekToLast();
280 | }
281 | 
282 | void rdb_iter_seek(rdb_iterator_t* iter, const char* k, size_t klen) {
283 |   iter->rep->Seek(Slice(k, klen));
284 | }
285 | 
286 | void rdb_iter_seek_for_prev(rdb_iterator_t* iter, const char* k, size_t klen) {
287 |   iter->rep->SeekForPrev(Slice(k, klen));
288 | }
289 | 
290 | void rdb_iter_next(rdb_iterator_t* iter) {
291 |   iter->rep->Next();
292 | }
293 | 
294 | void rdb_iter_prev(rdb_iterator_t* iter) {
295 |   iter->rep->Prev();
296 | }
297 | 
298 | const char* rdb_iter_key(const rdb_iterator_t* iter, size_t* klen) {
299 |   Slice s = iter->rep->key();
300 |   *klen = s.size();
301 |   return s.data();
302 | }
303 | 
304 | const char* rdb_iter_value(const rdb_iterator_t* iter, size_t* vlen) {
305 |   Slice s = iter->rep->value();
306 |   *vlen = s.size();
307 |   return s.data();
308 | }
309 | 
310 | void rdb_iter_get_error(const rdb_iterator_t* iter, char** errptr) {
311 |   SaveError(errptr, iter->rep->status());
312 | }
313 | 
314 | //////////////////////////// rdb_filterpolicy_t
315 | struct rdb_filterpolicy_t : public FilterPolicy {
316 |   void* state_;
317 |   void (*destructor_)(void*);
318 |   const char* (*name_)(void*);
319 |   char* (*create_)(
320 |       void*,
321 |       const char* const* key_array, const size_t* key_length_array,
322 |       int num_keys,
323 |       size_t* filter_length);
324 |   unsigned char (*key_match_)(
325 |       void*,
326 |       const char* key, size_t length,
327 |       const char* filter, size_t filter_length);
328 |   void (*delete_filter_)(
329 |       void*,
330 |       const char* filter, size_t filter_length);
331 | 
332 |   virtual ~rdb_filterpolicy_t() {
333 |     (*destructor_)(state_);
334 |   }
335 | 
336 |   virtual const char* Name() const override { return (*name_)(state_); }
337 | 
338 |   virtual void CreateFilter(const Slice* keys, int n,
339 |                             std::string* dst) const override {
340 |     std::vector<const char*> key_pointers(n);
341 |     std::vector<size_t> key_sizes(n);
342 |     for (int i = 0; i < n; i++) {
343 |       key_pointers[i] = keys[i].data();
344 |       key_sizes[i] = keys[i].size();
345 |     }
346 |     size_t len;
347 |     char* filter = (*create_)(state_, &key_pointers[0], &key_sizes[0], n, &len);
348 |     dst->append(filter, len);
349 | 
350 |     if (delete_filter_ != nullptr) {
351 |       (*delete_filter_)(state_, filter, len);
352 |     } else {
353 |       free(filter);
354 |     }
355 |   }
356 | 
357 |   virtual bool KeyMayMatch(const Slice& key,
358 |                            const Slice& filter) const override {
359 |     return (*key_match_)(state_, key.data(), key.size(),
360 | 
filter.data(), filter.size()); 361 | } 362 | }; 363 | 364 | rdb_filterpolicy_t* rdb_filterpolicy_create( 365 | void* state, 366 | void (*destructor)(void*), 367 | char* (*create_filter)( 368 | void*, 369 | const char* const* key_array, const size_t* key_length_array, 370 | int num_keys, 371 | size_t* filter_length), 372 | unsigned char (*key_may_match)( 373 | void*, 374 | const char* key, size_t length, 375 | const char* filter, size_t filter_length), 376 | void (*delete_filter)( 377 | void*, 378 | const char* filter, size_t filter_length), 379 | const char* (*name)(void*)) { 380 | rdb_filterpolicy_t* result = new rdb_filterpolicy_t; 381 | result->state_ = state; 382 | result->destructor_ = destructor; 383 | result->create_ = create_filter; 384 | result->key_match_ = key_may_match; 385 | result->delete_filter_ = delete_filter; 386 | result->name_ = name; 387 | return result; 388 | } 389 | 390 | void rdb_filterpolicy_destroy(rdb_filterpolicy_t* filter) { 391 | delete filter; 392 | } 393 | 394 | void rdbc_destruct_handler(void* state) { } 395 | void rdbc_filterpolicy_delete_filter(void* state, const char* v, size_t s) { } 396 | 397 | rdb_filterpolicy_t* rdbc_filterpolicy_create(uintptr_t idx) { 398 | return rdb_filterpolicy_create( 399 | (void*)idx, 400 | rdbc_destruct_handler, 401 | (char* (*)(void*, const char* const*, const size_t*, int, size_t*))(rdbc_filterpolicy_create_filter), 402 | (unsigned char (*)(void*, const char*, size_t, const char*, size_t))(rdbc_filterpolicy_key_may_match), 403 | rdbc_filterpolicy_delete_filter, 404 | (const char *(*)(void*))(rdbc_filterpolicy_name)); 405 | } 406 | 407 | rdb_filterpolicy_t* rdb_filterpolicy_create_bloom_format(int bits_per_key, bool original_format) { 408 | // Make a rdb_filterpolicy_t, but override all of its methods so 409 | // they delegate to a NewBloomFilterPolicy() instead of user 410 | // supplied C functions. 
411 | struct Wrapper : public rdb_filterpolicy_t { 412 | const FilterPolicy* rep_; 413 | ~Wrapper() { delete rep_; } 414 | const char* Name() const override { return rep_->Name(); } 415 | void CreateFilter(const Slice* keys, int n, 416 | std::string* dst) const override { 417 | return rep_->CreateFilter(keys, n, dst); 418 | } 419 | bool KeyMayMatch(const Slice& key, const Slice& filter) const override { 420 | return rep_->KeyMayMatch(key, filter); 421 | } 422 | static void DoNothing(void*) { } 423 | }; 424 | Wrapper* wrapper = new Wrapper; 425 | wrapper->rep_ = NewBloomFilterPolicy(bits_per_key, original_format); 426 | wrapper->state_ = nullptr; 427 | wrapper->delete_filter_ = nullptr; 428 | wrapper->destructor_ = &Wrapper::DoNothing; 429 | return wrapper; 430 | } 431 | 432 | rdb_filterpolicy_t* rdb_filterpolicy_create_bloom_full(int bits_per_key) { 433 | return rdb_filterpolicy_create_bloom_format(bits_per_key, false); 434 | } 435 | 436 | rdb_filterpolicy_t* rdb_filterpolicy_create_bloom(int bits_per_key) { 437 | return rdb_filterpolicy_create_bloom_format(bits_per_key, true); 438 | } 439 | 440 | //////////////////////////// rdb_cache_t 441 | rdb_cache_t* rdb_cache_create_lru(size_t capacity) { 442 | rdb_cache_t* c = new rdb_cache_t; 443 | c->rep = NewLRUCache(capacity); 444 | return c; 445 | } 446 | 447 | void rdb_cache_destroy(rdb_cache_t* cache) { 448 | delete cache; 449 | } 450 | 451 | void rdb_cache_set_capacity(rdb_cache_t* cache, size_t capacity) { 452 | cache->rep->SetCapacity(capacity); 453 | } 454 | 455 | //////////////////////////// rdb_block_based_table_options_t 456 | rdb_block_based_table_options_t* 457 | rdb_block_based_options_create() { 458 | return new rdb_block_based_table_options_t; 459 | } 460 | 461 | void rdb_block_based_options_destroy( 462 | rdb_block_based_table_options_t* options) { 463 | delete options; 464 | } 465 | 466 | void rdb_block_based_options_set_block_size( 467 | rdb_block_based_table_options_t* options, size_t block_size) { 468 | options->rep.block_size = block_size; 469 | } 470 | 471 | void rdb_block_based_options_set_filter_policy( 472 | rdb_block_based_table_options_t* options, 473 | rdb_filterpolicy_t* filter_policy) { 474 | options->rep.filter_policy.reset(filter_policy); 475 | } 476 | 477 | void rdb_block_based_options_set_no_block_cache( 478 | rdb_block_based_table_options_t* options, 479 | unsigned char no_block_cache) { 480 | options->rep.no_block_cache = no_block_cache; 481 | } 482 | 483 | void rdb_block_based_options_set_block_cache( 484 | rdb_block_based_table_options_t* options, 485 | rdb_cache_t* block_cache) { 486 | if (block_cache) { 487 | options->rep.block_cache = block_cache->rep; 488 | } 489 | } 490 | 491 | void rdb_block_based_options_set_block_cache_compressed( 492 | rdb_block_based_table_options_t* options, 493 | rdb_cache_t* block_cache_compressed) { 494 | if (block_cache_compressed) { 495 | options->rep.block_cache_compressed = block_cache_compressed->rep; 496 | } 497 | } 498 | 499 | void rdb_block_based_options_set_whole_key_filtering( 500 | rdb_block_based_table_options_t* options, unsigned char v) { 501 | options->rep.whole_key_filtering = v; 502 | } 503 | 504 | //////////////////////////// rdb_snapshot_t 505 | const rdb_snapshot_t* rdb_create_snapshot(rdb_t* db) { 506 | rdb_snapshot_t* result = new rdb_snapshot_t; 507 | result->rep = db->rep->GetSnapshot(); 508 | return result; 509 | } 510 | 511 | void rdb_release_snapshot( 512 | rdb_t* db, 513 | const rdb_snapshot_t* snapshot) { 514 | 
db->rep->ReleaseSnapshot(snapshot->rep);
515 |   delete snapshot;
516 | }
517 | 
518 | //////////////////////////// rdb_checkpoint_t
519 | rdb_checkpoint_t* rdb_create_checkpoint(rdb_t* db, char** errptr) {
520 |   Checkpoint* checkpoint;
521 |   if (SaveError(errptr, Checkpoint::Create(db->rep, &checkpoint))) {
522 |     return nullptr;
523 |   }
524 |   rdb_checkpoint_t* result = new rdb_checkpoint_t;
525 |   result->rep = checkpoint;
526 |   return result;
527 | }
528 | 
529 | void rdb_open_checkpoint(
530 |     rdb_checkpoint_t* checkpoint,
531 |     const char* checkpoint_dir,
532 |     char** errptr) {
533 |   SaveError(errptr, checkpoint->rep->CreateCheckpoint(std::string(checkpoint_dir)));
534 | }
535 | 
536 | void rdb_destroy_checkpoint(rdb_checkpoint_t* checkpoint) {
537 |   delete checkpoint->rep;
538 |   delete checkpoint;  // also free the wrapper allocated in rdb_create_checkpoint
539 | }
--------------------------------------------------------------------------------
/BENCH-rocks.txt:
--------------------------------------------------------------------------------
1 | # Amazon i3.large dedicated instance: 2 cores, 16G RAM, 450G local SSD.
2 | 
3 | First, we set up the environment for running the tests: choose where to store the benchmark
4 | data and tell the program where to find the RocksDB shared libraries:
5 | export DATADIR=$HOME/bench-data
6 | mkdir $DATADIR
7 | export LD_LIBRARY_PATH=/usr/local/lib
8 | 
9 | As shown by fio, this instance gives 93K random iops at 4K block size.
10 | 
11 | $ fio --name=randread --ioengine=libaio --iodepth=32 --rw=randread --bs=4k --direct=0 --size=2G --numjobs=16 --runtime=240 --group_reporting
12 | randread: (g=0): rw=randread, bs=4K-4K/4K-4K/4K-4K, ioengine=libaio, iodepth=32
13 | ...
14 | fio-2.2.10
15 | Starting 16 processes
16 | randread: Laying out IO file(s) (1 file(s) / 2048MB)
17 | randread: Laying out IO file(s) (1 file(s) / 2048MB)
18 | randread: Laying out IO file(s) (1 file(s) / 2048MB)
19 | randread: Laying out IO file(s) (1 file(s) / 2048MB)
20 | randread: Laying out IO file(s) (1 file(s) / 2048MB)
21 | randread: Laying out IO file(s) (1 file(s) / 2048MB)
22 | randread: Laying out IO file(s) (1 file(s) / 2048MB)
23 | randread: Laying out IO file(s) (1 file(s) / 2048MB)
24 | randread: Laying out IO file(s) (1 file(s) / 2048MB)
25 | randread: Laying out IO file(s) (1 file(s) / 2048MB)
26 | randread: Laying out IO file(s) (1 file(s) / 2048MB)
27 | randread: Laying out IO file(s) (1 file(s) / 2048MB)
28 | randread: Laying out IO file(s) (1 file(s) / 2048MB)
29 | randread: Laying out IO file(s) (1 file(s) / 2048MB)
30 | randread: Laying out IO file(s) (1 file(s) / 2048MB)
31 | randread: Laying out IO file(s) (1 file(s) / 2048MB)
32 | Jobs: 1 (f=1): [r(1),_(15)] [100.0% done] [240.4MB/0KB/0KB /s] [61.6K/0/0 iops] [eta 00m:00s] s]
33 | randread: (groupid=0, jobs=16): err= 0: pid=13063: Sat Apr 29 12:37:49 2017
34 |   read : io=32768MB, bw=371947KB/s, iops=92986, runt= 90213msec
35 |     slat (usec): min=31, max=24800, avg=163.54, stdev=200.35
36 |     clat (usec): min=1, max=69452, avg=5180.95, stdev=1919.00
37 |     lat (usec): min=91, max=69546, avg=5345.05, stdev=1958.18
38 |     clat percentiles (usec):
39 |      | 1.00th=[ 3152], 5.00th=[ 3312], 10.00th=[ 3440], 20.00th=[ 3664],
40 |      | 30.00th=[ 3856], 40.00th=[ 4128], 50.00th=[ 4512], 60.00th=[ 5024],
41 |      | 70.00th=[ 5728], 80.00th=[ 6624], 90.00th=[ 7904], 95.00th=[ 9024],
42 |      | 99.00th=[11456], 99.50th=[12352], 99.90th=[14528], 99.95th=[15680],
43 |      | 99.99th=[20096]
44 |     bw (KB /s): min=18632, max=36608, per=6.43%, avg=23925.03, stdev=2987.85
45 |     lat (usec) : 2=0.01%, 4=0.01%, 100=0.01%, 250=0.01%, 
500=0.01%
46 |     lat (usec) : 750=0.01%, 1000=0.01%
47 |     lat (msec) : 2=0.01%, 4=36.40%, 10=60.93%, 20=2.66%, 50=0.01%
48 |     lat (msec) : 100=0.01%
49 |   cpu          : usr=2.31%, sys=6.14%, ctx=8463944, majf=0, minf=653
50 |   IO depths    : 1=0.1%, 2=0.1%, 4=0.1%, 8=0.1%, 16=0.1%, 32=100.0%, >=64=0.0%
51 |      submit    : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.0%, 64=0.0%, >=64=0.0%
52 |      complete  : 0=0.0%, 4=100.0%, 8=0.0%, 16=0.0%, 32=0.1%, 64=0.0%, >=64=0.0%
53 |      issued    : total=r=8388608/w=0/d=0, short=r=0/w=0/d=0, drop=r=0/w=0/d=0
54 |      latency   : target=0, window=0, percentile=100.00%, depth=32
55 | 
56 | Run status group 0 (all jobs):
57 |    READ: io=32768MB, aggrb=371946KB/s, minb=371946KB/s, maxb=371946KB/s, mint=90213msec, maxt=90213msec
58 | 
59 | Disk stats (read/write):
60 |   nvme0n1: ios=8386313/19379, merge=0/0, ticks=877396/60, in_queue=880100, util=100.00%
61 | 
62 | Command being timed: "populate --kv rocksdb --valsz 128 --keys_mil 250 --dir=$DATADIR"
63 | User time (seconds): 2685.96
64 | System time (seconds): 532.66
65 | Percent of CPU this job got: 136%
66 | Elapsed (wall clock) time (h:mm:ss or m:ss): 39:10.78
67 | Average shared text size (kbytes): 0
68 | Average unshared data size (kbytes): 0
69 | Average stack size (kbytes): 0
70 | Average total size (kbytes): 0
71 | Maximum resident set size (kbytes): 611888
72 | Average resident set size (kbytes): 0
73 | Major (requiring I/O) page faults: 39
74 | Minor (reclaiming a frame) page faults: 2169690
75 | Voluntary context switches: 11455264
76 | Involuntary context switches: 4606594
77 | Swaps: 0
78 | File system inputs: 132138368
79 | File system outputs: 594809048
80 | Socket messages sent: 0
81 | Socket messages received: 0
82 | Signals delivered: 0
83 | Page size (bytes): 4096
84 | Exit status: 0
85 | 
86 | $ du -sh $DATADIR/rocks
87 | 24G $DATADIR/rocks
88 | 
89 | 
90 | For the Badger run below, we set the value log GC threshold to 0.5. It turns out that value log GC can be expensive,
91 | so it should run only occasionally; it is only worthwhile when it reclaims a significant amount of disk space.
92 | 
93 | Command being timed: "populate --kv badger --valsz 128 --keys_mil 250 --dir=$DATADIR"
94 | User time (seconds): 4983.09
95 | System time (seconds): 166.96
96 | Percent of CPU this job got: 188%
97 | Elapsed (wall clock) time (h:mm:ss or m:ss): 45:26.56
98 | Average shared text size (kbytes): 0
99 | Average unshared data size (kbytes): 0
100 | Average stack size (kbytes): 0
101 | Average total size (kbytes): 0
102 | Maximum resident set size (kbytes): 14660624
103 | Average resident set size (kbytes): 0
104 | Major (requiring I/O) page faults: 10690
105 | Minor (reclaiming a frame) page faults: 6659331
106 | Voluntary context switches: 1141184
107 | Involuntary context switches: 1071168
108 | Swaps: 0
109 | File system inputs: 14994928
110 | File system outputs: 291238896
111 | Socket messages sent: 0
112 | Socket messages received: 0
113 | Signals delivered: 0
114 | Page size (bytes): 4096
115 | Exit status: 0
116 | 
117 | 
118 | $ du -sh /mnt/data/badger
119 | 38G /mnt/data/badger
120 | 5.8G *.sst   # LSM tree, can be kept in RAM.
121 | 
122 | Random Reads: Badger is 3.67x faster
123 | 
124 | $ go test --bench BenchmarkReadRandomRocks --keys_mil 250 --valsz 128 --dir $DATADIR --timeout 10m --benchtime 3m
125 | BenchmarkReadRandomRocks/read-random-rocks-2             2000000        118982 ns/op
126 | --- BENCH: BenchmarkReadRandomRocks/read-random-rocks-2
127 | bench_test.go:92: rocks 149864 keys had valid values.
128 | bench_test.go:92: rocks 150136 keys had valid values. 
129 | bench_test.go:92: rocks 1000693 keys had valid values. 130 | bench_test.go:92: rocks 999307 keys had valid values. 131 | PASS 132 | 133 | $ go test --bench BenchmarkReadRandomBadger --keys_mil 250 --valsz 128 --dir $DATADIR --timeout 10m --benchtime 3m 134 | Called BenchmarkReadRandomBadger 135 | Replaying compact log: /mnt/data/badger/clog 136 | All compactions in compact log are done. 137 | NOT running any compactions due to DB options. 138 | NOT running any compactions due to DB options. 139 | NOT running any compactions due to DB options. 140 | Seeking at value pointer: {Fid:37 Len:163 Offset:1022845212} 141 | l.opt.ValueGCThreshold = 0.0. Exiting runGCInLoop 142 | key=vsz=00128-k=0025059055 143 | BenchmarkReadRandomBadger/read-random-badger-2 10000000 32361 ns/op 144 | --- BENCH: BenchmarkReadRandomBadger/read-random-badger-2 145 | bench_test.go:72: badger 325009 keys had valid values. 146 | bench_test.go:72: badger 324883 keys had valid values. 147 | bench_test.go:72: badger 3247736 keys had valid values. 148 | bench_test.go:72: badger 3243258 keys had valid values. 149 | Sending signal to 0 registered with name "value-gc" 150 | Sending signal to 1 registered with name "writes" 151 | --->> Size of bloom filter: 116 152 | =======> Deallocating skiplist 153 | Sending signal to 0 registered with name "memtable" 154 | Level "value-gc" already got signal 155 | Level "writes" already got signal 156 | PASS 157 | 158 | ### Iteration 159 | 160 | $ go test --bench BenchmarkIterateRocks --keys_mil 250 --valsz 128 --dir $DATADIR --timeout 10m --cpuprofile cpu.out 161 | BenchmarkIterateRocks/rocksdb-iterate-2 1 5806763436 ns/op 162 | --- BENCH: BenchmarkIterateRocks/rocksdb-iterate-2 163 | bench_test.go:128: [0] Counted 2000000 keys 164 | PASS 165 | ok github.com/dgraph-io/badger-bench 6.987s 166 | 167 | $ go test --bench BenchmarkIterateBadgerOnly --keys_mil 250 --valsz 128 --dir $DATADIR --timeout 10m --cpuprofile cpu.out 168 | Replaying compact log: /mnt/data/badger/clog 169 | All compactions in compact log are done. 170 | NOT running any compactions due to DB options. 171 | NOT running any compactions due to DB options. 172 | NOT running any compactions due to DB options. 173 | Seeking at value pointer: {Fid:39 Len:163 Offset:1012268142} 174 | l.opt.ValueGCThreshold = 0.0. Exiting runGCInLoop 175 | key=vsz=00128-k=0098569193 176 | BenchmarkIterateBadgerOnlyKeys/badger-iterate-onlykeys-2 2 713078716 ns/op 177 | --- BENCH: BenchmarkIterateBadgerOnlyKeys/badger-iterate-onlykeys-2 178 | bench_test.go:156: [0] Counted 2000000 keys 179 | bench_test.go:156: [0] Counted 2000000 keys 180 | bench_test.go:156: [1] Counted 2000000 keys 181 | PASS 182 | ok github.com/dgraph-io/badger-bench 10.198s 183 | 184 | $ go test --bench BenchmarkIterateBadgerWithValues --keys_mil 250 --valsz 128 --dir $DATADIR --timeout 10m 185 | Replaying compact log: /mnt/data/badger/clog 186 | All compactions in compact log are done. 187 | NOT running any compactions due to DB options. 188 | NOT running any compactions due to DB options. 189 | NOT running any compactions due to DB options. 190 | Seeking at value pointer: {Fid:39 Len:163 Offset:1012268142} 191 | l.opt.ValueGCThreshold = 0.0. 
Exiting runGCInLoop 192 | key=vsz=00128-k=0098569193 193 | ....................BenchmarkIterateBadgerWithValues/badger-iterate-withvals-2 1 75781455080 ns/op 194 | --- BENCH: BenchmarkIterateBadgerWithValues/badger-iterate-withvals-2 195 | bench_test.go:187: [0] Counted 2000000 keys 196 | PASS 197 | ok github.com/dgraph-io/badger-bench 81.401s 198 | 199 | WROTE 75000000 KEYS 200 | Command being timed: "./populate --kv rocksdb --valsz 1024 --keys_mil 75 --dir /mnt/data/1kb" 201 | User time (seconds): 2529.19 202 | System time (seconds): 1498.27 203 | Percent of CPU this job got: 85% 204 | Elapsed (wall clock) time (h:mm:ss or m:ss): 1:18:08 205 | Average shared text size (kbytes): 0 206 | Average unshared data size (kbytes): 0 207 | Average stack size (kbytes): 0 208 | Average total size (kbytes): 0 209 | Maximum resident set size (kbytes): 1040732 210 | Average resident set size (kbytes): 0 211 | Major (requiring I/O) page faults: 298 212 | Minor (reclaiming a frame) page faults: 11338619 213 | Voluntary context switches: 6822622 214 | Involuntary context switches: 1738511 215 | Swaps: 0 216 | File system inputs: 1046110728 217 | File system outputs: 1814480952 218 | Socket messages sent: 0 219 | Socket messages received: 0 220 | Signals delivered: 0 221 | Page size (bytes): 4096 222 | Exit status: 0 223 | 224 | $ du -sh /mnt/data/1kb/rocks 225 | 49G 226 | 227 | 228 | WROTE 75000000 KEYS 229 | Command being timed: "./populate --kv badger --valsz 1024 --keys_mil 75 --dir /mnt/data/1kb" 230 | User time (seconds): 1445.97 231 | System time (seconds): 109.23 232 | Percent of CPU this job got: 151% 233 | Elapsed (wall clock) time (h:mm:ss or m:ss): 17:09.66 234 | Average shared text size (kbytes): 0 235 | Average unshared data size (kbytes): 0 236 | Average stack size (kbytes): 0 237 | Average total size (kbytes): 0 238 | Maximum resident set size (kbytes): 11857204 239 | Average resident set size (kbytes): 0 240 | Major (requiring I/O) page faults: 1952 241 | Minor (reclaiming a frame) page faults: 10187929 242 | Voluntary context switches: 1804454 243 | Involuntary context switches: 282003 244 | Swaps: 0 245 | File system inputs: 205176 246 | File system outputs: 197457568 247 | Socket messages sent: 0 248 | Socket messages received: 0 249 | Signals delivered: 0 250 | Page size (bytes): 4096 251 | Exit status: 0 252 | 253 | $ du -shc /mnt/data/1kb/badger/*.sst 254 | 1.7G 255 | 256 | $ du -shc /mnt/data/1kb/badger/*.vlog 257 | 74G 258 | 259 | $ go test --bench BenchmarkReadRandomRocks --keys_mil 75 --valsz 1024 --dir "/mnt/data/1kb" --timeout 10m --benchtime 3m 260 | BenchmarkReadRandomRocks/read-random-rocks-2 2000000 156694 ns/op 261 | --- BENCH: BenchmarkReadRandomRocks/read-random-rocks-2 262 | bench_test.go:92: rocks 149796 keys had valid values. 263 | bench_test.go:92: rocks 150204 keys had valid values. 264 | bench_test.go:92: rocks 996349 keys had valid values. 265 | bench_test.go:92: rocks 1003651 keys had valid values. 266 | PASS 267 | ok github.com/dgraph-io/badger-bench 385.121s 268 | 269 | $ go test --bench BenchmarkReadRandomBadger --keys_mil 75 --valsz 1024 --dir "/mnt/data/1kb" --timeout 10m --benchtime 3m 270 | Called BenchmarkReadRandomBadger 271 | Replaying compact log: /mnt/data/1kb/badger/clog 272 | All compactions in compact log are done. 273 | NOT running any compactions due to DB options. 274 | NOT running any compactions due to DB options. 275 | NOT running any compactions due to DB options. 
276 | Seeking at value pointer: {Fid:73 Len:1059 Offset:1041730887} 277 | l.opt.ValueGCThreshold = 0.0. Exiting runGCInLoop 278 | key=vsz=01024-k=0015263159 279 | BenchmarkReadRandomBadger/read-random-badger-2 10000000 37053 ns/op 280 | --- BENCH: BenchmarkReadRandomBadger/read-random-badger-2 281 | bench_test.go:72: badger 317463 keys had valid values. 282 | bench_test.go:72: badger 317460 keys had valid values. 283 | bench_test.go:72: badger 3175198 keys had valid values. 284 | bench_test.go:72: badger 3169988 keys had valid values. 285 | Sending signal to 0 registered with name "value-gc" 286 | Sending signal to 1 registered with name "writes" 287 | --->> Size of bloom filter: 116 288 | =======> Deallocating skiplist 289 | Sending signal to 0 registered with name "memtable" 290 | Level "value-gc" already got signal 291 | Level "writes" already got signal 292 | PASS 293 | ok github.com/dgraph-io/badger-bench 415.068s 294 | 295 | $ go test --bench BenchmarkIterateRocks --keys_mil 75 --valsz 1024 --dir "/mnt/data/1kb" --timeout 60m 296 | BenchmarkIterateRocks/rocksdb-iterate-2 1 24936162613 ns/op 297 | --- BENCH: BenchmarkIterateRocks/rocksdb-iterate-2 298 | bench_test.go:128: [0] Counted 2000001 keys 299 | PASS 300 | ok github.com/dgraph-io/badger-bench 26.416s 301 | 302 | $ go test --bench BenchmarkIterateBadger --keys_mil 75 --valsz 1024 --dir "/mnt/data/1kb" --timeout 60m 303 | Replaying compact log: /mnt/data/1kb/badger/clog 304 | All compactions in compact log are done. 305 | NOT running any compactions due to DB options. 306 | NOT running any compactions due to DB options. 307 | NOT running any compactions due to DB options. 308 | Seeking at value pointer: {Fid:73 Len:1059 Offset:1041730887} 309 | l.opt.ValueGCThreshold = 0.0. Exiting runGCInLoop 310 | key=vsz=01024-k=0015263159 311 | BenchmarkIterateBadgerOnlyKeys/badger-iterate-onlykeys-2 2 536687829 ns/op 312 | --- BENCH: BenchmarkIterateBadgerOnlyKeys/badger-iterate-onlykeys-2 313 | bench_test.go:156: [0] Counted 2000001 keys 314 | bench_test.go:156: [0] Counted 2000001 keys 315 | bench_test.go:156: [1] Counted 2000001 keys 316 | Replaying compact log: /mnt/data/1kb/badger/clog 317 | All compactions in compact log are done. 318 | NOT running any compactions due to DB options. 319 | NOT running any compactions due to DB options. 320 | NOT running any compactions due to DB options. 321 | Seeking at value pointer: {Fid:73 Len:1059 Offset:1041730887} 322 | key=vsz=01024-k=0015263159 323 | l.opt.ValueGCThreshold = 0.0. 
Exiting runGCInLoop 324 | ....................BenchmarkIterateBadgerWithValues/badger-iterate-withvals-2 1 101801301675 ns/op 325 | --- BENCH: BenchmarkIterateBadgerWithValues/badger-iterate-withvals-2 326 | bench_test.go:187: [0] Counted 2000000 keys 327 | PASS 328 | ok github.com/dgraph-io/badger-bench 114.170s 329 | 330 | WROTE 5004000 KEYS 331 | Command being timed: "./populate --kv rocksdb --valsz 16384 --keys_mil 5 --dir $DATADIR/16kb" 332 | User time (seconds): 1424.16 333 | System time (seconds): 1397.96 334 | Percent of CPU this job got: 57% 335 | Elapsed (wall clock) time (h:mm:ss or m:ss): 1:22:21 336 | Average shared text size (kbytes): 0 337 | Average unshared data size (kbytes): 0 338 | Average stack size (kbytes): 0 339 | Average total size (kbytes): 0 340 | Maximum resident set size (kbytes): 1224612 341 | Average resident set size (kbytes): 0 342 | Major (requiring I/O) page faults: 87 343 | Minor (reclaiming a frame) page faults: 6938621 344 | Voluntary context switches: 4541903 345 | Involuntary context switches: 1035841 346 | Swaps: 0 347 | File system inputs: 1141303472 348 | File system outputs: 1925444544 349 | Socket messages sent: 0 350 | Socket messages received: 0 351 | Signals delivered: 0 352 | Page size (bytes): 4096 353 | Exit status: 0 354 | 355 | $ du -sh rocks 356 | 52G 357 | 358 | WROTE 5004000 KEYS 359 | Command being timed: "./populate --kv badger --valsz 16384 --keys_mil 5 --dir $DATADIR/16kb" 360 | User time (seconds): 368.05 361 | System time (seconds): 113.57 362 | Percent of CPU this job got: 116% 363 | Elapsed (wall clock) time (h:mm:ss or m:ss): 6:55.01 364 | Average shared text size (kbytes): 0 365 | Average unshared data size (kbytes): 0 366 | Average stack size (kbytes): 0 367 | Average total size (kbytes): 0 368 | Maximum resident set size (kbytes): 2313908 369 | Average resident set size (kbytes): 0 370 | Major (requiring I/O) page faults: 55 371 | Minor (reclaiming a frame) page faults: 6128182 372 | Voluntary context switches: 2327323 373 | Involuntary context switches: 206230 374 | Swaps: 0 375 | File system inputs: 16424 376 | File system outputs: 161245240 377 | Socket messages sent: 0 378 | Socket messages received: 0 379 | Signals delivered: 0 380 | Page size (bytes): 4096 381 | Exit status: 0 382 | 383 | $ du -shc badger/*.sst 384 | 105M 385 | $ du -shc badger/*.vlog 386 | 77G 387 | 388 | $ go test -v --bench BenchmarkReadRandomRocks --keys_mil 5 --valsz 16384 --dir "$DATADIR/16kb" --timeout 10m --benchtime 3m 389 | BenchmarkReadRandomRocks/read-random-rocks-2 SIGQUIT: quit 390 | PC=0x460ed9 m=0 sigcode=0 391 | 392 | goroutine 0 [idle]: 393 | runtime.epollwait(0x4, 0x7ffdbcaf5ad8, 0xffffffff00000080, 0x0, 0xffffffff00000000, 0x0, 0x0, 0x0, 0x0, 0x0, ...) 394 | /usr/local/go/src/runtime/sys_linux_amd64.s:560 +0x19 395 | runtime.netpoll(0xc420029301, 0xc420028001) 396 | /usr/local/go/src/runtime/netpoll_epoll.go:67 +0x91 397 | runtime.findrunnable(0xc420029300, 0x0) 398 | /usr/local/go/src/runtime/proc.go:2084 +0x31f 399 | runtime.schedule() 400 | /usr/local/go/src/runtime/proc.go:2222 +0x14c 401 | runtime.park_m(0xc420001040) 402 | /usr/local/go/src/runtime/proc.go:2285 +0xab 403 | runtime.mcall(0x7ffdbcaf6240) 404 | /usr/local/go/src/runtime/asm_amd64.s:269 +0x5b 405 | *** Test killed: ran too long (11m0s). 406 | FAIL github.com/dgraph-io/badger-bench 674.142s 407 | 408 | NOTE: RocksDB took too much memory when doing random lookups. So, this crash happened multiple times. 
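One way to bound that memory is to cap RocksDB's block cache through the rdb/rdbc.cc
wrapper shown earlier in this repo. Below is a minimal sketch, assuming the wrapper is
compiled and linked the same way the rdb package links it; the 1 GB cap and the data
directory are illustrative choices for this sketch, not what these runs actually used:

    #include <stdio.h>
    #include <stdlib.h>
    #include "rdbc.h"

    int main() {
      /* Cap the block cache at 1 GB so random lookups cannot grow it without bound. */
      rdb_block_based_table_options_t* topts = rdb_block_based_options_create();
      rdb_cache_t* cache = rdb_cache_create_lru(1024 * 1024 * 1024);
      rdb_block_based_options_set_block_cache(topts, cache);

      rdb_options_t* opts = rdb_options_create();
      rdb_options_set_block_based_table_factory(opts, topts);

      char* err = NULL;
      rdb_t* db = rdb_open(opts, "/mnt/data/16kb/rocks", &err);
      if (err != NULL) {
        fprintf(stderr, "open failed: %s\n", err);
        free(err);
        return 1;
      }
      /* ... random lookups via rdb_get() would go here ... */
      rdb_close(db);
      return 0;
    }

Whether this avoids the kill depends on where the memory actually goes (block cache
vs. index/filter blocks vs. allocator fragmentation), so treat it as a starting point
for investigation rather than a fix.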
409 | 410 | $ go test -v --bench BenchmarkReadRandomRocks --keys_mil 5 --valsz 16384 --dir "$DATADIR/16kb" --timeout 10m --benchtime 1m 411 | BenchmarkReadRandomRocks/read-random-rocks-2 300000 215171 ns/op 412 | --- BENCH: BenchmarkReadRandomRocks/read-random-rocks-2 413 | bench_test.go:93: rocks 100391 keys had valid values. 414 | bench_test.go:93: rocks 150850 keys had valid values. 415 | bench_test.go:93: rocks 149150 keys had valid values. 416 | PASS 417 | ok github.com/dgraph-io/badger-bench 121.391s 418 | 419 | $ go test -v --bench BenchmarkReadRandomBadger --keys_mil 5 --valsz 16384 --dir "$DATADIR/16kb" --timeout 10m --benchtime 1m 420 | Called BenchmarkReadRandomBadger 421 | Replaying compact log: $DATADIR/16kb/badger/clog 422 | All compactions in compact log are done. 423 | NOT running any compactions due to DB options. 424 | NOT running any compactions due to DB options. 425 | NOT running any compactions due to DB options. 426 | Seeking at value pointer: {Fid:76 Len:16419 Offset:554157669} 427 | l.opt.ValueGCThreshold = 0.0. Exiting runGCInLoop 428 | key=vsz=16384-k=0002454321 429 | BenchmarkReadRandomBadger/read-random-badger-2 2000000 40178 ns/op 430 | --- BENCH: BenchmarkReadRandomBadger/read-random-badger-2 431 | bench_test.go:73: badger 315956 keys had valid values. 432 | bench_test.go:73: badger 316468 keys had valid values. 433 | bench_test.go:73: badger 632648 keys had valid values. 434 | bench_test.go:73: badger 631790 keys had valid values. 435 | Sending signal to 0 registered with name "value-gc" 436 | Sending signal to 1 registered with name "writes" 437 | --->> Size of bloom filter: 116 438 | =======> Deallocating skiplist 439 | Level "writes" already got signal 440 | Sending signal to 0 registered with name "memtable" 441 | Level "value-gc" already got signal 442 | PASS 443 | ok github.com/dgraph-io/badger-bench 123.227s 444 | 445 | $ go test -v --bench BenchmarkIterate --keys_mil 5 --valsz 16384 --dir "$DATADIR/16kb" --timeout 60m 446 | BenchmarkIterateRocks/rocksdb-iterate-2 1 133313688657 ns/op 447 | --- BENCH: BenchmarkIterateRocks/rocksdb-iterate-2 448 | bench_test.go:129: [0] Counted 2000001 keys 449 | Replaying compact log: /mnt/data/16kb/badger/clog 450 | All compactions in compact log are done. 451 | NOT running any compactions due to DB options. 452 | NOT running any compactions due to DB options. 453 | NOT running any compactions due to DB options. 454 | Seeking at value pointer: {Fid:76 Len:16419 Offset:554157669} 455 | l.opt.ValueGCThreshold = 0.0. Exiting runGCInLoop 456 | key=vsz=16384-k=0002454321 457 | BenchmarkIterateBadgerOnlyKeys/badger-iterate-onlykeys-2 3 475018676 ns/op 458 | --- BENCH: BenchmarkIterateBadgerOnlyKeys/badger-iterate-onlykeys-2 459 | bench_test.go:157: [0] Counted 2000001 keys 460 | bench_test.go:157: [0] Counted 2000001 keys 461 | bench_test.go:157: [1] Counted 2000001 keys 462 | bench_test.go:157: [0] Counted 2000001 keys 463 | bench_test.go:157: [1] Counted 2000001 keys 464 | bench_test.go:157: [2] Counted 2000001 keys 465 | Replaying compact log: /mnt/data/16kb/badger/clog 466 | All compactions in compact log are done. 467 | NOT running any compactions due to DB options. 468 | NOT running any compactions due to DB options. 469 | NOT running any compactions due to DB options. 470 | Seeking at value pointer: {Fid:76 Len:16419 Offset:554157669} 471 | l.opt.ValueGCThreshold = 0.0. 
Exiting runGCInLoop 472 | key=vsz=16384-k=0002454321 473 | ....................BenchmarkIterateBadgerWithValues/badger-iterate-withvals-2 1 125095134637 ns/op 474 | --- BENCH: BenchmarkIterateBadgerWithValues/badger-iterate-withvals-2 475 | bench_test.go:188: [0] Counted 2000000 keys 476 | PASS 477 | ok github.com/dgraph-io/badger-bench 264.244s 478 | 479 | 480 | 16 Byte values 481 | 482 | WROTE 1000008000 KEYS 483 | Command being timed: "./populate --kv rocksdb --valsz 16 --keys_mil 1000 --dir $DATADIR/16" 484 | User time (seconds): 8515.35 485 | System time (seconds): 468.95 486 | Percent of CPU this job got: 151% 487 | Elapsed (wall clock) time (h:mm:ss or m:ss): 1:38:40 488 | Average shared text size (kbytes): 0 489 | Average unshared data size (kbytes): 0 490 | Average stack size (kbytes): 0 491 | Average total size (kbytes): 0 492 | Maximum resident set size (kbytes): 607276 493 | Average resident set size (kbytes): 0 494 | Major (requiring I/O) page faults: 41 495 | Minor (reclaiming a frame) page faults: 2068722 496 | Voluntary context switches: 17535643 497 | Involuntary context switches: 10364349 498 | Swaps: 0 499 | File system inputs: 22217320 500 | File system outputs: 491495256 501 | Socket messages sent: 0 502 | Socket messages received: 0 503 | Signals delivered: 0 504 | Page size (bytes): 4096 505 | Exit status: 0 506 | 507 | [11010] Write key rate per minute: 5.03M. Total: 999.91M 508 | Command terminated by signal 9 <---------------------------- Strange 509 | Command being timed: "./populate --kv badger --valsz 16 --keys_mil 1000 --dir $DATADIR/16" 510 | User time (seconds): 20732.95 511 | System time (seconds): 662.04 512 | Percent of CPU this job got: 193% 513 | Elapsed (wall clock) time (h:mm:ss or m:ss): 3:04:04 514 | Average shared text size (kbytes): 0 515 | Average unshared data size (kbytes): 0 516 | Average stack size (kbytes): 0 517 | Average total size (kbytes): 0 518 | Maximum resident set size (kbytes): 14910100 519 | Average resident set size (kbytes): 0 520 | Major (requiring I/O) page faults: 107693 521 | Minor (reclaiming a frame) page faults: 18716683 522 | Voluntary context switches: 4845155 523 | Involuntary context switches: 3718926 524 | Swaps: 0 525 | File system inputs: 489687344 526 | File system outputs: 926333912 527 | Socket messages sent: 0 528 | Socket messages received: 0 529 | Signals delivered: 0 530 | Page size (bytes): 4096 531 | Exit status: 0 532 | 533 | Update: with latest changes to compaction, 250K keys are written in 37m. 534 | 535 | Another run of Badger. 
536 | 537 | WROTE 1000008000 KEYS 538 | Command being timed: "./populate --kv badger --valsz 16 --keys_mil 1000 --dir $DATADIR/16" 539 | User time (seconds): 21221.83 540 | System time (seconds): 707.07 541 | Percent of CPU this job got: 192% 542 | Elapsed (wall clock) time (h:mm:ss or m:ss): 3:10:01 543 | Average shared text size (kbytes): 0 544 | Average unshared data size (kbytes): 0 545 | Average stack size (kbytes): 0 546 | Average total size (kbytes): 0 547 | Maximum resident set size (kbytes): 14773368 548 | Average resident set size (kbytes): 0 549 | Major (requiring I/O) page faults: 53700 550 | Minor (reclaiming a frame) page faults: 19758570 551 | Voluntary context switches: 7890576 552 | Involuntary context switches: 4228086 553 | Swaps: 0 554 | File system inputs: 478626176 555 | File system outputs: 963384272 556 | Socket messages sent: 0 557 | Socket messages received: 0 558 | Signals delivered: 0 559 | Page size (bytes): 4096 560 | Exit status: 0 561 | -------------------------------------------------------------------------------- /write_benchmarks: -------------------------------------------------------------------------------- 1 | Sync writes are set to true for all. The following commands have been used to 2 | populate data. 3 | 4 | ============================================================================ 5 | /usr/bin/time -v populate --kv badger --valsz 128 --keys_mil 1 --dir /data 6 | /usr/bin/time -v populate --kv badger --valsz 1024 --keys_mil 1 --dir /data 7 | /usr/bin/time -v populate --kv badger --valsz 16384 --keys_mil 1 --dir /data 8 | 9 | /usr/bin/time -v populate --kv lmdb --valsz 128 --keys_mil 1 --dir /data 10 | /usr/bin/time -v populate --kv lmdb --valsz 1024 --keys_mil 1 --dir /data 11 | /usr/bin/time -v populate --kv lmdb --valsz 16384 --keys_mil 1 --dir /data 12 | 13 | /usr/bin/time -v populate --kv bolt --valsz 128 --keys_mil 1 --dir /data 14 | /usr/bin/time -v populate --kv bolt --valsz 1024 --keys_mil 1 --dir /data 15 | /usr/bin/time -v populate --kv bolt --valsz 16384 --keys_mil 1 --dir /data 16 | ========================================================================== 17 | 18 | TOTAL KEYS TO WRITE: 1.00M 19 | Init Badger 20 | [0000] Write key rate per minute: 102.00K. Total: 102.00K 21 | [0001] Write key rate per minute: 229.00K. Total: 229.00K 22 | [0002] Write key rate per minute: 330.00K. Total: 330.00K 23 | [0003] Write key rate per minute: 421.00K. Total: 421.00K 24 | [0004] Write key rate per minute: 510.00K. Total: 510.00K 25 | [0005] Write key rate per minute: 606.00K. Total: 606.00K 26 | [0006] Write key rate per minute: 690.00K. Total: 690.00K 27 | [0007] Write key rate per minute: 774.00K. Total: 774.00K 28 | [0008] Write key rate per minute: 858.00K. Total: 858.00K 29 | [0009] Write key rate per minute: 942.00K. 
Total: 942.00K 30 | closing badger 31 | 32 | WROTE 1008000 KEYS 33 | Command being timed: "populate --kv badger --valsz 128 --keys_mil 1 --dir /data" 34 | User time (seconds): 15.82 35 | System time (seconds): 0.84 36 | Percent of CPU this job got: 155% 37 | Elapsed (wall clock) time (h:mm:ss or m:ss): 0:10.73 38 | Average shared text size (kbytes): 0 39 | Average unshared data size (kbytes): 0 40 | Average stack size (kbytes): 0 41 | Average total size (kbytes): 0 42 | Maximum resident set size (kbytes): 532912 43 | Average resident set size (kbytes): 0 44 | Major (requiring I/O) page faults: 0 45 | Minor (reclaiming a frame) page faults: 64029 46 | Voluntary context switches: 74684 47 | Involuntary context switches: 759 48 | Swaps: 0 49 | File system inputs: 8 50 | File system outputs: 414520 51 | Socket messages sent: 0 52 | Socket messages received: 0 53 | Signals delivered: 0 54 | Page size (bytes): 4096 55 | Exit status: 0 56 | TOTAL KEYS TO WRITE: 1.00M 57 | Init Badger 58 | [0000] Write key rate per minute: 61.00K. Total: 61.00K 59 | [0001] Write key rate per minute: 131.00K. Total: 131.00K 60 | [0002] Write key rate per minute: 191.00K. Total: 191.00K 61 | [0003] Write key rate per minute: 253.00K. Total: 253.00K 62 | [0004] Write key rate per minute: 313.00K. Total: 313.00K 63 | [0005] Write key rate per minute: 373.00K. Total: 373.00K 64 | [0006] Write key rate per minute: 443.00K. Total: 443.00K 65 | [0007] Write key rate per minute: 503.00K. Total: 503.00K 66 | [0008] Write key rate per minute: 565.00K. Total: 565.00K 67 | [0009] Write key rate per minute: 625.00K. Total: 625.00K 68 | [0010] Write key rate per minute: 685.00K. Total: 685.00K 69 | [0011] Write key rate per minute: 733.00K. Total: 733.00K 70 | [0012] Write key rate per minute: 791.00K. Total: 791.00K 71 | [0013] Write key rate per minute: 851.00K. Total: 851.00K 72 | [0014] Write key rate per minute: 901.00K. Total: 901.00K 73 | [0015] Write key rate per minute: 973.00K. Total: 973.00K 74 | closing badger 75 | 76 | WROTE 1008000 KEYS 77 | Command being timed: "populate --kv badger --valsz 1024 --keys_mil 1 --dir /data" 78 | User time (seconds): 19.78 79 | System time (seconds): 2.38 80 | Percent of CPU this job got: 133% 81 | Elapsed (wall clock) time (h:mm:ss or m:ss): 0:16.58 82 | Average shared text size (kbytes): 0 83 | Average unshared data size (kbytes): 0 84 | Average stack size (kbytes): 0 85 | Average total size (kbytes): 0 86 | Maximum resident set size (kbytes): 559124 87 | Average resident set size (kbytes): 0 88 | Major (requiring I/O) page faults: 0 89 | Minor (reclaiming a frame) page faults: 136617 90 | Voluntary context switches: 173641 91 | Involuntary context switches: 1220 92 | Swaps: 0 93 | File system inputs: 72 94 | File system outputs: 2177568 95 | Socket messages sent: 0 96 | Socket messages received: 0 97 | Signals delivered: 0 98 | Page size (bytes): 4096 99 | Exit status: 0 100 | TOTAL KEYS TO WRITE: 1.00M 101 | Init Badger 102 | [0000] Write key rate per minute: 1.00K. Total: 1.00K 103 | [0001] Write key rate per minute: 13.00K. Total: 13.00K 104 | [0002] Write key rate per minute: 25.00K. Total: 25.00K 105 | [0003] Write key rate per minute: 37.00K. Total: 37.00K 106 | [0004] Write key rate per minute: 49.00K. Total: 49.00K 107 | [0005] Write key rate per minute: 61.00K. Total: 61.00K 108 | [0006] Write key rate per minute: 73.00K. Total: 73.00K 109 | [0007] Write key rate per minute: 85.00K. Total: 85.00K 110 | [0008] Write key rate per minute: 97.00K. 
Total: 97.00K 111 | [0009] Write key rate per minute: 109.00K. Total: 109.00K 112 | [0010] Write key rate per minute: 121.00K. Total: 121.00K 113 | [0011] Write key rate per minute: 133.00K. Total: 133.00K 114 | [0012] Write key rate per minute: 145.00K. Total: 145.00K 115 | [0013] Write key rate per minute: 157.00K. Total: 157.00K 116 | [0014] Write key rate per minute: 180.00K. Total: 180.00K 117 | [0015] Write key rate per minute: 192.00K. Total: 192.00K 118 | [0016] Write key rate per minute: 204.00K. Total: 204.00K 119 | [0017] Write key rate per minute: 216.00K. Total: 216.00K 120 | [0018] Write key rate per minute: 228.00K. Total: 228.00K 121 | [0019] Write key rate per minute: 229.00K. Total: 229.00K 122 | [0020] Write key rate per minute: 239.00K. Total: 239.00K 123 | [0021] Write key rate per minute: 251.00K. Total: 251.00K 124 | [0022] Write key rate per minute: 263.00K. Total: 263.00K 125 | [0023] Write key rate per minute: 275.00K. Total: 275.00K 126 | [0024] Write key rate per minute: 287.00K. Total: 287.00K 127 | [0025] Write key rate per minute: 301.00K. Total: 301.00K 128 | [0026] Write key rate per minute: 313.00K. Total: 313.00K 129 | [0027] Write key rate per minute: 325.00K. Total: 325.00K 130 | [0028] Write key rate per minute: 347.00K. Total: 347.00K 131 | [0029] Write key rate per minute: 359.00K. Total: 359.00K 132 | [0030] Write key rate per minute: 373.00K. Total: 373.00K 133 | [0031] Write key rate per minute: 385.00K. Total: 385.00K 134 | [0032] Write key rate per minute: 397.00K. Total: 397.00K 135 | [0033] Write key rate per minute: 409.00K. Total: 409.00K 136 | [0034] Write key rate per minute: 421.00K. Total: 421.00K 137 | [0035] Write key rate per minute: 433.00K. Total: 433.00K 138 | [0036] Write key rate per minute: 445.00K. Total: 445.00K 139 | [0037] Write key rate per minute: 467.00K. Total: 467.00K 140 | [0038] Write key rate per minute: 479.00K. Total: 479.00K 141 | [0039] Write key rate per minute: 491.00K. Total: 491.00K 142 | [0040] Write key rate per minute: 503.00K. Total: 503.00K 143 | [0041] Write key rate per minute: 515.00K. Total: 515.00K 144 | [0042] Write key rate per minute: 527.00K. Total: 527.00K 145 | [0043] Write key rate per minute: 541.00K. Total: 541.00K 146 | [0044] Write key rate per minute: 553.00K. Total: 553.00K 147 | [0045] Write key rate per minute: 565.00K. Total: 565.00K 148 | [0046] Write key rate per minute: 577.00K. Total: 577.00K 149 | [0047] Write key rate per minute: 589.00K. Total: 589.00K 150 | [0048] Write key rate per minute: 611.00K. Total: 611.00K 151 | [0049] Write key rate per minute: 623.00K. Total: 623.00K 152 | [0050] Write key rate per minute: 635.00K. Total: 635.00K 153 | [0051] Write key rate per minute: 647.00K. Total: 647.00K 154 | [0052] Write key rate per minute: 659.00K. Total: 659.00K 155 | [0053] Write key rate per minute: 671.00K. Total: 671.00K 156 | [0054] Write key rate per minute: 685.00K. Total: 685.00K 157 | [0055] Write key rate per minute: 697.00K. Total: 697.00K 158 | [0056] Write key rate per minute: 719.00K. Total: 719.00K 159 | [0057] Write key rate per minute: 731.00K. Total: 731.00K 160 | [0058] Write key rate per minute: 743.00K. Total: 743.00K 161 | [0059] Write key rate per minute: 755.00K. Total: 755.00K 162 | [0060] Write key rate per minute: 731.00K. Total: 767.00K 163 | [0061] Write key rate per minute: 745.00K. Total: 781.00K 164 | [0062] Write key rate per minute: 757.00K. Total: 793.00K 165 | [0063] Write key rate per minute: 743.00K. 
Total: 815.00K 166 | [0064] Write key rate per minute: 755.00K. Total: 827.00K 167 | [0065] Write key rate per minute: 757.00K. Total: 829.00K 168 | [0066] Write key rate per minute: 731.00K. Total: 839.00K 169 | [0067] Write key rate per minute: 743.00K. Total: 851.00K 170 | [0068] Write key rate per minute: 755.00K. Total: 863.00K 171 | [0069] Write key rate per minute: 730.00K. Total: 875.00K 172 | [0070] Write key rate per minute: 742.00K. Total: 887.00K 173 | [0071] Write key rate per minute: 754.00K. Total: 899.00K 174 | [0072] Write key rate per minute: 721.00K. Total: 913.00K 175 | [0073] Write key rate per minute: 733.00K. Total: 925.00K 176 | [0074] Write key rate per minute: 743.00K. Total: 935.00K 177 | [0075] Write key rate per minute: 730.00K. Total: 947.00K 178 | [0076] Write key rate per minute: 744.00K. Total: 961.00K 179 | [0077] Write key rate per minute: 756.00K. Total: 973.00K 180 | [0078] Write key rate per minute: 732.00K. Total: 983.00K 181 | [0079] Write key rate per minute: 746.00K. Total: 997.00K 182 | closing badger 183 | 184 | WROTE 1008000 KEYS 185 | Command being timed: "populate --kv badger --valsz 16384 --keys_mil 1 --dir /data" 186 | User time (seconds): 86.39 187 | System time (seconds): 19.86 188 | Percent of CPU this job got: 131% 189 | Elapsed (wall clock) time (h:mm:ss or m:ss): 1:20.71 190 | Average shared text size (kbytes): 0 191 | Average unshared data size (kbytes): 0 192 | Average stack size (kbytes): 0 193 | Average total size (kbytes): 0 194 | Maximum resident set size (kbytes): 1231752 195 | Average resident set size (kbytes): 0 196 | Major (requiring I/O) page faults: 0 197 | Minor (reclaiming a frame) page faults: 734320 198 | Voluntary context switches: 1395616 199 | Involuntary context switches: 1215 200 | Swaps: 0 201 | File system inputs: 1048 202 | File system outputs: 32403488 203 | Socket messages sent: 0 204 | Socket messages received: 0 205 | Signals delivered: 0 206 | Page size (bytes): 4096 207 | Exit status: 0 208 | TOTAL KEYS TO WRITE: 1.00M 209 | Init lmdb 210 | [0000] Write key rate per minute: 58.00K. Total: 58.00K 211 | [0001] Write key rate per minute: 105.00K. Total: 105.00K 212 | [0002] Write key rate per minute: 149.00K. Total: 149.00K 213 | [0003] Write key rate per minute: 195.00K. Total: 195.00K 214 | [0004] Write key rate per minute: 239.00K. Total: 239.00K 215 | [0005] Write key rate per minute: 283.00K. Total: 283.00K 216 | [0006] Write key rate per minute: 326.00K. Total: 326.00K 217 | [0007] Write key rate per minute: 369.00K. Total: 369.00K 218 | [0008] Write key rate per minute: 411.00K. Total: 411.00K 219 | [0009] Write key rate per minute: 452.00K. Total: 452.00K 220 | [0010] Write key rate per minute: 493.00K. Total: 493.00K 221 | [0011] Write key rate per minute: 536.00K. Total: 536.00K 222 | [0012] Write key rate per minute: 576.00K. Total: 576.00K 223 | [0013] Write key rate per minute: 616.00K. Total: 616.00K 224 | [0014] Write key rate per minute: 657.00K. Total: 657.00K 225 | [0015] Write key rate per minute: 696.00K. Total: 696.00K 226 | [0016] Write key rate per minute: 738.00K. Total: 738.00K 227 | [0017] Write key rate per minute: 779.00K. Total: 779.00K 228 | [0018] Write key rate per minute: 818.00K. Total: 818.00K 229 | [0019] Write key rate per minute: 858.00K. Total: 858.00K 230 | [0020] Write key rate per minute: 897.00K. Total: 897.00K 231 | [0021] Write key rate per minute: 937.00K. Total: 937.00K 232 | [0022] Write key rate per minute: 976.00K. 
208 | TOTAL KEYS TO WRITE: 1.00M
209 | Init lmdb
210 | [0000] Write key rate per minute: 58.00K. Total: 58.00K
211 | [0001] Write key rate per minute: 105.00K. Total: 105.00K
212 | [0002] Write key rate per minute: 149.00K. Total: 149.00K
213 | [0003] Write key rate per minute: 195.00K. Total: 195.00K
214 | [0004] Write key rate per minute: 239.00K. Total: 239.00K
215 | [0005] Write key rate per minute: 283.00K. Total: 283.00K
216 | [0006] Write key rate per minute: 326.00K. Total: 326.00K
217 | [0007] Write key rate per minute: 369.00K. Total: 369.00K
218 | [0008] Write key rate per minute: 411.00K. Total: 411.00K
219 | [0009] Write key rate per minute: 452.00K. Total: 452.00K
220 | [0010] Write key rate per minute: 493.00K. Total: 493.00K
221 | [0011] Write key rate per minute: 536.00K. Total: 536.00K
222 | [0012] Write key rate per minute: 576.00K. Total: 576.00K
223 | [0013] Write key rate per minute: 616.00K. Total: 616.00K
224 | [0014] Write key rate per minute: 657.00K. Total: 657.00K
225 | [0015] Write key rate per minute: 696.00K. Total: 696.00K
226 | [0016] Write key rate per minute: 738.00K. Total: 738.00K
227 | [0017] Write key rate per minute: 779.00K. Total: 779.00K
228 | [0018] Write key rate per minute: 818.00K. Total: 818.00K
229 | [0019] Write key rate per minute: 858.00K. Total: 858.00K
230 | [0020] Write key rate per minute: 897.00K. Total: 897.00K
231 | [0021] Write key rate per minute: 937.00K. Total: 937.00K
232 | [0022] Write key rate per minute: 976.00K. Total: 976.00K
233 | closing lmdb
234 |
235 | WROTE 1008000 KEYS
236 | Command being timed: "populate --kv lmdb --valsz 128 --keys_mil 1 --dir /data"
237 | User time (seconds): 6.77
238 | System time (seconds): 10.70
239 | Percent of CPU this job got: 73%
240 | Elapsed (wall clock) time (h:mm:ss or m:ss): 0:23.82
241 | Average shared text size (kbytes): 0
242 | Average unshared data size (kbytes): 0
243 | Average stack size (kbytes): 0
244 | Average total size (kbytes): 0
245 | Maximum resident set size (kbytes): 190904
246 | Average resident set size (kbytes): 0
247 | Major (requiring I/O) page faults: 1
248 | Minor (reclaiming a frame) page faults: 11119
249 | Voluntary context switches: 75166
250 | Involuntary context switches: 1244
251 | Swaps: 0
252 | File system inputs: 16
253 | File system outputs: 9885784
254 | Socket messages sent: 0
255 | Socket messages received: 0
256 | Signals delivered: 0
257 | Page size (bytes): 4096
258 | Exit status: 0
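
With several of these runs to compare, it helps to parse the rate lines back into numbers. A small helper along these lines (our own post-processing sketch, not part of the benchmark):

package main

import (
	"fmt"
	"math"
	"strconv"
	"strings"
)

// parseHuman reverses the log's humanized counts:
// "976.00K" -> 976000, "1.01M" -> 1010000.
func parseHuman(s string) (int64, error) {
	mult := 1.0
	switch {
	case strings.HasSuffix(s, "K"):
		mult, s = 1e3, strings.TrimSuffix(s, "K")
	case strings.HasSuffix(s, "M"):
		mult, s = 1e6, strings.TrimSuffix(s, "M")
	}
	f, err := strconv.ParseFloat(s, 64)
	if err != nil {
		return 0, err
	}
	return int64(math.Round(f * mult)), nil
}

// parseRateLine extracts (second, rate, total) from a line such as
// "[0022] Write key rate per minute: 976.00K. Total: 976.00K".
func parseRateLine(line string) (sec int, rate, total int64, err error) {
	var rateStr, totalStr string
	if _, err = fmt.Sscanf(line, "[%d] Write key rate per minute: %s Total: %s",
		&sec, &rateStr, &totalStr); err != nil {
		return
	}
	if rate, err = parseHuman(strings.TrimSuffix(rateStr, ".")); err != nil {
		return
	}
	total, err = parseHuman(totalStr)
	return
}

func main() {
	sec, rate, total, err := parseRateLine("[0022] Write key rate per minute: 976.00K. Total: 976.00K")
	fmt.Println(sec, rate, total, err) // 22 976000 976000 <nil>
}
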
259 | TOTAL KEYS TO WRITE: 1.00M
260 | Init lmdb
261 | [0000] Write key rate per minute: 36.00K. Total: 36.00K
262 | [0001] Write key rate per minute: 69.00K. Total: 69.00K
263 | [0002] Write key rate per minute: 100.00K. Total: 100.00K
264 | [0003] Write key rate per minute: 134.00K. Total: 134.00K
265 | [0004] Write key rate per minute: 166.00K. Total: 166.00K
266 | [0005] Write key rate per minute: 196.00K. Total: 196.00K
267 | [0006] Write key rate per minute: 226.00K. Total: 226.00K
268 | [0007] Write key rate per minute: 254.00K. Total: 254.00K
269 | [0008] Write key rate per minute: 282.00K. Total: 282.00K
270 | [0009] Write key rate per minute: 310.00K. Total: 310.00K
271 | [0010] Write key rate per minute: 339.00K. Total: 339.00K
272 | [0011] Write key rate per minute: 367.00K. Total: 367.00K
273 | [0012] Write key rate per minute: 395.00K. Total: 395.00K
274 | [0013] Write key rate per minute: 423.00K. Total: 423.00K
275 | [0014] Write key rate per minute: 450.00K. Total: 450.00K
276 | [0015] Write key rate per minute: 478.00K. Total: 478.00K
277 | [0016] Write key rate per minute: 506.00K. Total: 506.00K
278 | [0017] Write key rate per minute: 533.00K. Total: 533.00K
279 | [0018] Write key rate per minute: 561.00K. Total: 561.00K
280 | [0019] Write key rate per minute: 589.00K. Total: 589.00K
281 | [0020] Write key rate per minute: 616.00K. Total: 616.00K
282 | [0021] Write key rate per minute: 645.00K. Total: 645.00K
283 | [0022] Write key rate per minute: 673.00K. Total: 673.00K
284 | [0023] Write key rate per minute: 702.00K. Total: 702.00K
285 | [0024] Write key rate per minute: 731.00K. Total: 731.00K
286 | [0025] Write key rate per minute: 759.00K. Total: 759.00K
287 | [0026] Write key rate per minute: 786.00K. Total: 786.00K
288 | [0027] Write key rate per minute: 813.00K. Total: 813.00K
289 | [0028] Write key rate per minute: 841.00K. Total: 841.00K
290 | [0029] Write key rate per minute: 868.00K. Total: 868.00K
291 | [0030] Write key rate per minute: 896.00K. Total: 896.00K
292 | [0031] Write key rate per minute: 924.00K. Total: 924.00K
293 | [0032] Write key rate per minute: 953.00K. Total: 953.00K
294 | [0033] Write key rate per minute: 982.00K. Total: 982.00K
295 | closing lmdb
296 |
297 | WROTE 1008000 KEYS
298 | Command being timed: "populate --kv lmdb --valsz 1024 --keys_mil 1 --dir /data"
299 | User time (seconds): 13.20
300 | System time (seconds): 18.41
301 | Percent of CPU this job got: 90%
302 | Elapsed (wall clock) time (h:mm:ss or m:ss): 0:35.07
303 | Average shared text size (kbytes): 0
304 | Average unshared data size (kbytes): 0
305 | Average stack size (kbytes): 0
306 | Average total size (kbytes): 0
307 | Maximum resident set size (kbytes): 1348980
308 | Average resident set size (kbytes): 0
309 | Major (requiring I/O) page faults: 1
310 | Minor (reclaiming a frame) page faults: 82266
311 | Voluntary context switches: 76032
312 | Involuntary context switches: 1347
313 | Swaps: 0
314 | File system inputs: 88
315 | File system outputs: 16603280
316 | Socket messages sent: 0
317 | Socket messages received: 0
318 | Signals delivered: 0
319 | Page size (bytes): 4096
320 | Exit status: 0
321 | TOTAL KEYS TO WRITE: 1.00M
322 | Init lmdb
323 | [0000] Write key rate per minute: 10.00K. Total: 10.00K
324 | [0001] Write key rate per minute: 28.00K. Total: 28.00K
325 | [0002] Write key rate per minute: 38.00K. Total: 38.00K
326 | [0003] Write key rate per minute: 53.00K. Total: 53.00K
327 | [0004] Write key rate per minute: 69.00K. Total: 69.00K
328 | [0005] Write key rate per minute: 85.00K. Total: 85.00K
329 | [0006] Write key rate per minute: 100.00K. Total: 100.00K
330 | [0007] Write key rate per minute: 116.00K. Total: 116.00K
331 | [0008] Write key rate per minute: 131.00K. Total: 131.00K
332 | [0009] Write key rate per minute: 146.00K. Total: 146.00K
333 | [0010] Write key rate per minute: 163.00K. Total: 163.00K
334 | [0011] Write key rate per minute: 179.00K. Total: 179.00K
335 | [0012] Write key rate per minute: 194.00K. Total: 194.00K
336 | [0013] Write key rate per minute: 209.00K. Total: 209.00K
337 | [0014] Write key rate per minute: 224.00K. Total: 224.00K
338 | [0015] Write key rate per minute: 240.00K. Total: 240.00K
339 | [0016] Write key rate per minute: 253.00K. Total: 253.00K
340 | [0017] Write key rate per minute: 270.00K. Total: 270.00K
341 | [0018] Write key rate per minute: 285.00K. Total: 285.00K
342 | [0019] Write key rate per minute: 300.00K. Total: 300.00K
343 | [0020] Write key rate per minute: 316.00K. Total: 316.00K
344 | [0021] Write key rate per minute: 333.00K. Total: 333.00K
345 | [0022] Write key rate per minute: 348.00K. Total: 348.00K
346 | [0023] Write key rate per minute: 363.00K. Total: 363.00K
347 | [0024] Write key rate per minute: 381.00K. Total: 381.00K
348 | [0025] Write key rate per minute: 397.00K. Total: 397.00K
349 | [0026] Write key rate per minute: 413.00K. Total: 413.00K
350 | [0027] Write key rate per minute: 427.00K. Total: 427.00K
351 | [0028] Write key rate per minute: 443.00K. Total: 443.00K
352 | [0029] Write key rate per minute: 459.00K. Total: 459.00K
353 | [0030] Write key rate per minute: 473.00K. Total: 473.00K
354 | [0031] Write key rate per minute: 489.00K. Total: 489.00K
355 | [0032] Write key rate per minute: 507.00K. Total: 507.00K
356 | [0033] Write key rate per minute: 525.00K. Total: 525.00K
357 | [0034] Write key rate per minute: 543.00K. Total: 543.00K
358 | [0035] Write key rate per minute: 560.00K. Total: 560.00K
359 | [0036] Write key rate per minute: 575.00K. Total: 575.00K
360 | [0037] Write key rate per minute: 592.00K. Total: 592.00K
361 | [0038] Write key rate per minute: 608.00K. Total: 608.00K
362 | [0039] Write key rate per minute: 624.00K. Total: 624.00K
363 | [0040] Write key rate per minute: 640.00K. Total: 640.00K
364 | [0041] Write key rate per minute: 656.00K. Total: 656.00K
365 | [0042] Write key rate per minute: 671.00K. Total: 671.00K
366 | [0043] Write key rate per minute: 688.00K. Total: 688.00K
367 | [0044] Write key rate per minute: 704.00K. Total: 704.00K
368 | [0045] Write key rate per minute: 720.00K. Total: 720.00K
369 | [0046] Write key rate per minute: 735.00K. Total: 735.00K
370 | [0047] Write key rate per minute: 751.00K. Total: 751.00K
371 | [0048] Write key rate per minute: 766.00K. Total: 766.00K
372 | [0049] Write key rate per minute: 783.00K. Total: 783.00K
373 | [0050] Write key rate per minute: 799.00K. Total: 799.00K
374 | [0051] Write key rate per minute: 815.00K. Total: 815.00K
375 | [0052] Write key rate per minute: 830.00K. Total: 830.00K
376 | [0053] Write key rate per minute: 845.00K. Total: 845.00K
377 | [0054] Write key rate per minute: 862.00K. Total: 862.00K
378 | [0055] Write key rate per minute: 877.00K. Total: 877.00K
379 | [0056] Write key rate per minute: 893.00K. Total: 893.00K
380 | [0057] Write key rate per minute: 909.00K. Total: 909.00K
381 | [0058] Write key rate per minute: 923.00K. Total: 923.00K
382 | [0059] Write key rate per minute: 939.00K. Total: 939.00K
383 | [0060] Write key rate per minute: 911.00K. Total: 955.00K
384 | [0061] Write key rate per minute: 926.00K. Total: 970.00K
385 | [0062] Write key rate per minute: 943.00K. Total: 987.00K
386 | [0063] Write key rate per minute: 912.00K. Total: 1.00M
387 | closing lmdb
388 |
389 | WROTE 1008000 KEYS
390 | Command being timed: "populate --kv lmdb --valsz 16384 --keys_mil 1 --dir /data"
391 | User time (seconds): 84.95
392 | System time (seconds): 36.75
393 | Percent of CPU this job got: 187%
394 | Elapsed (wall clock) time (h:mm:ss or m:ss): 1:04.80
395 | Average shared text size (kbytes): 0
396 | Average unshared data size (kbytes): 0
397 | Average stack size (kbytes): 0
398 | Average total size (kbytes): 0
399 | Maximum resident set size (kbytes): 10369648
400 | Average resident set size (kbytes): 0
401 | Major (requiring I/O) page faults: 1
402 | Minor (reclaiming a frame) page faults: 2275134
403 | Voluntary context switches: 1066290
404 | Involuntary context switches: 1772
405 | Swaps: 0
406 | File system inputs: 824
407 | File system outputs: 47802024
408 | Socket messages sent: 0
409 | Socket messages received: 0
410 | Signals delivered: 0
411 | Page size (bytes): 4096
412 | Exit status: 0
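
Because the bracketed index tracks wall-clock seconds, the totals are easy to cross-check against time(1): the lmdb run above reports 1:04.80 elapsed and reaches 1.00M keys around [0063], i.e. roughly 15.6K keys written per second. A small helper for turning the elapsed field into seconds (again our own post-processing sketch, not part of the benchmark):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseElapsed converts time(1)'s "h:mm:ss or m:ss" field to seconds,
// e.g. "1:04.80" -> 64.8, "6:39.61" -> 399.61.
func parseElapsed(s string) (float64, error) {
	secs := 0.0
	for _, p := range strings.Split(s, ":") {
		f, err := strconv.ParseFloat(p, 64)
		if err != nil {
			return 0, err
		}
		secs = secs*60 + f
	}
	return secs, nil
}

func main() {
	elapsed, _ := parseElapsed("1:04.80")          // lmdb --valsz 16384 run above
	fmt.Printf("%.1f keys/sec\n", 1008000/elapsed) // ≈ 15555.6
}
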
413 | TOTAL KEYS TO WRITE: 1.00M
414 | Init BoltDB
415 | [0000] Write key rate per minute: 60.00K. Total: 60.00K
416 | [0001] Write key rate per minute: 108.00K. Total: 108.00K
417 | [0002] Write key rate per minute: 144.00K. Total: 144.00K
418 | [0003] Write key rate per minute: 192.00K. Total: 192.00K
419 | [0004] Write key rate per minute: 240.00K. Total: 240.00K
420 | [0005] Write key rate per minute: 276.00K. Total: 276.00K
421 | [0006] Write key rate per minute: 324.00K. Total: 324.00K
422 | [0007] Write key rate per minute: 360.00K. Total: 360.00K
423 | [0008] Write key rate per minute: 396.00K. Total: 396.00K
424 | [0009] Write key rate per minute: 444.00K. Total: 444.00K
425 | [0010] Write key rate per minute: 480.00K. Total: 480.00K
426 | [0011] Write key rate per minute: 516.00K. Total: 516.00K
427 | [0012] Write key rate per minute: 552.00K. Total: 552.00K
428 | [0013] Write key rate per minute: 588.00K. Total: 588.00K
429 | [0014] Write key rate per minute: 636.00K. Total: 636.00K
430 | [0015] Write key rate per minute: 661.00K. Total: 661.00K
431 | [0016] Write key rate per minute: 697.00K. Total: 697.00K
432 | [0017] Write key rate per minute: 733.00K. Total: 733.00K
433 | [0018] Write key rate per minute: 769.00K. Total: 769.00K
434 | [0019] Write key rate per minute: 805.00K. Total: 805.00K
435 | [0020] Write key rate per minute: 841.00K. Total: 841.00K
436 | [0021] Write key rate per minute: 877.00K. Total: 877.00K
437 | [0022] Write key rate per minute: 901.00K. Total: 901.00K
438 | [0023] Write key rate per minute: 937.00K. Total: 937.00K
439 | [0024] Write key rate per minute: 961.00K. Total: 961.00K
440 | [0025] Write key rate per minute: 997.00K. Total: 997.00K
441 | closing bolt
442 |
443 | WROTE 1008000 KEYS
444 | Command being timed: "populate --kv bolt --valsz 128 --keys_mil 1 --dir /data"
445 | User time (seconds): 33.00
446 | System time (seconds): 5.53
447 | Percent of CPU this job got: 147%
448 | Elapsed (wall clock) time (h:mm:ss or m:ss): 0:26.10
449 | Average shared text size (kbytes): 0
450 | Average unshared data size (kbytes): 0
451 | Average stack size (kbytes): 0
452 | Average total size (kbytes): 0
453 | Maximum resident set size (kbytes): 363976
454 | Average resident set size (kbytes): 0
455 | Major (requiring I/O) page faults: 0
456 | Minor (reclaiming a frame) page faults: 148198
457 | Voluntary context switches: 115123
458 | Involuntary context switches: 1567
459 | Swaps: 0
460 | File system inputs: 16
461 | File system outputs: 6144688
462 | Socket messages sent: 0
463 | Socket messages received: 0
464 | Signals delivered: 0
465 | Page size (bytes): 4096
466 | Exit status: 0
467 | TOTAL KEYS TO WRITE: 1.00M
468 | Init BoltDB
469 | [0000] Write key rate per minute: 26.00K. Total: 26.00K
470 | [0001] Write key rate per minute: 60.00K. Total: 60.00K
471 | [0002] Write key rate per minute: 86.00K. Total: 86.00K
472 | [0003] Write key rate per minute: 110.00K. Total: 110.00K
473 | [0004] Write key rate per minute: 132.00K. Total: 132.00K
474 | [0005] Write key rate per minute: 153.00K. Total: 153.00K
475 | [0006] Write key rate per minute: 175.00K. Total: 175.00K
476 | [0007] Write key rate per minute: 196.00K. Total: 196.00K
477 | [0008] Write key rate per minute: 216.00K. Total: 216.00K
478 | [0009] Write key rate per minute: 235.00K. Total: 235.00K
479 | [0010] Write key rate per minute: 254.00K. Total: 254.00K
480 | [0011] Write key rate per minute: 273.00K. Total: 273.00K
481 | [0012] Write key rate per minute: 292.00K. Total: 292.00K
482 | [0013] Write key rate per minute: 314.00K. Total: 314.00K
483 | [0014] Write key rate per minute: 331.00K. Total: 331.00K
484 | [0015] Write key rate per minute: 350.00K. Total: 350.00K
485 | [0016] Write key rate per minute: 369.00K. Total: 369.00K
486 | [0017] Write key rate per minute: 386.00K. Total: 386.00K
487 | [0018] Write key rate per minute: 403.00K. Total: 403.00K
488 | [0019] Write key rate per minute: 423.00K. Total: 424.00K
489 | [0020] Write key rate per minute: 441.00K. Total: 441.00K
490 | [0021] Write key rate per minute: 458.00K. Total: 458.00K
491 | [0022] Write key rate per minute: 477.00K. Total: 477.00K
492 | [0023] Write key rate per minute: 496.00K. Total: 496.00K
493 | [0024] Write key rate per minute: 513.00K. Total: 513.00K
494 | [0025] Write key rate per minute: 530.00K. Total: 530.00K
495 | [0026] Write key rate per minute: 549.00K. Total: 549.00K
496 | [0027] Write key rate per minute: 568.00K. Total: 568.00K
497 | [0028] Write key rate per minute: 585.00K. Total: 585.00K
498 | [0029] Write key rate per minute: 604.00K. Total: 604.00K
499 | [0030] Write key rate per minute: 621.00K. Total: 621.00K
500 | [0031] Write key rate per minute: 640.00K. Total: 640.00K
501 | [0032] Write key rate per minute: 657.00K. Total: 657.00K
502 | [0033] Write key rate per minute: 674.00K. Total: 674.00K
503 | [0034] Write key rate per minute: 691.00K. Total: 691.00K
504 | [0035] Write key rate per minute: 710.00K. Total: 710.00K
505 | [0036] Write key rate per minute: 727.00K. Total: 727.00K
506 | [0037] Write key rate per minute: 746.00K. Total: 746.00K
507 | [0038] Write key rate per minute: 765.00K. Total: 765.00K
508 | [0039] Write key rate per minute: 784.00K. Total: 784.00K
509 | [0040] Write key rate per minute: 801.00K. Total: 801.00K
510 | [0041] Write key rate per minute: 818.00K. Total: 818.00K
511 | [0042] Write key rate per minute: 837.00K. Total: 837.00K
512 | [0043] Write key rate per minute: 854.00K. Total: 854.00K
513 | [0044] Write key rate per minute: 873.00K. Total: 873.00K
514 | [0045] Write key rate per minute: 890.00K. Total: 890.00K
515 | [0046] Write key rate per minute: 909.00K. Total: 909.00K
516 | [0047] Write key rate per minute: 928.00K. Total: 928.00K
517 | [0048] Write key rate per minute: 945.00K. Total: 945.00K
518 | [0049] Write key rate per minute: 964.00K. Total: 964.00K
519 | [0050] Write key rate per minute: 981.00K. Total: 981.00K
520 | [0051] Write key rate per minute: 998.00K. Total: 998.00K
521 | closing bolt
522 |
523 | WROTE 1008000 KEYS
524 | Command being timed: "populate --kv bolt --valsz 1024 --keys_mil 1 --dir /data"
525 | User time (seconds): 56.32
526 | System time (seconds): 15.84
527 | Percent of CPU this job got: 136%
528 | Elapsed (wall clock) time (h:mm:ss or m:ss): 0:52.70
529 | Average shared text size (kbytes): 0
530 | Average unshared data size (kbytes): 0
531 | Average stack size (kbytes): 0
532 | Average total size (kbytes): 0
533 | Maximum resident set size (kbytes): 1178440
534 | Average resident set size (kbytes): 0
535 | Major (requiring I/O) page faults: 0
536 | Minor (reclaiming a frame) page faults: 285102
537 | Voluntary context switches: 267350
538 | Involuntary context switches: 3356
539 | Swaps: 0
540 | File system inputs: 80
541 | File system outputs: 16557064
542 | Socket messages sent: 0
543 | Socket messages received: 0
544 | Signals delivered: 0
545 | Page size (bytes): 4096
546 | Exit status: 0
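
The "Maximum resident set size" lines are also worth pulling out when comparing stores: at 16KB values, lmdb peaked above 10 GB of RSS versus about 1.2 GB for badger, which likely reflects lmdb's mmap-backed pages being counted against the process. A small scanner for that field (a sketch of our own helper, not part of the benchmark):

package main

import (
	"bufio"
	"fmt"
	"strconv"
	"strings"
)

// maxRSSKB scans a `time -v` report and returns the
// "Maximum resident set size (kbytes)" figure.
func maxRSSKB(report string) (int64, bool) {
	const prefix = "Maximum resident set size (kbytes):"
	sc := bufio.NewScanner(strings.NewReader(report))
	for sc.Scan() {
		line := strings.TrimSpace(sc.Text())
		if strings.HasPrefix(line, prefix) {
			v := strings.TrimSpace(strings.TrimPrefix(line, prefix))
			n, err := strconv.ParseInt(v, 10, 64)
			return n, err == nil
		}
	}
	return 0, false
}

func main() {
	// Figure from the bolt --valsz 1024 run above.
	n, ok := maxRSSKB("Maximum resident set size (kbytes): 1178440")
	fmt.Println(n, ok) // 1178440 true
}
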
547 | TOTAL KEYS TO WRITE: 1.00M
548 | Init BoltDB
549 | [0000] Write key rate per minute: 5.00K. Total: 5.00K
550 | [0001] Write key rate per minute: 13.00K. Total: 13.00K
551 | [0002] Write key rate per minute: 18.00K. Total: 18.00K
552 | [0003] Write key rate per minute: 24.00K. Total: 24.00K
553 | [0004] Write key rate per minute: 29.00K. Total: 29.00K
554 | [0005] Write key rate per minute: 34.00K. Total: 34.00K
555 | [0006] Write key rate per minute: 38.00K. Total: 38.00K
556 | [0007] Write key rate per minute: 43.00K. Total: 43.00K
557 | [0008] Write key rate per minute: 47.00K. Total: 47.00K
558 | [0009] Write key rate per minute: 51.00K. Total: 51.00K
559 | [0010] Write key rate per minute: 55.00K. Total: 55.00K
560 | [0011] Write key rate per minute: 59.00K. Total: 59.00K
561 | [0012] Write key rate per minute: 63.00K. Total: 63.00K
562 | [0013] Write key rate per minute: 67.00K. Total: 67.00K
563 | [0014] Write key rate per minute: 71.00K. Total: 71.00K
564 | [0015] Write key rate per minute: 75.00K. Total: 75.00K
565 | [0016] Write key rate per minute: 79.00K. Total: 79.00K
566 | [0017] Write key rate per minute: 82.00K. Total: 82.00K
567 | [0018] Write key rate per minute: 86.00K. Total: 86.00K
568 | [0019] Write key rate per minute: 90.00K. Total: 90.00K
569 | [0020] Write key rate per minute: 94.00K. Total: 94.00K
570 | [0021] Write key rate per minute: 97.00K. Total: 97.00K
571 | [0022] Write key rate per minute: 100.00K. Total: 100.00K
572 | [0023] Write key rate per minute: 104.00K. Total: 104.00K
573 | [0024] Write key rate per minute: 107.00K. Total: 107.00K
574 | [0025] Write key rate per minute: 111.00K. Total: 111.00K
575 | [0026] Write key rate per minute: 114.00K. Total: 114.00K
576 | [0027] Write key rate per minute: 117.00K. Total: 117.00K
577 | [0028] Write key rate per minute: 121.00K. Total: 121.00K
578 | [0029] Write key rate per minute: 124.00K. Total: 124.00K
579 | [0030] Write key rate per minute: 128.00K. Total: 128.00K
580 | [0031] Write key rate per minute: 131.00K. Total: 131.00K
581 | [0032] Write key rate per minute: 134.00K. Total: 134.00K
582 | [0033] Write key rate per minute: 138.00K. Total: 138.00K
583 | [0034] Write key rate per minute: 141.00K. Total: 141.00K
584 | [0035] Write key rate per minute: 145.00K. Total: 145.00K
585 | [0036] Write key rate per minute: 148.00K. Total: 148.00K
586 | [0037] Write key rate per minute: 151.00K. Total: 151.00K
587 | [0038] Write key rate per minute: 155.00K. Total: 155.00K
588 | [0039] Write key rate per minute: 158.00K. Total: 158.00K
589 | [0040] Write key rate per minute: 161.00K. Total: 161.00K
590 | [0041] Write key rate per minute: 165.00K. Total: 165.00K
591 | [0042] Write key rate per minute: 168.00K. Total: 168.00K
592 | [0043] Write key rate per minute: 171.00K. Total: 171.00K
593 | [0044] Write key rate per minute: 175.00K. Total: 175.00K
594 | [0045] Write key rate per minute: 178.00K. Total: 178.00K
595 | [0046] Write key rate per minute: 181.00K. Total: 181.00K
596 | [0047] Write key rate per minute: 184.00K. Total: 184.00K
597 | [0048] Write key rate per minute: 188.00K. Total: 188.00K
598 | [0049] Write key rate per minute: 191.00K. Total: 191.00K
599 | [0050] Write key rate per minute: 194.00K. Total: 194.00K
600 | [0051] Write key rate per minute: 197.00K. Total: 197.00K
601 | [0052] Write key rate per minute: 200.00K. Total: 200.00K
602 | [0053] Write key rate per minute: 203.00K. Total: 203.00K
603 | [0054] Write key rate per minute: 206.00K. Total: 206.00K
604 | [0055] Write key rate per minute: 209.00K. Total: 209.00K
605 | [0056] Write key rate per minute: 212.00K. Total: 212.00K
606 | [0057] Write key rate per minute: 215.00K. Total: 215.00K
607 | [0058] Write key rate per minute: 218.00K. Total: 218.00K
608 | [0059] Write key rate per minute: 221.00K. Total: 221.00K
609 | [0060] Write key rate per minute: 204.00K. Total: 225.00K
610 | [0061] Write key rate per minute: 206.00K. Total: 227.00K
611 | [0062] Write key rate per minute: 210.00K. Total: 231.00K
612 | [0063] Write key rate per minute: 197.00K. Total: 233.00K
613 | [0064] Write key rate per minute: 200.00K. Total: 236.00K
614 | [0065] Write key rate per minute: 203.00K. Total: 239.00K
615 | [0066] Write key rate per minute: 194.00K. Total: 242.00K
616 | [0067] Write key rate per minute: 197.00K. Total: 245.00K
617 | [0068] Write key rate per minute: 200.00K. Total: 248.00K
618 | [0069] Write key rate per minute: 191.00K. Total: 251.00K
619 | [0070] Write key rate per minute: 194.00K. Total: 254.00K
620 | [0071] Write key rate per minute: 197.00K. Total: 257.00K
621 | [0072] Write key rate per minute: 187.00K. Total: 260.00K
622 | [0073] Write key rate per minute: 189.00K. Total: 262.00K
623 | [0074] Write key rate per minute: 192.00K. Total: 265.00K
624 | [0075] Write key rate per minute: 184.00K. Total: 268.00K
625 | [0076] Write key rate per minute: 187.00K. Total: 271.00K
626 | [0077] Write key rate per minute: 190.00K. Total: 274.00K
627 | [0078] Write key rate per minute: 181.00K. Total: 276.00K
628 | [0079] Write key rate per minute: 184.00K. Total: 279.00K
629 | [0080] Write key rate per minute: 187.00K. Total: 282.00K
630 | [0081] Write key rate per minute: 180.00K. Total: 285.00K
631 | [0082] Write key rate per minute: 183.00K. Total: 288.00K
632 | [0083] Write key rate per minute: 186.00K. Total: 291.00K
633 | [0084] Write key rate per minute: 178.00K. Total: 293.00K
634 | [0085] Write key rate per minute: 181.00K. Total: 296.00K
635 | [0086] Write key rate per minute: 184.00K. Total: 299.00K
636 | [0087] Write key rate per minute: 176.00K. Total: 302.00K
637 | [0088] Write key rate per minute: 179.00K. Total: 305.00K
638 | [0089] Write key rate per minute: 181.00K. Total: 307.00K
639 | [0090] Write key rate per minute: 174.00K. Total: 310.00K
640 | [0091] Write key rate per minute: 177.00K. Total: 313.00K
641 | [0092] Write key rate per minute: 179.00K. Total: 315.00K
642 | [0093] Write key rate per minute: 172.00K. Total: 318.00K
643 | [0094] Write key rate per minute: 175.00K. Total: 321.00K
644 | [0095] Write key rate per minute: 177.00K. Total: 323.00K
645 | [0096] Write key rate per minute: 170.00K. Total: 326.00K
646 | [0097] Write key rate per minute: 173.00K. Total: 329.00K
647 | [0098] Write key rate per minute: 175.00K. Total: 331.00K
648 | [0099] Write key rate per minute: 168.00K. Total: 334.00K
649 | [0100] Write key rate per minute: 170.00K. Total: 336.00K
650 | [0101] Write key rate per minute: 173.00K. Total: 339.00K
651 | [0102] Write key rate per minute: 165.00K. Total: 341.00K
652 | [0103] Write key rate per minute: 168.00K. Total: 344.00K
653 | [0104] Write key rate per minute: 171.00K. Total: 347.00K
654 | [0105] Write key rate per minute: 163.00K. Total: 349.00K
655 | [0106] Write key rate per minute: 166.00K. Total: 352.00K
656 | [0107] Write key rate per minute: 168.00K. Total: 354.00K
657 | [0108] Write key rate per minute: 162.00K. Total: 357.00K
658 | [0109] Write key rate per minute: 164.00K. Total: 359.00K
659 | [0110] Write key rate per minute: 167.00K. Total: 362.00K
660 | [0111] Write key rate per minute: 161.00K. Total: 365.00K
661 | [0112] Write key rate per minute: 164.00K. Total: 368.00K
662 | [0113] Write key rate per minute: 166.00K. Total: 370.00K
663 | [0114] Write key rate per minute: 160.00K. Total: 373.00K
664 | [0115] Write key rate per minute: 162.00K. Total: 375.00K
665 | [0116] Write key rate per minute: 165.00K. Total: 378.00K
666 | [0117] Write key rate per minute: 157.00K. Total: 380.00K
667 | [0118] Write key rate per minute: 160.00K. Total: 383.00K
668 | [0119] Write key rate per minute: 162.00K. Total: 385.00K
669 | [0120] Write key rate per minute: 156.00K. Total: 388.00K
670 | [0121] Write key rate per minute: 158.00K. Total: 390.00K
671 | [0122] Write key rate per minute: 161.00K. Total: 393.00K
672 | [0123] Write key rate per minute: 154.00K. Total: 395.00K
673 | [0124] Write key rate per minute: 157.00K. Total: 398.00K
674 | [0125] Write key rate per minute: 160.00K. Total: 401.00K
675 | [0126] Write key rate per minute: 154.00K. Total: 403.00K
676 | [0127] Write key rate per minute: 156.00K. Total: 405.00K
677 | [0128] Write key rate per minute: 159.00K. Total: 408.00K
678 | [0129] Write key rate per minute: 152.00K. Total: 410.00K
679 | [0130] Write key rate per minute: 155.00K. Total: 413.00K
680 | [0131] Write key rate per minute: 157.00K. Total: 415.00K
681 | [0132] Write key rate per minute: 152.00K. Total: 418.00K
682 | [0133] Write key rate per minute: 154.00K. Total: 420.00K
683 | [0134] Write key rate per minute: 157.00K. Total: 423.00K
684 | [0135] Write key rate per minute: 150.00K. Total: 425.00K
685 | [0136] Write key rate per minute: 152.00K. Total: 427.00K
686 | [0137] Write key rate per minute: 155.00K. Total: 430.00K
687 | [0138] Write key rate per minute: 149.00K. Total: 432.00K
688 | [0139] Write key rate per minute: 152.00K. Total: 435.00K
689 | [0140] Write key rate per minute: 155.00K. Total: 438.00K
690 | [0141] Write key rate per minute: 148.00K. Total: 440.00K
691 | [0142] Write key rate per minute: 151.00K. Total: 443.00K
692 | [0143] Write key rate per minute: 153.00K. Total: 445.00K
693 | [0144] Write key rate per minute: 148.00K. Total: 448.00K
694 | [0145] Write key rate per minute: 150.00K. Total: 450.00K
695 | [0146] Write key rate per minute: 153.00K. Total: 453.00K
696 | [0147] Write key rate per minute: 147.00K. Total: 456.00K
697 | [0148] Write key rate per minute: 149.00K. Total: 458.00K
698 | [0149] Write key rate per minute: 151.00K. Total: 460.00K
699 | [0150] Write key rate per minute: 146.00K. Total: 462.00K
700 | [0151] Write key rate per minute: 149.00K. Total: 465.00K
701 | [0152] Write key rate per minute: 151.00K. Total: 467.00K
702 | [0153] Write key rate per minute: 144.00K. Total: 469.00K
703 | [0154] Write key rate per minute: 146.00K. Total: 471.00K
704 | [0155] Write key rate per minute: 149.00K. Total: 474.00K
705 | [0156] Write key rate per minute: 144.00K. Total: 476.00K
706 | [0157] Write key rate per minute: 146.00K. Total: 478.00K
707 | [0158] Write key rate per minute: 149.00K. Total: 481.00K
708 | [0159] Write key rate per minute: 143.00K. Total: 483.00K
709 | [0160] Write key rate per minute: 145.00K. Total: 485.00K
710 | [0161] Write key rate per minute: 148.00K. Total: 488.00K
711 | [0162] Write key rate per minute: 142.00K. Total: 490.00K
712 | [0163] Write key rate per minute: 144.00K. Total: 492.00K
713 | [0164] Write key rate per minute: 147.00K. Total: 495.00K
714 | [0165] Write key rate per minute: 142.00K. Total: 497.00K
715 | [0166] Write key rate per minute: 144.00K. Total: 499.00K
716 | [0167] Write key rate per minute: 147.00K. Total: 502.00K
717 | [0168] Write key rate per minute: 141.00K. Total: 504.00K
718 | [0169] Write key rate per minute: 144.00K. Total: 507.00K
719 | [0170] Write key rate per minute: 146.00K. Total: 509.00K
720 | [0171] Write key rate per minute: 140.00K. Total: 511.00K
721 | [0172] Write key rate per minute: 143.00K. Total: 514.00K
722 | [0173] Write key rate per minute: 145.00K. Total: 516.00K
723 | [0174] Write key rate per minute: 139.00K. Total: 518.00K
724 | [0175] Write key rate per minute: 142.00K. Total: 521.00K
725 | [0176] Write key rate per minute: 144.00K. Total: 523.00K
726 | [0177] Write key rate per minute: 140.00K. Total: 526.00K
727 | [0178] Write key rate per minute: 142.00K. Total: 528.00K
728 | [0179] Write key rate per minute: 144.00K. Total: 530.00K
729 | [0180] Write key rate per minute: 138.00K. Total: 532.00K
730 | [0181] Write key rate per minute: 141.00K. Total: 535.00K
731 | [0182] Write key rate per minute: 143.00K. Total: 537.00K
732 | [0183] Write key rate per minute: 137.00K. Total: 539.00K
733 | [0184] Write key rate per minute: 140.00K. Total: 542.00K
734 | [0185] Write key rate per minute: 142.00K. Total: 544.00K
735 | [0186] Write key rate per minute: 137.00K. Total: 546.00K
736 | [0187] Write key rate per minute: 139.00K. Total: 548.00K
737 | [0188] Write key rate per minute: 142.00K. Total: 551.00K
738 | [0189] Write key rate per minute: 136.00K. Total: 553.00K
739 | [0190] Write key rate per minute: 138.00K. Total: 555.00K
740 | [0191] Write key rate per minute: 140.00K. Total: 557.00K
741 | [0192] Write key rate per minute: 136.00K. Total: 560.00K
742 | [0193] Write key rate per minute: 138.00K. Total: 562.00K
743 | [0194] Write key rate per minute: 140.00K. Total: 564.00K
744 | [0195] Write key rate per minute: 135.00K. Total: 566.00K
745 | [0196] Write key rate per minute: 138.00K. Total: 569.00K
746 | [0197] Write key rate per minute: 140.00K. Total: 571.00K
747 | [0198] Write key rate per minute: 134.00K. Total: 573.00K
748 | [0199] Write key rate per minute: 137.00K. Total: 576.00K
749 | [0200] Write key rate per minute: 139.00K. Total: 578.00K
750 | [0201] Write key rate per minute: 134.00K. Total: 580.00K
751 | [0202] Write key rate per minute: 137.00K. Total: 583.00K
752 | [0203] Write key rate per minute: 139.00K. Total: 585.00K
753 | [0204] Write key rate per minute: 133.00K. Total: 587.00K
754 | [0205] Write key rate per minute: 135.00K. Total: 589.00K
755 | [0206] Write key rate per minute: 138.00K. Total: 592.00K
756 | [0207] Write key rate per minute: 133.00K. Total: 594.00K
757 | [0208] Write key rate per minute: 135.00K. Total: 596.00K
758 | [0209] Write key rate per minute: 138.00K. Total: 599.00K
759 | [0210] Write key rate per minute: 133.00K. Total: 601.00K
760 | [0211] Write key rate per minute: 135.00K. Total: 603.00K
761 | [0212] Write key rate per minute: 137.00K. Total: 605.00K
762 | [0213] Write key rate per minute: 133.00K. Total: 608.00K
763 | [0214] Write key rate per minute: 135.00K. Total: 610.00K
764 | [0215] Write key rate per minute: 137.00K. Total: 612.00K
765 | [0216] Write key rate per minute: 132.00K. Total: 614.00K
766 | [0217] Write key rate per minute: 135.00K. Total: 617.00K
767 | [0218] Write key rate per minute: 137.00K. Total: 619.00K
768 | [0219] Write key rate per minute: 132.00K. Total: 621.00K
769 | [0220] Write key rate per minute: 134.00K. Total: 623.00K
770 | [0221] Write key rate per minute: 136.00K. Total: 625.00K
771 | [0222] Write key rate per minute: 131.00K. Total: 627.00K
772 | [0223] Write key rate per minute: 133.00K. Total: 629.00K
773 | [0224] Write key rate per minute: 136.00K. Total: 632.00K
774 | [0225] Write key rate per minute: 131.00K. Total: 634.00K
775 | [0226] Write key rate per minute: 133.00K. Total: 636.00K
776 | [0227] Write key rate per minute: 135.00K. Total: 638.00K
777 | [0228] Write key rate per minute: 131.00K. Total: 641.00K
778 | [0229] Write key rate per minute: 133.00K. Total: 643.00K
779 | [0230] Write key rate per minute: 135.00K. Total: 645.00K
780 | [0231] Write key rate per minute: 130.00K. Total: 647.00K
781 | [0232] Write key rate per minute: 132.00K. Total: 649.00K
782 | [0233] Write key rate per minute: 134.00K. Total: 651.00K
783 | [0234] Write key rate per minute: 130.00K. Total: 654.00K
784 | [0235] Write key rate per minute: 132.00K. Total: 656.00K
785 | [0236] Write key rate per minute: 134.00K. Total: 658.00K
786 | [0237] Write key rate per minute: 129.00K. Total: 660.00K
787 | [0238] Write key rate per minute: 131.00K. Total: 662.00K
788 | [0239] Write key rate per minute: 133.00K. Total: 664.00K
789 | [0240] Write key rate per minute: 129.00K. Total: 667.00K
790 | [0241] Write key rate per minute: 131.00K. Total: 669.00K
791 | [0242] Write key rate per minute: 133.00K. Total: 671.00K
792 | [0243] Write key rate per minute: 128.00K. Total: 673.00K
793 | [0244] Write key rate per minute: 130.00K. Total: 675.00K
794 | [0245] Write key rate per minute: 133.00K. Total: 678.00K
795 | [0246] Write key rate per minute: 128.00K. Total: 680.00K
796 | [0247] Write key rate per minute: 130.00K. Total: 682.00K
797 | [0248] Write key rate per minute: 132.00K. Total: 684.00K
798 | [0249] Write key rate per minute: 128.00K. Total: 686.00K
799 | [0250] Write key rate per minute: 130.00K. Total: 688.00K
800 | [0251] Write key rate per minute: 133.00K. Total: 691.00K
801 | [0252] Write key rate per minute: 128.00K. Total: 693.00K
802 | [0253] Write key rate per minute: 130.00K. Total: 695.00K
803 | [0254] Write key rate per minute: 133.00K. Total: 698.00K
804 | [0255] Write key rate per minute: 128.00K. Total: 700.00K
805 | [0256] Write key rate per minute: 130.00K. Total: 702.00K
806 | [0257] Write key rate per minute: 132.00K. Total: 704.00K
807 | [0258] Write key rate per minute: 127.00K. Total: 706.00K
808 | [0259] Write key rate per minute: 130.00K. Total: 709.00K
809 | [0260] Write key rate per minute: 132.00K. Total: 711.00K
810 | [0261] Write key rate per minute: 127.00K. Total: 713.00K
811 | [0262] Write key rate per minute: 130.00K. Total: 716.00K
812 | [0263] Write key rate per minute: 132.00K. Total: 718.00K
813 | [0264] Write key rate per minute: 128.00K. Total: 721.00K
814 | [0265] Write key rate per minute: 130.00K. Total: 723.00K
815 | [0266] Write key rate per minute: 132.00K. Total: 725.00K
816 | [0267] Write key rate per minute: 127.00K. Total: 727.00K
817 | [0268] Write key rate per minute: 129.00K. Total: 729.00K
818 | [0269] Write key rate per minute: 131.00K. Total: 731.00K
819 | [0270] Write key rate per minute: 127.00K. Total: 734.00K
820 | [0271] Write key rate per minute: 128.00K. Total: 735.00K
821 | [0272] Write key rate per minute: 130.00K. Total: 737.00K
822 | [0273] Write key rate per minute: 127.00K. Total: 740.00K
823 | [0274] Write key rate per minute: 129.00K. Total: 742.00K
824 | [0275] Write key rate per minute: 131.00K. Total: 744.00K
825 | [0276] Write key rate per minute: 127.00K. Total: 746.00K
826 | [0277] Write key rate per minute: 129.00K. Total: 748.00K
827 | [0278] Write key rate per minute: 131.00K. Total: 750.00K
828 | [0279] Write key rate per minute: 127.00K. Total: 753.00K
829 | [0280] Write key rate per minute: 129.00K. Total: 755.00K
830 | [0281] Write key rate per minute: 131.00K. Total: 757.00K
831 | [0282] Write key rate per minute: 126.00K. Total: 759.00K
832 | [0283] Write key rate per minute: 129.00K. Total: 762.00K
833 | [0284] Write key rate per minute: 131.00K. Total: 764.00K
834 | [0285] Write key rate per minute: 127.00K. Total: 766.00K
835 | [0286] Write key rate per minute: 129.00K. Total: 768.00K
836 | [0287] Write key rate per minute: 131.00K. Total: 770.00K
837 | [0288] Write key rate per minute: 127.00K. Total: 773.00K
838 | [0289] Write key rate per minute: 129.00K. Total: 775.00K
839 | [0290] Write key rate per minute: 131.00K. Total: 777.00K
840 | [0291] Write key rate per minute: 127.00K. Total: 779.00K
841 | [0292] Write key rate per minute: 129.00K. Total: 781.00K
842 | [0293] Write key rate per minute: 132.00K. Total: 784.00K
843 | [0294] Write key rate per minute: 127.00K. Total: 786.00K
844 | [0295] Write key rate per minute: 129.00K. Total: 788.00K
845 | [0296] Write key rate per minute: 131.00K. Total: 790.00K
846 | [0297] Write key rate per minute: 127.00K. Total: 792.00K
847 | [0298] Write key rate per minute: 129.00K. Total: 794.00K
848 | [0299] Write key rate per minute: 131.00K. Total: 796.00K
849 | [0300] Write key rate per minute: 126.00K. Total: 798.00K
850 | [0301] Write key rate per minute: 128.00K. Total: 800.00K
851 | [0302] Write key rate per minute: 130.00K. Total: 802.00K
852 | [0303] Write key rate per minute: 125.00K. Total: 804.00K
853 | [0304] Write key rate per minute: 128.00K. Total: 807.00K
854 | [0305] Write key rate per minute: 130.00K. Total: 809.00K
855 | [0306] Write key rate per minute: 126.00K. Total: 811.00K
856 | [0307] Write key rate per minute: 128.00K. Total: 813.00K
857 | [0308] Write key rate per minute: 131.00K. Total: 816.00K
858 | [0309] Write key rate per minute: 125.00K. Total: 817.00K
859 | [0310] Write key rate per minute: 128.00K. Total: 820.00K
860 | [0311] Write key rate per minute: 130.00K. Total: 822.00K
861 | [0312] Write key rate per minute: 125.00K. Total: 824.00K
862 | [0313] Write key rate per minute: 127.00K. Total: 826.00K
863 | [0314] Write key rate per minute: 129.00K. Total: 828.00K
864 | [0315] Write key rate per minute: 126.00K. Total: 831.00K
865 | [0316] Write key rate per minute: 128.00K. Total: 833.00K
866 | [0317] Write key rate per minute: 130.00K. Total: 835.00K
867 | [0318] Write key rate per minute: 125.00K. Total: 837.00K
868 | [0319] Write key rate per minute: 127.00K. Total: 839.00K
869 | [0320] Write key rate per minute: 129.00K. Total: 841.00K
870 | [0321] Write key rate per minute: 124.00K. Total: 843.00K
871 | [0322] Write key rate per minute: 126.00K. Total: 845.00K
872 | [0323] Write key rate per minute: 128.00K. Total: 847.00K
873 | [0324] Write key rate per minute: 123.00K. Total: 849.00K
874 | [0325] Write key rate per minute: 125.00K. Total: 851.00K
875 | [0326] Write key rate per minute: 128.00K. Total: 854.00K
876 | [0327] Write key rate per minute: 124.00K. Total: 856.00K
877 | [0328] Write key rate per minute: 126.00K. Total: 858.00K
878 | [0329] Write key rate per minute: 128.00K. Total: 860.00K
879 | [0330] Write key rate per minute: 123.00K. Total: 862.00K
880 | [0331] Write key rate per minute: 125.00K. Total: 864.00K
881 | [0332] Write key rate per minute: 128.00K. Total: 867.00K
882 | [0333] Write key rate per minute: 124.00K. Total: 869.00K
883 | [0334] Write key rate per minute: 126.00K. Total: 871.00K
884 | [0335] Write key rate per minute: 128.00K. Total: 873.00K
885 | [0336] Write key rate per minute: 125.00K. Total: 876.00K
886 | [0337] Write key rate per minute: 127.00K. Total: 878.00K
887 | [0338] Write key rate per minute: 129.00K. Total: 880.00K
888 | [0339] Write key rate per minute: 124.00K. Total: 882.00K
889 | [0340] Write key rate per minute: 126.00K. Total: 884.00K
890 | [0341] Write key rate per minute: 128.00K. Total: 886.00K
891 | [0342] Write key rate per minute: 124.00K. Total: 889.00K
892 | [0343] Write key rate per minute: 126.00K. Total: 891.00K
893 | [0344] Write key rate per minute: 128.00K. Total: 893.00K
894 | [0345] Write key rate per minute: 124.00K. Total: 895.00K
895 | [0346] Write key rate per minute: 126.00K. Total: 897.00K
896 | [0347] Write key rate per minute: 128.00K. Total: 899.00K
897 | [0348] Write key rate per minute: 123.00K. Total: 901.00K
898 | [0349] Write key rate per minute: 125.00K. Total: 903.00K
899 | [0350] Write key rate per minute: 128.00K. Total: 906.00K
900 | [0351] Write key rate per minute: 123.00K. Total: 908.00K
901 | [0352] Write key rate per minute: 125.00K. Total: 910.00K
902 | [0353] Write key rate per minute: 127.00K. Total: 912.00K
903 | [0354] Write key rate per minute: 123.00K. Total: 914.00K
904 | [0355] Write key rate per minute: 125.00K. Total: 916.00K
905 | [0356] Write key rate per minute: 127.00K. Total: 918.00K
906 | [0357] Write key rate per minute: 123.00K. Total: 920.00K
907 | [0358] Write key rate per minute: 125.00K. Total: 922.00K
908 | [0359] Write key rate per minute: 127.00K. Total: 924.00K
909 | [0360] Write key rate per minute: 123.00K. Total: 926.00K
910 | [0361] Write key rate per minute: 126.00K. Total: 929.00K
911 | [0362] Write key rate per minute: 128.00K. Total: 931.00K
912 | [0363] Write key rate per minute: 123.00K. Total: 933.00K
913 | [0364] Write key rate per minute: 125.00K. Total: 935.00K
914 | [0365] Write key rate per minute: 127.00K. Total: 937.00K
915 | [0366] Write key rate per minute: 123.00K. Total: 939.00K
916 | [0367] Write key rate per minute: 126.00K. Total: 942.00K
917 | [0368] Write key rate per minute: 128.00K. Total: 944.00K
918 | [0369] Write key rate per minute: 124.00K. Total: 947.00K
919 | [0370] Write key rate per minute: 126.00K. Total: 949.00K
920 | [0371] Write key rate per minute: 128.00K. Total: 951.00K
921 | [0372] Write key rate per minute: 124.00K. Total: 953.00K
922 | [0373] Write key rate per minute: 127.00K. Total: 956.00K
923 | [0374] Write key rate per minute: 129.00K. Total: 958.00K
924 | [0375] Write key rate per minute: 124.00K. Total: 960.00K
925 | [0376] Write key rate per minute: 126.00K. Total: 962.00K
926 | [0377] Write key rate per minute: 128.00K. Total: 964.00K
927 | [0378] Write key rate per minute: 125.00K. Total: 967.00K
928 | [0379] Write key rate per minute: 127.00K. Total: 969.00K
929 | [0380] Write key rate per minute: 129.00K. Total: 971.00K
930 | [0381] Write key rate per minute: 125.00K. Total: 973.00K
931 | [0382] Write key rate per minute: 127.00K. Total: 975.00K
932 | [0383] Write key rate per minute: 129.00K. Total: 977.00K
933 | [0384] Write key rate per minute: 124.00K. Total: 979.00K
934 | [0385] Write key rate per minute: 126.00K. Total: 981.00K
935 | [0386] Write key rate per minute: 128.00K. Total: 983.00K
936 | [0387] Write key rate per minute: 124.00K. Total: 985.00K
937 | [0388] Write key rate per minute: 126.00K. Total: 987.00K
938 | [0389] Write key rate per minute: 128.00K. Total: 989.00K
939 | [0390] Write key rate per minute: 123.00K. Total: 991.00K
940 | [0391] Write key rate per minute: 125.00K. Total: 993.00K
941 | [0392] Write key rate per minute: 127.00K. Total: 995.00K
942 | [0393] Write key rate per minute: 123.00K. Total: 997.00K
943 | [0394] Write key rate per minute: 125.00K. Total: 999.00K
944 | [0395] Write key rate per minute: 127.00K. Total: 1.00M
945 | [0396] Write key rate per minute: 122.00K. Total: 1.00M
946 | [0397] Write key rate per minute: 124.00K. Total: 1.00M
947 | [0398] Write key rate per minute: 126.00K. Total: 1.01M
948 | closing bolt
949 |
950 | WROTE 1008000 KEYS
951 | Command being timed: "populate --kv bolt --valsz 16384 --keys_mil 1 --dir /data"
952 | User time (seconds): 356.24
953 | System time (seconds): 65.29
954 | Percent of CPU this job got: 105%
955 | Elapsed (wall clock) time (h:mm:ss or m:ss): 6:39.61
956 | Average shared text size (kbytes): 0
957 | Average unshared data size (kbytes): 0
958 | Average stack size (kbytes): 0
959 | Average total size (kbytes): 0
960 | Maximum resident set size (kbytes): 8998588
961 | Average resident set size (kbytes): 0
962 | Major (requiring I/O) page faults: 0
963 | Minor (reclaiming a frame) page faults: 5491970
964 | Voluntary context switches: 542274
965 | Involuntary context switches: 6368
966 | Swaps: 0
967 | File system inputs: 736
968 | File system outputs: 135719296
969 | Socket messages sent: 0
970 | Socket messages received: 0
971 | Signals delivered: 0
972 | Page size (bytes): 4096
973 | Exit status: 0
974 | --------------------------------------------------------------------------------
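
Pulling the seven runs above together: on Linux, time(1)'s "File system inputs/outputs" figures come from getrusage and count 512-byte blocks, so they give a rough write-amplification estimate against the raw value payload (1,008,000 keys times the value size; key bytes and metadata ignored, so these are upper bounds). By that measure, badger at 16KB values wrote about 16.6 GB for 16.5 GB of values (roughly 1x, consistent with its append-only value log), while bolt wrote about 69 GB (roughly 4x) and took 6:39.61 against badger's 1:20.71. A sketch of the arithmetic, with the figures hardcoded from the reports above:

package main

import "fmt"

// One row per run above: store, value size, elapsed seconds, and
// time(1)'s "File system outputs" (512-byte blocks on Linux).
type run struct {
	store string
	valsz int64
	secs  float64
	fsOut int64
}

func main() {
	const keys = 1008000 // every run above reports "WROTE 1008000 KEYS"
	runs := []run{
		{"badger", 16384, 80.71, 32403488},
		{"lmdb", 128, 23.82, 9885784},
		{"lmdb", 1024, 35.07, 16603280},
		{"lmdb", 16384, 64.80, 47802024},
		{"bolt", 128, 26.10, 6144688},
		{"bolt", 1024, 52.70, 16557064},
		{"bolt", 16384, 399.61, 135719296},
	}
	for _, r := range runs {
		written := r.fsOut * 512         // bytes actually written to disk
		payload := int64(keys) * r.valsz // value bytes only, keys excluded
		fmt.Printf("%-6s valsz=%5d  %6.1fK keys/s  write-amp≈%.1fx\n",
			r.store, r.valsz, keys/r.secs/1000, float64(written)/float64(payload))
	}
}

The same arithmetic puts the two B-tree stores at roughly 8x amplification for 1KB values and roughly 24-39x at 128-byte values, where page rewrites dominate the payload.
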