├── test
│   ├── data
│   │   └── testdata.bin
│   ├── leveldown-test.js
│   ├── batch-test.js
│   ├── del-test.js
│   ├── get-test.js
│   ├── open-test.js
│   ├── put-test.js
│   ├── ranges-test.js
│   ├── chained-batch-test.js
│   ├── approximate-size-test.js
│   ├── put-get-del-test.js
│   ├── close-test.js
│   ├── stack-blower.js
│   ├── make.js
│   ├── leak-tester.js
│   ├── cleanup-hanging-iterators-test.js
│   ├── getproperty-test.js
│   ├── iterator-recursion-test.js
│   ├── repair-test.js
│   ├── iterator-test.js
│   ├── leak-tester-batch.js
│   ├── destroy-test.js
│   └── compression-test.js
├── .gitignore
├── .travis.yml
├── .npmignore
├── .dntrc
├── src
│   ├── batch_async.cc
│   ├── batch_async.h
│   ├── async.h
│   ├── batch.h
│   ├── flatrocks_async.h
│   ├── common.h
│   ├── flatrocks_async.cc
│   ├── iterator_async.h
│   ├── flatrocks.cc
│   ├── iterator.h
│   ├── iterator_async.cc
│   ├── database.h
│   ├── database_async.h
│   ├── batch.cc
│   ├── flatrocks.h
│   ├── database_async.cc
│   ├── database.cc
│   └── iterator.cc
├── bench
│   ├── write-sorted-plot.sh
│   ├── write-random-plot.sh
│   ├── write-random.js
│   ├── write-sorted.js
│   ├── db-bench-plot.sh
│   └── db-bench.js
├── chained-batch.js
├── LICENSE.md
├── binding.gyp
├── package.json
├── .jshintrc
├── iterator.js
├── flat-rocks.js
├── README.md
└── CHANGELOG.md
/test/data/testdata.bin: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/andris9/flat-rocks/master/test/data/testdata.bin -------------------------------------------------------------------------------- /test/leveldown-test.js: -------------------------------------------------------------------------------- 1 | const test = require('tape') 2 | , leveldown = require('../') 3 | , abstract = require('abstract-leveldown/abstract/leveldown-test') 4 | 5 | abstract.args(leveldown, test) 6 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | node_modules/ 2 | build/ 3 | build-pre-gyp/ 4 | Release/ 5 | libleveldb.so 6 | libleveldb.a 7 | leakydb 8 | bench/ 9 | *.sln 10 | *.vcxproj 11 | *.vcxproj.filters 12 | *.tlog 13 | *.obj 14 | *.1sdk.pdb 15 | *.lastbuildstate 16 | npm-debug.log 17 | prebuilds/ 18 | -------------------------------------------------------------------------------- /test/batch-test.js: -------------------------------------------------------------------------------- 1 | const test = require('tape') 2 | , testCommon = require('abstract-leveldown/testCommon') 3 | , leveldown = require('../') 4 | , abstract = require('abstract-leveldown/abstract/batch-test') 5 | 6 | abstract.all(leveldown, test, testCommon) 7 | -------------------------------------------------------------------------------- /test/del-test.js: -------------------------------------------------------------------------------- 1 | const test = require('tape') 2 | , testCommon = require('abstract-leveldown/testCommon') 3 | , leveldown = require('../') 4 | , abstract = require('abstract-leveldown/abstract/del-test') 5 | 6 | abstract.all(leveldown, test, testCommon) 7 | -------------------------------------------------------------------------------- /test/get-test.js: -------------------------------------------------------------------------------- 1 | const test = require('tape') 2 | , testCommon = require('abstract-leveldown/testCommon') 3 | , leveldown = require('../') 4 | , abstract = require('abstract-leveldown/abstract/get-test') 5 | 6 | abstract.all(leveldown, test, testCommon) 7 | --------------------------------------------------------------------------------
/test/open-test.js: -------------------------------------------------------------------------------- 1 | const test = require('tape') 2 | , testCommon = require('abstract-leveldown/testCommon') 3 | , leveldown = require('../') 4 | , abstract = require('abstract-leveldown/abstract/open-test') 5 | 6 | abstract.all(leveldown, test, testCommon) 7 | -------------------------------------------------------------------------------- /test/put-test.js: -------------------------------------------------------------------------------- 1 | const test = require('tape') 2 | , testCommon = require('abstract-leveldown/testCommon') 3 | , leveldown = require('../') 4 | , abstract = require('abstract-leveldown/abstract/put-test') 5 | 6 | abstract.all(leveldown, test, testCommon) 7 | -------------------------------------------------------------------------------- /test/ranges-test.js: -------------------------------------------------------------------------------- 1 | const test = require('tape') 2 | , testCommon = require('abstract-leveldown/testCommon') 3 | , leveldown = require('../') 4 | , abstract = require('abstract-leveldown/abstract/ranges-test') 5 | 6 | abstract.all(leveldown, test, testCommon) 7 | -------------------------------------------------------------------------------- /test/chained-batch-test.js: -------------------------------------------------------------------------------- 1 | const test = require('tape') 2 | , testCommon = require('abstract-leveldown/testCommon') 3 | , leveldown = require('../') 4 | , abstract = require('abstract-leveldown/abstract/chained-batch-test') 5 | 6 | abstract.all(leveldown, test, testCommon) 7 | -------------------------------------------------------------------------------- /test/approximate-size-test.js: -------------------------------------------------------------------------------- 1 | const test = require('tape') 2 | , testCommon = require('abstract-leveldown/testCommon') 3 | , leveldown = require('../') 4 | , abstract = require('abstract-leveldown/abstract/approximate-size-test') 5 | 6 | abstract.all(leveldown, test, testCommon) 7 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | sudo: false 2 | 3 | language: node_js 4 | 5 | env: 6 | - CXX=g++-4.8 7 | 8 | addons: 9 | apt: 10 | sources: 11 | - ubuntu-toolchain-r-test 12 | packages: 13 | - g++-4.8 14 | 15 | before_install: 16 | - export JOBS=max 17 | 18 | node_js: 19 | - "5" 20 | - "4" 21 | - "0.12" 22 | - "0.10" 23 | -------------------------------------------------------------------------------- /.npmignore: -------------------------------------------------------------------------------- 1 | *.tar.gz 2 | build/ 3 | build-pre-gyp/ 4 | test-data.tar 5 | test-data.db.tar 6 | deps/leveldb/leveldb-basho/ 7 | deps/leveldb/leveldb-hyper/ 8 | deps/leveldb/leveldb-rocksdb/ 9 | deps/snappy/snappy-1.1.1/testdata/ 10 | leakydb 11 | bench/ 12 | test/ 13 | deps/leveldb/leveldb-1.17.0/doc/ 14 | README 15 | INSTALL 16 | NEWS 17 | AUTHORS 18 | -------------------------------------------------------------------------------- /test/put-get-del-test.js: -------------------------------------------------------------------------------- 1 | const test = require('tape') 2 | , testCommon = require('abstract-leveldown/testCommon') 3 | , leveldown = require('../') 4 | , fs = require('fs') 5 | , path = require('path') 6 | , testBuffer = fs.readFileSync(path.join(__dirname, 'data/testdata.bin')) 7 | , 
abstract = require('abstract-leveldown/abstract/put-get-del-test') 8 | 9 | abstract.all(leveldown, test, testCommon, testBuffer) 10 | -------------------------------------------------------------------------------- /test/close-test.js: -------------------------------------------------------------------------------- 1 | const test = require('tape') 2 | , testCommon = require('abstract-leveldown/testCommon') 3 | , leveldown = require('../') 4 | , abstract = require('abstract-leveldown/abstract/close-test') 5 | 6 | module.exports.setUp = function () { 7 | test('setUp', testCommon.setUp) 8 | } 9 | 10 | module.exports.close = abstract.close 11 | 12 | module.exports.tearDown = function () { 13 | test('tearDown', testCommon.tearDown) 14 | } 15 | 16 | module.exports.all = function (leveldown) { 17 | module.exports.setUp() 18 | module.exports.close(leveldown, test, testCommon) 19 | module.exports.tearDown() 20 | } 21 | 22 | module.exports.all(leveldown) 23 | -------------------------------------------------------------------------------- /.dntrc: -------------------------------------------------------------------------------- 1 | ## DNT config file 2 | ## see https://github.com/rvagg/dnt 3 | 4 | NODE_VERSIONS="\ 5 | v0.10.40 \ 6 | v0.12.7 \ 7 | " 8 | IOJS_VERSIONS="\ 9 | v1.8.4 \ 10 | v2.0.1 \ 11 | v2.3.4 \ 12 | v3.0.0-rc.3 \ 13 | " 14 | OUTPUT_PREFIX="flat-rocks-" 15 | TEST_CMD="\ 16 | cd /dnt/ && \ 17 | npm install && \ 18 | npm run-script prebuild \ 19 | --nodedir=/usr/src/node/ && \ 20 | node_modules/.bin/tape test/*-test.js \ 21 | " 22 | # for tape 23 | LOG_OK_CMD="tail -2 | head -1 | sed 's/^# //'" 24 | -------------------------------------------------------------------------------- /src/batch_async.cc: -------------------------------------------------------------------------------- 1 | /* Copyright (c) 2012-2016 LevelDOWN contributors 2 | * See list at 3 | * MIT License 4 | */ 5 | 6 | 7 | #include 8 | #include "batch.h" 9 | #include "batch_async.h" 10 | 11 | namespace flat_rocks { 12 | 13 | /** NEXT WORKER **/ 14 | 15 | BatchWriteWorker::BatchWriteWorker ( 16 | Batch* batch 17 | , Nan::Callback *callback 18 | ) : AsyncWorker(NULL, callback) 19 | , batch(batch) 20 | {}; 21 | 22 | BatchWriteWorker::~BatchWriteWorker () {} 23 | 24 | void BatchWriteWorker::Execute () { 25 | SetStatus(batch->Write()); 26 | } 27 | 28 | } // namespace flat_rocks 29 | -------------------------------------------------------------------------------- /bench/write-sorted-plot.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | gnuplot < 3 | * MIT License 4 | */ 5 | 6 | #ifndef LD_BATCH_ASYNC_H 7 | #define LD_BATCH_ASYNC_H 8 | 9 | #include 10 | #include 11 | 12 | #include "async.h" 13 | #include "batch.h" 14 | #include "database.h" 15 | 16 | namespace flat_rocks { 17 | 18 | class BatchWriteWorker : public AsyncWorker { 19 | public: 20 | BatchWriteWorker ( 21 | Batch* batch 22 | , Nan::Callback *callback 23 | ); 24 | 25 | virtual ~BatchWriteWorker (); 26 | virtual void Execute (); 27 | 28 | private: 29 | Batch* batch; 30 | }; 31 | 32 | } // namespace flat_rocks 33 | 34 | #endif 35 | -------------------------------------------------------------------------------- /bench/write-random-plot.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | gnuplot < 3 | * MIT License 4 | */ 5 | 6 | #ifndef LD_ASYNC_H 7 | #define LD_ASYNC_H 8 | 9 | #include 10 | #include 11 | #include "database.h" 12 | 13 | namespace flat_rocks { 14 
| 15 | class Database; 16 | 17 | /* abstract */ class AsyncWorker : public Nan::AsyncWorker { 18 | public: 19 | AsyncWorker ( 20 | flat_rocks::Database* database 21 | , Nan::Callback *callback 22 | ) : Nan::AsyncWorker(callback), database(database) { } 23 | 24 | protected: 25 | void SetStatus(rocksdb::Status status) { 26 | this->status = status; 27 | if (!status.ok()) 28 | SetErrorMessage(status.ToString().c_str()); 29 | } 30 | Database* database; 31 | private: 32 | rocksdb::Status status; 33 | }; 34 | 35 | } // namespace flat_rocks 36 | 37 | #endif 38 | -------------------------------------------------------------------------------- /src/batch.h: -------------------------------------------------------------------------------- 1 | #ifndef LD_BATCH_H 2 | #define LD_BATCH_H 3 | 4 | #include 5 | #include 6 | 7 | #include 8 | 9 | #include "database.h" 10 | 11 | namespace flat_rocks { 12 | 13 | class Batch : public Nan::ObjectWrap { 14 | public: 15 | static void Init(); 16 | static v8::Local NewInstance ( 17 | v8::Local database 18 | , v8::Local optionsObj 19 | ); 20 | 21 | Batch (flat_rocks::Database* database, bool sync); 22 | ~Batch (); 23 | rocksdb::Status Write (); 24 | 25 | private: 26 | flat_rocks::Database* database; 27 | rocksdb::WriteOptions* options; 28 | rocksdb::WriteBatch* batch; 29 | bool hasData; // keep track of whether we're writing data or not 30 | 31 | static NAN_METHOD(New); 32 | static NAN_METHOD(Put); 33 | static NAN_METHOD(Del); 34 | static NAN_METHOD(Clear); 35 | static NAN_METHOD(Write); 36 | }; 37 | 38 | } // namespace flat_rocks 39 | 40 | #endif 41 | -------------------------------------------------------------------------------- /test/stack-blower.js: -------------------------------------------------------------------------------- 1 | /** 2 | * This test uses infinite recursion to test iterator creation with limited 3 | * stack space. In order to isolate the test harness, we run in a different 4 | * process. This is achieved through a fork() command in 5 | * iterator-recursion-test.js. To prevent tap from trying to run this test 6 | * directly, we check for a command-line argument. 
7 | */ 8 | const testCommon = require('abstract-leveldown/testCommon') 9 | , leveldown = require('../') 10 | 11 | if (process.argv[2] == 'run') { 12 | testCommon.cleanup(function () { 13 | var db = leveldown(testCommon.location()) 14 | , depth = 0 15 | 16 | db.open(function () { 17 | function recurse() { 18 | db.iterator({ start: '0' }) 19 | depth++ 20 | recurse() 21 | } 22 | 23 | try { 24 | recurse() 25 | } catch (e) { 26 | process.send("Catchable error at depth " + depth) 27 | } 28 | }) 29 | }) 30 | } 31 | -------------------------------------------------------------------------------- /src/flatrocks_async.h: -------------------------------------------------------------------------------- 1 | /* Copyright (c) 2012-2016 LevelDOWN contributors 2 | * See list at 3 | * MIT License 4 | */ 5 | 6 | #ifndef LD_FLATROCKS_ASYNC_H 7 | #define LD_FLATROCKS_ASYNC_H 8 | 9 | #include 10 | 11 | #include "async.h" 12 | 13 | namespace flat_rocks { 14 | 15 | class DestroyWorker : public AsyncWorker { 16 | public: 17 | DestroyWorker ( 18 | Nan::Utf8String* location 19 | , Nan::Callback *callback 20 | ); 21 | 22 | virtual ~DestroyWorker (); 23 | virtual void Execute (); 24 | 25 | private: 26 | Nan::Utf8String* location; 27 | }; 28 | 29 | class RepairWorker : public AsyncWorker { 30 | public: 31 | RepairWorker ( 32 | Nan::Utf8String* location 33 | , Nan::Callback *callback 34 | ); 35 | 36 | virtual ~RepairWorker (); 37 | virtual void Execute (); 38 | 39 | private: 40 | Nan::Utf8String* location; 41 | }; 42 | 43 | } // namespace flat_rocks 44 | 45 | #endif 46 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | ===================== 3 | 4 | Copyright (c) 2016 John Manero 5 | --------------------------- 6 | 7 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 8 | 9 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 10 | 11 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 12 | -------------------------------------------------------------------------------- /src/common.h: -------------------------------------------------------------------------------- 1 | /* Copyright (c) 2012-2016 LevelDOWN contributors 2 | * See list at 3 | * MIT License 4 | */ 5 | 6 | #ifndef LD_COMMON_H 7 | #define LD_COMMON_H 8 | 9 | #include 10 | 11 | namespace flat_rocks { 12 | 13 | NAN_INLINE bool BooleanOptionValue(v8::Local options, 14 | const char* _key, 15 | bool def = false) { 16 | v8::Local key = Nan::New(_key).ToLocalChecked(); 17 | return !options.IsEmpty() 18 | && options->Has(key) 19 | ? 
options->Get(key)->BooleanValue() 20 | : def; 21 | } 22 | 23 | NAN_INLINE uint32_t UInt32OptionValue(v8::Local options, 24 | const char* _key, 25 | uint32_t def) { 26 | v8::Local key = Nan::New(_key).ToLocalChecked(); 27 | return !options.IsEmpty() 28 | && options->Has(key) 29 | && options->Get(key)->IsNumber() 30 | ? options->Get(key)->Uint32Value() 31 | : def; 32 | } 33 | 34 | } // namespace flat_rocks 35 | 36 | #endif 37 | -------------------------------------------------------------------------------- /binding.gyp: -------------------------------------------------------------------------------- 1 | { 2 | "targets": [{ 3 | "target_name": "flat-rocks", "conditions": [ 4 | ['OS == "linux"', { 5 | 'cflags': [ 6 | ], 'cflags!': ['-fno-tree-vrp'], 7 | "libraries": [ 8 | "/usr/lib/librocksdb.so" 9 | ] 10 | }], ['OS == "mac"', { 11 | 'xcode_settings': { 12 | 'WARNING_CFLAGS': [ 13 | '-Wno-ignored-qualifiers' 14 | ], 'OTHER_CPLUSPLUSFLAGS': [ 15 | '-mmacosx-version-min=10.7', '-stdlib=libc++' 16 | ], 'GCC_ENABLE_CPP_EXCEPTIONS': 'YES', 'MACOSX_DEPLOYMENT_TARGET': '10.7' 17 | }, "libraries": [ 18 | "librocksdb.dylib" 19 | ] 20 | }] 21 | ], "cflags": [ 22 | '-std=c++11' 23 | ], "include_dirs": [ 24 | " 3 | * MIT License 4 | */ 5 | 6 | #include 7 | 8 | #include "flatrocks.h" 9 | #include "flatrocks_async.h" 10 | 11 | namespace flat_rocks { 12 | 13 | /** DESTROY WORKER **/ 14 | 15 | DestroyWorker::DestroyWorker ( 16 | Nan::Utf8String* location 17 | , Nan::Callback *callback 18 | ) : AsyncWorker(NULL, callback) 19 | , location(location) 20 | {}; 21 | 22 | DestroyWorker::~DestroyWorker () { 23 | delete location; 24 | } 25 | 26 | void DestroyWorker::Execute () { 27 | rocksdb::Options options; 28 | SetStatus(rocksdb::DestroyDB(**location, options)); 29 | } 30 | 31 | /** REPAIR WORKER **/ 32 | 33 | RepairWorker::RepairWorker ( 34 | Nan::Utf8String* location 35 | , Nan::Callback *callback 36 | ) : AsyncWorker(NULL, callback) 37 | , location(location) 38 | {}; 39 | 40 | RepairWorker::~RepairWorker () { 41 | delete location; 42 | } 43 | 44 | void RepairWorker::Execute () { 45 | rocksdb::Options options; 46 | SetStatus(rocksdb::RepairDB(**location, options)); 47 | } 48 | 49 | } // namespace flat_rocks 50 | -------------------------------------------------------------------------------- /test/make.js: -------------------------------------------------------------------------------- 1 | const test = require('tape') 2 | , testCommon = require('abstract-leveldown/testCommon') 3 | , cleanup = testCommon.cleanup 4 | , location = testCommon.location 5 | , leveldown = require('../') 6 | 7 | function makeTest (name, testFn) { 8 | test(name, function (t) { 9 | cleanup(function () { 10 | var loc = location() 11 | , db = leveldown(loc) 12 | , done = function (close) { 13 | if (close === false) 14 | return cleanup(t.end.bind(t)) 15 | db.close(function (err) { 16 | t.notOk(err, 'no error from close()') 17 | cleanup(t.end.bind(t)) 18 | }) 19 | } 20 | db.open(function (err) { 21 | t.notOk(err, 'no error from open()') 22 | db.batch( 23 | [ 24 | { type: 'put', key: 'one', value: '1' } 25 | , { type: 'put', key: 'two', value: '2' } 26 | , { type: 'put', key: 'three', value: '3' } 27 | ] 28 | , function (err) { 29 | t.notOk(err, 'no error from batch()') 30 | testFn(db, t, done, loc) 31 | } 32 | ) 33 | }) 34 | }) 35 | }) 36 | } 37 | 38 | module.exports = makeTest 39 | -------------------------------------------------------------------------------- /src/iterator_async.h: 
-------------------------------------------------------------------------------- 1 | /* Copyright (c) 2012-2016 LevelDOWN contributors 2 | * See list at 3 | * MIT License 4 | */ 5 | 6 | #ifndef LD_ITERATOR_ASYNC_H 7 | #define LD_ITERATOR_ASYNC_H 8 | 9 | #include 10 | #include 11 | 12 | #include "async.h" 13 | #include "iterator.h" 14 | 15 | namespace flat_rocks { 16 | 17 | class NextWorker : public AsyncWorker { 18 | public: 19 | NextWorker ( 20 | Iterator* iterator 21 | , Nan::Callback *callback 22 | , void (*localCallback)(Iterator*) 23 | ); 24 | 25 | virtual ~NextWorker (); 26 | virtual void Execute (); 27 | virtual void HandleOKCallback (); 28 | 29 | private: 30 | Iterator* iterator; 31 | void (*localCallback)(Iterator*); 32 | std::vector > result; 33 | bool ok; 34 | }; 35 | 36 | class EndWorker : public AsyncWorker { 37 | public: 38 | EndWorker ( 39 | Iterator* iterator 40 | , Nan::Callback *callback 41 | ); 42 | 43 | virtual ~EndWorker (); 44 | virtual void Execute (); 45 | virtual void HandleOKCallback (); 46 | 47 | private: 48 | Iterator* iterator; 49 | }; 50 | 51 | } // namespace flat_rocks 52 | 53 | #endif 54 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "flat-rocks", 3 | "description": "A Node.js RocksDB binding for LevelUP", 4 | "version": "1.0.0", 5 | "author": "John Manero ", 6 | "repository": { 7 | "type": "git", 8 | "url": "https://github.com/jmanero/flat-rocks.git" 9 | }, 10 | "homepage": "https://github.com/jmanero/flat-rocks", 11 | "keywords": [ 12 | "rocksdb", 13 | "leveldown", 14 | "levelup", 15 | "level" 16 | ], 17 | "main": "flat-rocks.js", 18 | "dependencies": { 19 | "abstract-leveldown": "~2.6.1", 20 | "bindings": "~1.2.1", 21 | "fast-future": "~1.0.1", 22 | "nan": "~2.4.0", 23 | "prebuild": "^4.3.0" 24 | }, 25 | "devDependencies": { 26 | "async": "~2.1.2", 27 | "delayed": "~1.0.1", 28 | "du": "~0.1.0", 29 | "faucet": "0.0.1", 30 | "mkfiletree": "~1.0.1", 31 | "monotonic-timestamp": "~0.0.9", 32 | "node-uuid": "~1.4.7", 33 | "optimist": "~0.6.1", 34 | "readfiletree": "~0.0.1", 35 | "rimraf": "~2.5.4", 36 | "slump": "~2.0.1", 37 | "tape": "~4.6.2" 38 | }, 39 | "scripts": { 40 | "install": "prebuild --install", 41 | "test": "tape test/*-test.js | faucet", 42 | "rebuild": "prebuild --compile", 43 | "prebuild": "prebuild --all --strip --verbose" 44 | }, 45 | "license": "MIT", 46 | "gypfile": true 47 | } 48 | -------------------------------------------------------------------------------- /.jshintrc: -------------------------------------------------------------------------------- 1 | { 2 | "predef": [ ] 3 | , "bitwise": false 4 | , "camelcase": false 5 | , "curly": false 6 | , "eqeqeq": false 7 | , "forin": false 8 | , "immed": false 9 | , "latedef": false 10 | , "noarg": true 11 | , "noempty": true 12 | , "nonew": true 13 | , "plusplus": false 14 | , "quotmark": true 15 | , "regexp": false 16 | , "undef": true 17 | , "unused": true 18 | , "strict": false 19 | , "trailing": true 20 | , "maxlen": 120 21 | , "asi": true 22 | , "boss": true 23 | , "debug": true 24 | , "eqnull": true 25 | , "esnext": true 26 | , "evil": true 27 | , "expr": true 28 | , "funcscope": false 29 | , "globalstrict": false 30 | , "iterator": false 31 | , "lastsemic": true 32 | , "laxbreak": true 33 | , "laxcomma": true 34 | , "loopfunc": true 35 | , "multistr": false 36 | , "onecase": false 37 | , "proto": false 38 | , "regexdash": false 39 | , 
"scripturl": true 40 | , "smarttabs": false 41 | , "shadow": false 42 | , "sub": true 43 | , "supernew": false 44 | , "validthis": true 45 | , "browser": true 46 | , "couch": false 47 | , "devel": false 48 | , "dojo": false 49 | , "mootools": false 50 | , "node": true 51 | , "nonstandard": true 52 | , "prototypejs": false 53 | , "rhino": false 54 | , "worker": true 55 | , "wsh": false 56 | , "nomen": false 57 | , "onevar": true 58 | , "passfail": false 59 | } -------------------------------------------------------------------------------- /iterator.js: -------------------------------------------------------------------------------- 1 | const Util = require('util'); 2 | const AbstractIterator = require('abstract-leveldown').AbstractIterator; 3 | const fastFuture = require('fast-future'); 4 | 5 | function Iterator(db, options) { 6 | AbstractIterator.call(this, db); 7 | 8 | this.binding = db.binding.iterator(options); 9 | this.cache = null; 10 | this.finished = false; 11 | this.fastFuture = fastFuture(); 12 | } 13 | Util.inherits(Iterator, AbstractIterator); 14 | 15 | Iterator.prototype.seek = function(key) { 16 | if (typeof key !== 'string') 17 | throw new Error('seek requires a string key'); 18 | this.cache = null; 19 | this.binding.seek(key); 20 | }; 21 | 22 | Iterator.prototype._next = function(callback) { 23 | var that = this; 24 | var key, value; 25 | 26 | if (this.cache && this.cache.length) { 27 | key = this.cache.pop(); 28 | value = this.cache.pop(); 29 | 30 | this.fastFuture(function() { 31 | callback(null, key, value); 32 | }); 33 | 34 | } else if (this.finished) { 35 | this.fastFuture(function() { 36 | callback(); 37 | }); 38 | } else { 39 | this.binding.next(function(err, array, finished) { 40 | if (err) return callback(err); 41 | 42 | that.cache = array; 43 | that.finished = finished; 44 | that._next(callback); 45 | }); 46 | } 47 | 48 | return this; 49 | }; 50 | 51 | Iterator.prototype._end = function(callback) { 52 | delete this.cache; 53 | this.binding.end(callback); 54 | }; 55 | 56 | module.exports = Iterator; 57 | -------------------------------------------------------------------------------- /test/leak-tester.js: -------------------------------------------------------------------------------- 1 | const BUFFERS = false 2 | 3 | var leveldown = require('..') 4 | , crypto = require('crypto') 5 | , putCount = 0 6 | , getCount = 0 7 | , rssBase 8 | , db 9 | 10 | function run () { 11 | var key = 'long key to test memory usage ' + String(Math.floor(Math.random() * 10000000)) 12 | 13 | if (BUFFERS) key = new Buffer(key) 14 | 15 | db.get(key, function (err, value) { 16 | getCount++ 17 | 18 | if (err) { 19 | var putValue = crypto.randomBytes(1024) 20 | if (!BUFFERS) putValue = putValue.toString('hex') 21 | 22 | return db.put(key, putValue, function () { 23 | putCount++ 24 | process.nextTick(run) 25 | }) 26 | } 27 | 28 | process.nextTick(run) 29 | }) 30 | 31 | if (getCount % 1000 === 0) { 32 | if (typeof gc != 'undefined') 33 | gc() 34 | console.log( 35 | 'getCount =' 36 | , getCount 37 | , ', putCount = ' 38 | , putCount 39 | , ', rss =' 40 | , Math.round(process.memoryUsage().rss / rssBase * 100) + '%' 41 | , Math.round(process.memoryUsage().rss / 1024 / 1024) + 'M' 42 | , JSON.stringify([0,1,2,3,4,5,6].map(function (l) { 43 | return db.getProperty('rocksdb.num-files-at-level' + l) 44 | })) 45 | ) 46 | } 47 | } 48 | 49 | leveldown.destroy('./leakydb', function () { 50 | db = leveldown('./leakydb') 51 | db.open({ xcacheSize: 0, xmaxOpenFiles: 10 }, function () { 52 | rssBase = 
process.memoryUsage().rss 53 | run() 54 | }) 55 | }) -------------------------------------------------------------------------------- /bench/write-random.js: -------------------------------------------------------------------------------- 1 | const leveldown = require('../') 2 | , crypto = require('crypto') 3 | , fs = require('fs') 4 | , du = require('du') 5 | , uuid = require('node-uuid') 6 | 7 | , entryCount = 10000000 8 | , concurrency = 10 9 | , timesFile = './write_random_times.csv' 10 | , dbDir = './write_random.db' 11 | , data = crypto.randomBytes(256) // buffer 12 | 13 | var db = leveldown(dbDir) 14 | , timesStream = fs.createWriteStream(timesFile, 'utf8') 15 | 16 | function report (ms) { 17 | console.log('Wrote', entryCount, 'in', Math.floor(ms / 1000) + 's') 18 | timesStream.end() 19 | du(dbDir, function (err, size) { 20 | if (err) 21 | throw err 22 | console.log('Database size:', Math.floor(size / 1024 / 1024) + 'M') 23 | }) 24 | console.log('Wrote times to ', timesFile) 25 | } 26 | 27 | db.open(function (err) { 28 | if (err) 29 | throw err 30 | 31 | var inProgress = 0 32 | , totalWrites = 0 33 | , startTime = Date.now() 34 | , writeBuf = '' 35 | 36 | function write() { 37 | if (totalWrites % 100000 == 0) console.log(inProgress, totalWrites) 38 | 39 | if (totalWrites % 1000 == 0) { 40 | timesStream.write(writeBuf) 41 | writeBuf = '' 42 | } 43 | 44 | if (totalWrites++ == entryCount) 45 | return report(Date.now() - startTime) 46 | 47 | if (inProgress >= concurrency || totalWrites > entryCount) 48 | return 49 | 50 | var time = process.hrtime() 51 | inProgress++ 52 | 53 | db.put(uuid.v4(), data, function (err) { 54 | if (err) 55 | throw err 56 | writeBuf += (Date.now() - startTime) + ',' + process.hrtime(time)[1] + '\n' 57 | inProgress-- 58 | process.nextTick(write) 59 | }) 60 | 61 | process.nextTick(write) 62 | } 63 | 64 | write() 65 | }) 66 | -------------------------------------------------------------------------------- /test/cleanup-hanging-iterators-test.js: -------------------------------------------------------------------------------- 1 | const test = require('tape') 2 | , testCommon = require('abstract-leveldown/testCommon') 3 | , leveldown = require('../') 4 | , makeTest = require('./make') 5 | 6 | makeTest('test ended iterator', function (db, t, done) { 7 | // standard iterator with an end() properly called, easy 8 | 9 | var it = db.iterator({ keyAsBuffer: false, valueAsBuffer: false }) 10 | it.next(function (err, key, value) { 11 | t.notOk(err, 'no error from next()') 12 | t.equal(key, 'one', 'correct key') 13 | t.equal(value, '1', 'correct value') 14 | it.end(function (err) { 15 | t.notOk(err, 'no error from next()') 16 | done() 17 | }) 18 | }) 19 | }) 20 | 21 | makeTest('test non-ended iterator', function (db, t, done) { 22 | // no end() call on our iterator, cleanup should crash Node if not handled properly 23 | var it = db.iterator({ keyAsBuffer: false, valueAsBuffer: false }) 24 | it.next(function (err, key, value) { 25 | t.notOk(err, 'no error from next()') 26 | t.equal(key, 'one', 'correct key') 27 | t.equal(value, '1', 'correct value') 28 | done() 29 | }) 30 | }) 31 | 32 | makeTest('test multiple non-ended iterators', function (db, t, done) { 33 | // no end() call on our iterator, cleanup should crash Node if not handled properly 34 | db.iterator() 35 | db.iterator().next(function () {}) 36 | db.iterator().next(function () {}) 37 | db.iterator().next(function () {}) 38 | setTimeout(done, 50) 39 | }) 40 | 41 | makeTest('test ending iterators', function (db, t, 
done) { 42 | // at least one end() should be in progress when we try to close the db 43 | var it1 = db.iterator().next(function () { 44 | it1.end(function () {}) 45 | }) 46 | , it2 = db.iterator().next(function () { 47 | it2.end(function () {}) 48 | done() 49 | }) 50 | }) 51 | -------------------------------------------------------------------------------- /bench/write-sorted.js: -------------------------------------------------------------------------------- 1 | const leveldown = require('../') 2 | , timestamp = require('monotonic-timestamp') 3 | , crypto = require('crypto') 4 | , fs = require('fs') 5 | , du = require('du') 6 | 7 | , entryCount = 10000000 8 | , concurrency = 10 9 | , timesFile = './write_sorted_times.csv' 10 | , dbDir = './write_sorted.db' 11 | , data = crypto.randomBytes(256) // buffer 12 | 13 | var db = leveldown(dbDir) 14 | , timesStream = fs.createWriteStream(timesFile, 'utf8') 15 | 16 | function report (ms) { 17 | console.log('Wrote', entryCount, 'in', Math.floor(ms / 1000) + 's') 18 | timesStream.end() 19 | du(dbDir, function (err, size) { 20 | if (err) 21 | throw err 22 | console.log('Database size:', Math.floor(size / 1024 / 1024) + 'M') 23 | }) 24 | console.log('Wrote times to ', timesFile) 25 | } 26 | 27 | db.open({ errorIfExists: true, createIfMissing: true }, function (err) { 28 | if (err) 29 | throw err 30 | 31 | var inProgress = 0 32 | , totalWrites = 0 33 | , startTime = Date.now() 34 | , writeBuf = '' 35 | 36 | function write() { 37 | if (totalWrites % 100000 == 0) console.log(inProgress, totalWrites) 38 | 39 | if (totalWrites % 1000 == 0) { 40 | timesStream.write(writeBuf) 41 | writeBuf = '' 42 | } 43 | 44 | if (totalWrites++ == entryCount) 45 | return report(Date.now() - startTime) 46 | 47 | if (inProgress >= concurrency || totalWrites > entryCount) 48 | return 49 | 50 | var time = process.hrtime() 51 | inProgress++ 52 | 53 | db.put(timestamp(), data, function (err) { 54 | if (err) 55 | throw err 56 | writeBuf += (Date.now() - startTime) + ',' + process.hrtime(time)[1] + '\n' 57 | inProgress-- 58 | process.nextTick(write) 59 | }) 60 | 61 | process.nextTick(write) 62 | } 63 | 64 | write() 65 | }) 66 | -------------------------------------------------------------------------------- /test/getproperty-test.js: -------------------------------------------------------------------------------- 1 | const test = require('tape') 2 | , testCommon = require('abstract-leveldown/testCommon') 3 | , leveldown = require('../') 4 | 5 | var db 6 | 7 | test('setUp common', testCommon.setUp) 8 | 9 | test('setUp db', function (t) { 10 | db = leveldown(testCommon.location()) 11 | db.open(t.end.bind(t)) 12 | }) 13 | 14 | test('test argument-less getProperty() throws', function (t) { 15 | t.throws( 16 | db.getProperty.bind(db) 17 | , { name: 'Error', message: 'getProperty() requires a valid `property` argument' } 18 | , 'no-arg getProperty() throws' 19 | ) 20 | t.end() 21 | }) 22 | 23 | test('test non-string getProperty() throws', function (t) { 24 | t.throws( 25 | db.getProperty.bind(db, {}) 26 | , { name: 'Error', message: 'getProperty() requires a valid `property` argument' } 27 | , 'no-arg getProperty() throws' 28 | ) 29 | t.end() 30 | }) 31 | 32 | test('test invalid getProperty() returns empty string', function (t) { 33 | t.equal(db.getProperty('foo'), '', 'invalid property') 34 | t.equal(db.getProperty('rocksdb.foo'), '', 'invalid rocksdb.* property') 35 | t.end() 36 | }) 37 | 38 | test('test invalid getProperty("rocksdb.num-files-at-levelN") returns numbers', function (t) 
{ 39 | for (var i = 0; i < 7; i++) 40 | t.equal(db.getProperty('rocksdb.num-files-at-level' + i), '0', '"rocksdb.num-files-at-levelN" === "0"') 41 | t.end() 42 | }) 43 | 44 | test('test invalid getProperty("rocksdb.stats")', function (t) { 45 | t.ok(db.getProperty('rocksdb.stats').split('\n').length > 3, 'rocksdb.stats has > 3 newlines') 46 | t.end() 47 | }) 48 | 49 | test('test invalid getProperty("rocksdb.sstables")', function (t) { 50 | var expected = [0,1,2,3,4,5,6].map(function (l) { return '--- level ' + l + ' ---' }).join('\n') + '\n' 51 | t.equal(db.getProperty('rocksdb.sstables'), expected, 'rocksdb.sstables') 52 | t.end() 53 | }) 54 | 55 | test('tearDown', function (t) { 56 | db.close(testCommon.tearDown.bind(null, t)) 57 | }) 58 | -------------------------------------------------------------------------------- /src/flatrocks.cc: -------------------------------------------------------------------------------- 1 | /* Copyright (c) 2012-2016 LevelDOWN contributors 2 | * See list at 3 | * MIT License 4 | */ 5 | 6 | #include 7 | 8 | #include "flatrocks.h" 9 | #include "database.h" 10 | #include "iterator.h" 11 | #include "batch.h" 12 | #include "flatrocks_async.h" 13 | 14 | namespace flat_rocks { 15 | 16 | NAN_METHOD(DestroyDB) { 17 | Nan::HandleScope scope; 18 | 19 | Nan::Utf8String* location = new Nan::Utf8String(info[0]); 20 | 21 | Nan::Callback* callback = new Nan::Callback( 22 | v8::Local::Cast(info[1])); 23 | 24 | DestroyWorker* worker = new DestroyWorker( 25 | location 26 | , callback 27 | ); 28 | 29 | Nan::AsyncQueueWorker(worker); 30 | 31 | info.GetReturnValue().SetUndefined(); 32 | } 33 | 34 | NAN_METHOD(RepairDB) { 35 | Nan::HandleScope scope; 36 | 37 | Nan::Utf8String* location = new Nan::Utf8String(info[0]); 38 | 39 | Nan::Callback* callback = new Nan::Callback( 40 | v8::Local::Cast(info[1])); 41 | 42 | RepairWorker* worker = new RepairWorker( 43 | location 44 | , callback 45 | ); 46 | 47 | Nan::AsyncQueueWorker(worker); 48 | 49 | info.GetReturnValue().SetUndefined(); 50 | } 51 | 52 | void Init (v8::Local target) { 53 | Database::Init(); 54 | flat_rocks::Iterator::Init(); 55 | flat_rocks::Batch::Init(); 56 | 57 | v8::Local flatrocks = 58 | Nan::New(FlatRocks)->GetFunction(); 59 | 60 | flatrocks->Set( 61 | Nan::New("destroy").ToLocalChecked() 62 | , Nan::New(DestroyDB)->GetFunction() 63 | ); 64 | 65 | flatrocks->Set( 66 | Nan::New("repair").ToLocalChecked() 67 | , Nan::New(RepairDB)->GetFunction() 68 | ); 69 | 70 | target->Set(Nan::New("flatrocks").ToLocalChecked(), flatrocks); 71 | } 72 | 73 | NODE_MODULE(flatrocks, Init) 74 | 75 | } // namespace flat_rocks 76 | -------------------------------------------------------------------------------- /test/iterator-recursion-test.js: -------------------------------------------------------------------------------- 1 | const test = require('tape') 2 | , testCommon = require('abstract-leveldown/testCommon') 3 | , leveldown = require('../') 4 | , child_process = require('child_process') 5 | 6 | var db 7 | , sourceData = (function () { 8 | var d = [] 9 | , i = 0 10 | , k 11 | for (; i < 100000; i++) { 12 | k = (i < 10 ? 
'0' : '') + i 13 | d.push({ 14 | type : 'put' 15 | , key : k 16 | , value : Math.random() 17 | }) 18 | } 19 | return d 20 | }()) 21 | 22 | test('setUp common', testCommon.setUp) 23 | 24 | test('setUp db', function (t) { 25 | db = leveldown(testCommon.location()) 26 | db.open(function () { 27 | db.batch(sourceData, t.end.bind(t)) 28 | }) 29 | }) 30 | 31 | test('try to create an iterator with a blown stack', function (t) { 32 | // Reducing the stack size down from the default 984 for the child node 33 | // process makes it easier to trigger the bug condition. But making it too low 34 | // causes the child process to die for other reasons. 35 | var opts = { execArgv: [ '--stack-size=128' ] } 36 | , child = child_process.fork(__dirname + '/stack-blower.js', [ 'run' ], opts) 37 | 38 | t.plan(2) 39 | 40 | child.on('message', function (m) { 41 | t.ok(true, m) 42 | child.disconnect() 43 | }) 44 | 45 | child.on('exit', function (code, sig) { 46 | t.equal(code, 0, 'child exited normally') 47 | }) 48 | }) 49 | 50 | test('iterate over a large iterator with a large watermark', function (t) { 51 | var iterator = db.iterator({ 52 | highWaterMark: 10000000 53 | }) 54 | , count = 0 55 | , read = function () { 56 | iterator.next(function () { 57 | count++ 58 | 59 | if (!arguments.length) 60 | t.end() 61 | else 62 | read() 63 | }) 64 | } 65 | 66 | read() 67 | }) 68 | 69 | test('tearDown', function (t) { 70 | db.close(testCommon.tearDown.bind(null, t)) 71 | }) 72 | -------------------------------------------------------------------------------- /test/repair-test.js: -------------------------------------------------------------------------------- 1 | const test = require('tape') 2 | , fs = require('fs') 3 | , path = require('path') 4 | , mkfiletree = require('mkfiletree') 5 | , readfiletree = require('readfiletree') 6 | , testCommon = require('abstract-leveldown/testCommon') 7 | , leveldown = require('../') 8 | , makeTest = require('./make') 9 | 10 | test('test argument-less repair() throws', function (t) { 11 | t.throws( 12 | leveldown.repair 13 | , { name: 'Error', message: 'repair() requires `location` and `callback` arguments' } 14 | , 'no-arg repair() throws' 15 | ) 16 | t.end() 17 | }) 18 | 19 | test('test callback-less, 1-arg, repair() throws', function (t) { 20 | t.throws( 21 | leveldown.repair.bind(null, 'foo') 22 | , { name: 'Error', message: 'repair() requires `location` and `callback` arguments' } 23 | , 'callback-less, 1-arg repair() throws' 24 | ) 25 | t.end() 26 | }) 27 | 28 | test('test repair non-existent directory returns error', function (t) { 29 | leveldown.repair('/1/2/3/4', function (err) { 30 | if (process.platform !== 'win32') 31 | t.ok(/no such file or directory/i.test(err), 'error on callback') 32 | else 33 | t.ok(/IO error/i.test(err), 'error on callback') 34 | t.end() 35 | }) 36 | }) 37 | 38 | // a proxy indicator that RepairDB is being called and doing its thing 39 | makeTest('test repair() compacts', function (db, t, done, location) { 40 | db.close(function (err) { 41 | t.notOk(err, 'no error') 42 | var files = fs.readdirSync(location) 43 | t.ok(files.some(function (f) { return (/\.log$/).test(f) }), 'directory contains log file(s)') 44 | t.notOk(files.some(function (f) { return (/\.ldb$/).test(f) }), 'directory does not contain ldb file(s)') 45 | leveldown.repair(location, function () { 46 | files = fs.readdirSync(location) 47 | t.notOk(files.some(function (f) { return (/\.log$/).test(f) }), 'directory does not contain log file(s)') 48 | t.ok(files.some(function (f) { return 
(/\.ldb$/).test(f) }), 'directory contains ldb file(s)') 49 | done(false) 50 | }) 51 | }) 52 | }) 53 | -------------------------------------------------------------------------------- /test/iterator-test.js: -------------------------------------------------------------------------------- 1 | const test = require('tape') 2 | , testCommon = require('abstract-leveldown/testCommon') 3 | , leveldown = require('../') 4 | , abstract = require('abstract-leveldown/abstract/iterator-test') 5 | , make = require('./make') 6 | 7 | abstract.all(leveldown, test, testCommon) 8 | 9 | make('iterator throws if key is not a string', function (db, t, done) { 10 | var ite = db.iterator() 11 | var error 12 | try { 13 | ite.seek() 14 | } catch (e) { 15 | error = e 16 | } 17 | 18 | t.ok(error, 'had error') 19 | t.end() 20 | }) 21 | 22 | make('iterator is seekable', function (db, t, done) { 23 | var ite = db.iterator() 24 | ite.seek('two') 25 | ite.next(function (err, key, value) { 26 | t.error(err, 'no error') 27 | t.same(key.toString(), 'two', 'key matches') 28 | t.same(value.toString(), '2', 'value matches') 29 | ite.next(function (err, key, value) { 30 | t.error(err, 'no error') 31 | t.same(key, undefined, 'end of iterator') 32 | t.same(value, undefined, 'end of iterator') 33 | ite.end(done) 34 | }) 35 | }) 36 | }) 37 | 38 | make('reverse seek in the middle', function (db, t, done) { 39 | var ite = db.iterator({reverse: true, limit: 1}) 40 | ite.seek('three!') 41 | ite.next(function (err, key, value) { 42 | t.error(err, 'no error') 43 | t.same(key.toString(), 'three', 'key matches') 44 | t.same(value.toString(), '3', 'value matches') 45 | ite.end(done) 46 | }) 47 | }) 48 | 49 | make('iterator invalid seek', function (db, t, done) { 50 | var ite = db.iterator() 51 | ite.seek('zzz') 52 | ite.next(function (err, key, value) { 53 | t.error(err, 'no error') 54 | t.same(key, undefined, 'end of iterator') 55 | t.same(value, undefined, 'end of iterator') 56 | ite.end(done) 57 | }) 58 | }) 59 | 60 | make('reverse seek from invalid range', function (db, t, done) { 61 | var ite = db.iterator({reverse: true}) 62 | ite.seek('zzz') 63 | ite.next(function (err, key, value) { 64 | t.error(err, 'no error') 65 | t.same(key.toString(), 'two', 'end of iterator') 66 | t.same(value.toString(), '2', 'end of iterator') 67 | ite.end(done) 68 | }) 69 | }) 70 | -------------------------------------------------------------------------------- /test/leak-tester-batch.js: -------------------------------------------------------------------------------- 1 | const BUFFERS = false 2 | , CHAINED = false 3 | 4 | var leveldown = require('..') 5 | , crypto = require('crypto') 6 | , assert = require('assert') 7 | , writeCount = 0 8 | , rssBase 9 | , db 10 | 11 | function print () { 12 | if (writeCount % 100 === 0) { 13 | if (typeof gc != 'undefined') 14 | gc() 15 | 16 | console.log( 17 | 'writeCount =' 18 | , writeCount 19 | , ', rss =' 20 | , Math.round(process.memoryUsage().rss / rssBase * 100) + '%' 21 | , Math.round(process.memoryUsage().rss / 1024 / 1024) + 'M' 22 | , JSON.stringify([0,1,2,3,4,5,6].map(function (l) { 23 | return db.getProperty('rocksdb.num-files-at-level' + l) 24 | })) 25 | ) 26 | } 27 | } 28 | 29 | var run = CHAINED 30 | ? 
function () { 31 | var batch = db.batch() 32 | , i = 0 33 | , key 34 | , value 35 | 36 | for (i = 0; i < 100; i++) { 37 | key = 'long key to test memory usage ' + String(Math.floor(Math.random() * 10000000)) 38 | if (BUFFERS) 39 | key = new Buffer(key) 40 | value = crypto.randomBytes(1024) 41 | if (!BUFFERS) 42 | value = value.toString('hex') 43 | batch.put(key, value) 44 | } 45 | 46 | batch.write(function (err) { 47 | assert(!err) 48 | process.nextTick(run) 49 | }) 50 | 51 | writeCount++ 52 | 53 | print() 54 | } 55 | : function () { 56 | var batch = [] 57 | , i 58 | , key 59 | , value 60 | 61 | for (i = 0; i < 100; i++) { 62 | key = 'long key to test memory usage ' + String(Math.floor(Math.random() * 10000000)) 63 | if (BUFFERS) 64 | key = new Buffer(key) 65 | value = crypto.randomBytes(1024) 66 | if (!BUFFERS) 67 | value = value.toString('hex') 68 | batch.push({ type: 'put', key: key, value: value }) 69 | } 70 | 71 | db.batch(batch, function (err) { 72 | assert(!err) 73 | process.nextTick(run) 74 | }) 75 | 76 | writeCount++ 77 | 78 | print() 79 | } 80 | 81 | leveldown.destroy('./leakydb', function () { 82 | db = leveldown('./leakydb') 83 | db.open({ xcacheSize: 0, xmaxOpenFiles: 10 }, function () { 84 | rssBase = process.memoryUsage().rss 85 | run() 86 | }) 87 | }) -------------------------------------------------------------------------------- /src/iterator.h: -------------------------------------------------------------------------------- 1 | /* Copyright (c) 2012-2016 LevelDOWN contributors 2 | * See list at 3 | * MIT License 4 | */ 5 | 6 | #ifndef LD_ITERATOR_H 7 | #define LD_ITERATOR_H 8 | 9 | #include 10 | #include 11 | #include 12 | 13 | #include "flatrocks.h" 14 | #include "database.h" 15 | #include "async.h" 16 | 17 | namespace flat_rocks { 18 | 19 | class Database; 20 | class AsyncWorker; 21 | 22 | class Iterator : public Nan::ObjectWrap { 23 | public: 24 | static void Init (); 25 | static v8::Local NewInstance ( 26 | v8::Local database 27 | , v8::Local id 28 | , v8::Local optionsObj 29 | ); 30 | 31 | Iterator ( 32 | Database* database 33 | , uint32_t id 34 | , rocksdb::Slice* start 35 | , std::string* end 36 | , bool reverse 37 | , bool keys 38 | , bool values 39 | , int limit 40 | , std::string* lt 41 | , std::string* lte 42 | , std::string* gt 43 | , std::string* gte 44 | , bool fillCache 45 | , bool keyAsBuffer 46 | , bool valueAsBuffer 47 | , v8::Local &startHandle 48 | , size_t highWaterMark 49 | ); 50 | 51 | ~Iterator (); 52 | 53 | bool IteratorNext (std::vector >& result); 54 | rocksdb::Status IteratorStatus (); 55 | void IteratorEnd (); 56 | void Release (); 57 | 58 | private: 59 | Database* database; 60 | uint32_t id; 61 | rocksdb::Iterator* dbIterator; 62 | rocksdb::ReadOptions* options; 63 | rocksdb::Slice* start; 64 | std::string* end; 65 | bool seeking; 66 | bool reverse; 67 | bool keys; 68 | bool values; 69 | int limit; 70 | std::string* lt; 71 | std::string* lte; 72 | std::string* gt; 73 | std::string* gte; 74 | int count; 75 | size_t highWaterMark; 76 | 77 | public: 78 | bool keyAsBuffer; 79 | bool valueAsBuffer; 80 | bool nexting; 81 | bool ended; 82 | AsyncWorker* endWorker; 83 | 84 | private: 85 | Nan::Persistent persistentHandle; 86 | 87 | bool Read (std::string& key, std::string& value); 88 | bool GetIterator (); 89 | 90 | static NAN_METHOD(New); 91 | static NAN_METHOD(Seek); 92 | static NAN_METHOD(Next); 93 | static NAN_METHOD(End); 94 | }; 95 | 96 | } // namespace flat_rocks 97 | 98 | #endif 99 | 
-------------------------------------------------------------------------------- /bench/db-bench-plot.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | gnuplot < 5 | _[Image © Copyright 2016, Facebook](http://rocksdb.org/)_ 6 | 7 | **Native LevelUP bindings to RocksDB** 8 | 9 | ## Installing RocksDB 10 | 11 | ``` 12 | apt-get update 13 | apt-get -q -y install build-essential python libgflags-dev libsnappy-dev zlib1g-dev libbz2-dev 14 | 15 | git clone git://github.com/facebook/rocksdb.git 16 | cd rocksdb 17 | 18 | make static_lib 19 | make shared_lib 20 | make install -e INSTALL_PATH=/usr 21 | ``` 22 | 23 | ## Building and Dependencies 24 | 25 | Flat Rocks dynamically links to librocksdb. To build this module, both the RocksDB headers and dynamic library must be in the system's build-path and library-path, respectively. 26 | 27 | Run `npm install` to fetch dependencies and compile bindings. 28 | 29 | ## API and Usage 30 | 31 | Flat Rocks is API compatible with LevelDOWN. See the [LevelDOWN documentation](https://github.com/Level/leveldown#leveldown) for details. A minimal usage sketch appears at the end of this README. 32 | 33 | ## Thanks 34 | 35 | This project is forked from [LevelDOWN](https://github.com/Level/leveldown), a native binding for LevelDB. It is API compatible, and remains largely the work of LevelDOWN's contributors: 36 | 37 | * Rod Vagg (https://github.com/rvagg), 38 | * John Chesley (https://github.com/chesles/), 39 | * Jake Verbaten (https://github.com/raynos), 40 | * Dominic Tarr (https://github.com/dominictarr), 41 | * Max Ogden (https://github.com/maxogden), 42 | * Lars-Magnus Skog (https://github.com/ralphtheninja), 43 | * David Björklund (https://github.com/kesla), 44 | * Julian Gruber (https://github.com/juliangruber), 45 | * Paolo Fragomeni (https://github.com/hij1nx), 46 | * Anton Whalley (https://github.com/No9), 47 | * Matteo Collina (https://github.com/mcollina), 48 | * Pedro Teixeira (https://github.com/pgte), 49 | * James Halliday (https://github.com/substack) 50 | 51 | ## License and Copyright 52 | 53 | Copyright © 2016 John Manero. 54 | 55 | **Flat Rocks** is released and distributed under the terms of the MIT license. See the included LICENSE.md file for more details. 56 | 57 | **LevelDOWN** and **LevelUP** are the property of their respective contributors: 58 | 59 | * **LevelDOWN** is licensed under the MIT license. All rights not explicitly granted in the MIT license are reserved. See the [LevelDOWN project](https://github.com/Level/leveldown/blob/master/LICENSE.md) for more details. 60 | * **LevelUP** is licensed under the MIT license. All rights not explicitly granted in the MIT license are reserved. See the [LevelUP project](https://github.com/Level/levelup/blob/master/LICENSE.md) for more details. 61 | 62 | **RocksDB** is distributed under the terms of the BSD License. It is the property of Facebook, Inc. See the [RocksDB project](https://github.com/facebook/rocksdb/blob/master/LICENSE) for more details.
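
A minimal usage sketch of the LevelDOWN-style callback API described in "API and Usage" above. It assumes the package is installed as `flat-rocks` and that librocksdb is available at runtime; the database location `./example.db` and the key/value pair below are illustrative only, not part of this repository.

```
// Minimal sketch: location and keys are examples only.
// The API follows the abstract-leveldown callback contract.
const flatrocks = require('flat-rocks')

var db = flatrocks('./example.db')

db.open({ createIfMissing: true }, function (err) {
  if (err) throw err

  db.put('name', 'rocksdb', function (err) {
    if (err) throw err

    // asBuffer: false returns the value as a string instead of a Buffer
    db.get('name', { asBuffer: false }, function (err, value) {
      if (err) throw err
      console.log(value) // 'rocksdb'

      // Iterators are created, stepped with next(), and released with end()
      var it = db.iterator({ keyAsBuffer: false, valueAsBuffer: false })
      it.next(function (err, key, value) {
        if (err) throw err
        it.end(function (err) {
          if (err) throw err
          db.close(function () {})
        })
      })
    })
  })
})
```

Batches (`db.batch([...], callback)`, or the chained `db.batch().put(...).write(callback)` form) and the remaining LevelDOWN operations follow the same pattern; see the linked LevelDOWN documentation for the full option list.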
63 | -------------------------------------------------------------------------------- /src/iterator_async.cc: -------------------------------------------------------------------------------- 1 | /* Copyright (c) 2012-2016 LevelDOWN contributors 2 | * See list at 3 | * MIT License 4 | */ 5 | 6 | #include 7 | #include 8 | 9 | #include "database.h" 10 | #include "flatrocks.h" 11 | #include "async.h" 12 | #include "iterator_async.h" 13 | 14 | namespace flat_rocks { 15 | 16 | /** NEXT-MULTI WORKER **/ 17 | 18 | NextWorker::NextWorker ( 19 | Iterator* iterator 20 | , Nan::Callback *callback 21 | , void (*localCallback)(Iterator*) 22 | ) : AsyncWorker(NULL, callback) 23 | , iterator(iterator) 24 | , localCallback(localCallback) 25 | {}; 26 | 27 | NextWorker::~NextWorker () {} 28 | 29 | void NextWorker::Execute () { 30 | ok = iterator->IteratorNext(result); 31 | if (!ok) 32 | SetStatus(iterator->IteratorStatus()); 33 | } 34 | 35 | void NextWorker::HandleOKCallback () { 36 | size_t idx = 0; 37 | 38 | size_t arraySize = result.size() * 2; 39 | v8::Local returnArray = Nan::New(arraySize); 40 | 41 | for(idx = 0; idx < result.size(); ++idx) { 42 | std::pair row = result[idx]; 43 | std::string key = row.first; 44 | std::string value = row.second; 45 | 46 | v8::Local returnKey; 47 | if (iterator->keyAsBuffer) { 48 | //TODO: use NewBuffer, see database_async.cc 49 | returnKey = Nan::CopyBuffer((char*)key.data(), key.size()).ToLocalChecked(); 50 | } else { 51 | returnKey = Nan::New((char*)key.data(), key.size()).ToLocalChecked(); 52 | } 53 | 54 | v8::Local returnValue; 55 | if (iterator->valueAsBuffer) { 56 | //TODO: use NewBuffer, see database_async.cc 57 | returnValue = Nan::CopyBuffer((char*)value.data(), value.size()).ToLocalChecked(); 58 | } else { 59 | returnValue = Nan::New((char*)value.data(), value.size()).ToLocalChecked(); 60 | } 61 | 62 | // put the key & value in a descending order, so that they can be .pop:ed in javascript-land 63 | returnArray->Set(Nan::New(static_cast(arraySize - idx * 2 - 1)), returnKey); 64 | returnArray->Set(Nan::New(static_cast(arraySize - idx * 2 - 2)), returnValue); 65 | } 66 | 67 | // clean up & handle the next/end state see iterator.cc/checkEndCallback 68 | localCallback(iterator); 69 | 70 | v8::Local argv[] = { 71 | Nan::Null() 72 | , returnArray 73 | // when ok === false all data has been read, so it's then finished 74 | , Nan::New(!ok) 75 | }; 76 | callback->Call(3, argv); 77 | } 78 | 79 | /** END WORKER **/ 80 | 81 | EndWorker::EndWorker ( 82 | Iterator* iterator 83 | , Nan::Callback *callback 84 | ) : AsyncWorker(NULL, callback) 85 | , iterator(iterator) 86 | {}; 87 | 88 | EndWorker::~EndWorker () { } 89 | 90 | void EndWorker::Execute () { 91 | iterator->IteratorEnd(); 92 | } 93 | 94 | void EndWorker::HandleOKCallback () { 95 | iterator->Release(); 96 | callback->Call(0, NULL); 97 | } 98 | 99 | } // namespace flat_rocks 100 | -------------------------------------------------------------------------------- /test/compression-test.js: -------------------------------------------------------------------------------- 1 | /* Copyright (c) 2012-2016 LevelUP contributors 2 | * See list at 3 | * MIT License 4 | */ 5 | 6 | var async = require('async') 7 | , du = require('du') 8 | , delayed = require('delayed') 9 | , common = require('abstract-leveldown/testCommon') 10 | , leveldown = require('../') 11 | , test = require('tape') 12 | 13 | , compressableData = new Buffer(Array.apply(null, Array(1024 * 100)).map(function () { return 'aaaaaaaaaa' }).join('')) 14 | , multiples 
= 10 15 | , dataSize = compressableData.length * multiples 16 | 17 | , verify = function (location, compression, t) { 18 | du(location, function (err, size) { 19 | t.error(err) 20 | if (compression) 21 | t.ok(size < dataSize, 'on-disk size (' + size + ') is less than data size (' + dataSize + ')') 22 | else 23 | t.ok(size >= dataSize, 'on-disk size (' + size + ') is greater than data size (' + dataSize + ')') 24 | t.end() 25 | }) 26 | } 27 | 28 | // close, open, close again.. 'compaction' is also performed on open()s 29 | , cycle = function (db, compression, t, callback) { 30 | var location = db.location 31 | db.close(function (err) { 32 | t.error(err) 33 | db = leveldown(location) 34 | db.open({ errorIfExists: false, compression: compression }, function () { 35 | t.error(err) 36 | db.close(function (err) { 37 | t.error(err) 38 | callback() 39 | }) 40 | }) 41 | }) 42 | } 43 | 44 | test('Compression', function (t) { 45 | t.test('set up', common.setUp) 46 | 47 | t.test('test data is compressed by default (db.put())', function (t) { 48 | var db = leveldown(common.location()) 49 | db.open(function (err) { 50 | t.error(err) 51 | async.forEach( 52 | Array.apply(null, Array(multiples)).map(function (e, i) { 53 | return [ i, compressableData ] 54 | }) 55 | , function (args, callback) { 56 | db.put.apply(db, args.concat([callback])) 57 | } 58 | , cycle.bind(null, db, true, t, delayed.delayed(verify.bind(null, db.location, true, t), 0.01)) 59 | ) 60 | }) 61 | }) 62 | 63 | t.test('test data is not compressed with compression=false on open() (db.put())', function (t) { 64 | var db = leveldown(common.location()) 65 | db.open({ compression: false }, function (err) { 66 | t.error(err) 67 | async.forEach( 68 | Array.apply(null, Array(multiples)).map(function (e, i) { 69 | return [ i, compressableData ] 70 | }) 71 | , function (args, callback) { 72 | db.put.apply(db, args.concat([callback])) 73 | } 74 | , cycle.bind(null, db, false, t, delayed.delayed(verify.bind(null, db.location, false, t), 0.01)) 75 | ) 76 | }) 77 | }) 78 | 79 | t.test('test data is compressed by default (db.batch())', function (t) { 80 | var db = leveldown(common.location()) 81 | db.open(function (err) { 82 | t.error(err) 83 | db.batch( 84 | Array.apply(null, Array(multiples)).map(function (e, i) { 85 | return { type: 'put', key: i, value: compressableData } 86 | }) 87 | , cycle.bind(null, db, false, t, delayed.delayed(verify.bind(null, db.location, false, t), 0.01)) 88 | ) 89 | }) 90 | }) 91 | }) 92 | 93 | -------------------------------------------------------------------------------- /src/database.h: -------------------------------------------------------------------------------- 1 | /* Copyright (c) 2012-2016 LevelDOWN contributors 2 | * See list at 3 | * MIT License 4 | */ 5 | 6 | #ifndef LD_DATABASE_H 7 | #define LD_DATABASE_H 8 | 9 | #include 10 | #include 11 | #include 12 | 13 | #include 14 | #include 15 | #include 16 | #include 17 | 18 | #include "flatrocks.h" 19 | #include "iterator.h" 20 | 21 | namespace flat_rocks { 22 | 23 | NAN_METHOD(FlatRocks); 24 | 25 | struct Reference { 26 | Nan::Persistent handle; 27 | rocksdb::Slice slice; 28 | 29 | Reference(v8::Local obj, rocksdb::Slice slice) : slice(slice) { 30 | v8::Local _obj = Nan::New(); 31 | _obj->Set(Nan::New("obj").ToLocalChecked(), obj); 32 | handle.Reset(_obj); 33 | }; 34 | }; 35 | 36 | static inline void ClearReferences (std::vector *references) { 37 | for (std::vector::iterator it = references->begin() 38 | ; it != references->end() 39 | ; ) { 40 | 
DisposeStringOrBufferFromSlice((*it)->handle, (*it)->slice); 41 | it = references->erase(it); 42 | } 43 | delete references; 44 | } 45 | 46 | class Database : public Nan::ObjectWrap { 47 | public: 48 | static void Init (); 49 | static v8::Local NewInstance (v8::Local &location); 50 | 51 | rocksdb::Status OpenDatabase (rocksdb::Options* options); 52 | rocksdb::Status PutToDatabase ( 53 | rocksdb::WriteOptions* options 54 | , rocksdb::Slice key 55 | , rocksdb::Slice value 56 | ); 57 | rocksdb::Status GetFromDatabase ( 58 | rocksdb::ReadOptions* options 59 | , rocksdb::Slice key 60 | , std::string& value 61 | ); 62 | rocksdb::Status DeleteFromDatabase ( 63 | rocksdb::WriteOptions* options 64 | , rocksdb::Slice key 65 | ); 66 | rocksdb::Status WriteBatchToDatabase ( 67 | rocksdb::WriteOptions* options 68 | , rocksdb::WriteBatch* batch 69 | ); 70 | uint64_t ApproximateSizeFromDatabase (const rocksdb::Range* range); 71 | void GetPropertyFromDatabase (const rocksdb::Slice& property, std::string* value); 72 | rocksdb::Iterator* NewIterator (rocksdb::ReadOptions* options); 73 | const rocksdb::Snapshot* NewSnapshot (); 74 | void ReleaseSnapshot (const rocksdb::Snapshot* snapshot); 75 | void CloseDatabase (); 76 | void ReleaseIterator (uint32_t id); 77 | 78 | Database (const v8::Local& from); 79 | ~Database (); 80 | 81 | private: 82 | Nan::Utf8String* location; 83 | rocksdb::DB* db; 84 | uint32_t currentIteratorId; 85 | void(*pendingCloseWorker); 86 | std::shared_ptr blockCache; 87 | std::shared_ptr filterPolicy; 88 | 89 | std::map< uint32_t, flat_rocks::Iterator * > iterators; 90 | 91 | static void WriteDoing(uv_work_t *req); 92 | static void WriteAfter(uv_work_t *req); 93 | 94 | static NAN_METHOD(New); 95 | static NAN_METHOD(Open); 96 | static NAN_METHOD(Close); 97 | static NAN_METHOD(Put); 98 | static NAN_METHOD(Delete); 99 | static NAN_METHOD(Get); 100 | static NAN_METHOD(Batch); 101 | static NAN_METHOD(Write); 102 | static NAN_METHOD(Iterator); 103 | static NAN_METHOD(ApproximateSize); 104 | static NAN_METHOD(GetProperty); 105 | }; 106 | 107 | } // namespace flat_rocks 108 | 109 | #endif 110 | -------------------------------------------------------------------------------- /bench/db-bench.js: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | const leveldown = require('../') 4 | , fs = require('fs') 5 | , du = require('du') 6 | , rimraf = require('rimraf') 7 | 8 | , argv = require('optimist').argv 9 | 10 | , options = { 11 | benchmark : argv.benchmark 12 | , useExisting : argv.use_existing 13 | , db : argv.db || __dirname + '/db' 14 | , num : argv.num || 1000000 15 | , concurrency : argv.concurrency || 4 16 | , cacheSize : argv.cacheSize || 8 17 | , writeBufferSize : argv.writeBufferSize || 4 18 | , valueSize : argv.valueSize || 100 19 | , timingOutput : argv.timingOutput || __dirname + '/timingOutput' 20 | , throughputOutput : argv.throughputOutput 21 | } 22 | 23 | , randomString = require('slump').string 24 | , keyTmpl = '0000000000000000' 25 | 26 | if (!options.useExisting) { 27 | leveldown.destroy(options.db, function () {}) 28 | } 29 | 30 | var db = leveldown(options.db) 31 | , timesStream = fs.createWriteStream(options.timingOutput, 'utf8') 32 | 33 | function make16CharPaddedKey () { 34 | var r = Math.floor(Math.random() * options.num) 35 | , k = keyTmpl + r 36 | return k.substr(k.length - 16) 37 | } 38 | 39 | timesStream.write('Elapsed (ms), Entries, Bytes, Last 1000 Avg Time, MB/s\n') 40 | 41 | function start () { 42 | var 
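/* start() keeps at most options.concurrency puts in flight: write() bails
   out once that limit is reached, and each completed put decrements
   inProgress and schedules another write() via process.nextTick(). Every
   1000 writes a CSV row is appended to the timing log: elapsed ms, entries
   written, bytes written, the summed hrtime nanoseconds of the last 1000
   puts divided by 1000 (an average per-put latency in ns), and throughput
   in MB/s. */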
inProgress = 0 43 | , totalWrites = 0 44 | , totalBytes = 0 45 | , startTime = Date.now() 46 | , timesAccum = 0 47 | , writeBuf = '' 48 | , elapsed 49 | 50 | function report () { 51 | console.log( 52 | 'Wrote' 53 | , options.num 54 | , 'entries in' 55 | , Math.floor((Date.now() - startTime) / 1000) + 's,' 56 | , (Math.floor((totalBytes / 1048576) * 100) / 100) + 'MB' 57 | ) 58 | timesStream.end() 59 | 60 | du(options.db, function (err, size) { 61 | if (err) 62 | throw err 63 | console.log('Database size:', Math.floor(size / 1024 / 1024) + 'M') 64 | }) 65 | } 66 | 67 | 68 | function write () { 69 | if (totalWrites++ == options.num) { 70 | db.close(function () { 71 | report(Date.now() - startTime) 72 | }) 73 | } 74 | if (inProgress >= options.concurrency || totalWrites > options.num) 75 | return 76 | 77 | inProgress++ 78 | 79 | if (totalWrites % 100000 === 0) 80 | console.log('' + inProgress, totalWrites, Math.round(totalWrites / options.num * 100) + '%') 81 | 82 | if (totalWrites % 1000 === 0) { 83 | elapsed = Date.now() - startTime 84 | timesStream.write( 85 | elapsed 86 | + ',' + totalWrites 87 | + ',' + totalBytes 88 | + ',' + Math.floor(timesAccum / 1000) 89 | + ',' + (Math.floor(((totalBytes / 1048576) / (elapsed / 1000)) * 100) / 100) 90 | + '\n') 91 | timesAccum = 0 92 | } 93 | 94 | var time = process.hrtime() 95 | 96 | db.put(make16CharPaddedKey(), randomString({ length: options.valueSize }), function (err) { 97 | if (err) 98 | throw err 99 | 100 | totalBytes += keyTmpl.length + options.valueSize 101 | timesAccum += process.hrtime(time)[1] 102 | inProgress-- 103 | process.nextTick(write) 104 | }) 105 | } 106 | 107 | for (var i = 0; i < options.concurrency; i++) 108 | write() 109 | } 110 | 111 | setTimeout(function () { 112 | db.open({ 113 | errorIfExists : false 114 | , createIfMissing : true 115 | , cacheSize : options.cacheSize << 20 116 | , writeBufferSize : options.writeBufferSize << 20 117 | }, function (err) { 118 | if (err) 119 | throw err 120 | 121 | start() 122 | 123 | }) 124 | }, 500) 125 | -------------------------------------------------------------------------------- /src/database_async.h: -------------------------------------------------------------------------------- 1 | /* Copyright (c) 2012-2016 LevelDOWN contributors 2 | * See list at 3 | * MIT License 4 | */ 5 | 6 | #ifndef LD_DATABASE_ASYNC_H 7 | #define LD_DATABASE_ASYNC_H 8 | 9 | #include 10 | #include 11 | 12 | #include 13 | 14 | #include "async.h" 15 | 16 | namespace flat_rocks { 17 | 18 | class OpenWorker : public AsyncWorker { 19 | public: 20 | OpenWorker ( 21 | Database *database 22 | , Nan::Callback *callback 23 | , std::shared_ptr blockCache 24 | , std::shared_ptr filterPolicy 25 | , bool createIfMissing 26 | , bool errorIfExists 27 | , bool compression 28 | , uint32_t writeBufferSize 29 | , uint32_t blockSize 30 | , uint32_t maxOpenFiles 31 | , uint32_t blockRestartInterval 32 | ); 33 | 34 | virtual ~OpenWorker (); 35 | virtual void Execute (); 36 | 37 | private: 38 | rocksdb::Options* options; 39 | }; 40 | 41 | class CloseWorker : public AsyncWorker { 42 | public: 43 | CloseWorker ( 44 | Database *database 45 | , Nan::Callback *callback 46 | ); 47 | 48 | virtual ~CloseWorker (); 49 | virtual void Execute (); 50 | virtual void WorkComplete (); 51 | }; 52 | 53 | class IOWorker : public AsyncWorker { 54 | public: 55 | IOWorker ( 56 | Database *database 57 | , Nan::Callback *callback 58 | , rocksdb::Slice key 59 | , v8::Local &keyHandle 60 | ); 61 | 62 | virtual ~IOWorker (); 63 | virtual void WorkComplete 
(); 64 | 65 | protected: 66 | rocksdb::Slice key; 67 | }; 68 | 69 | class ReadWorker : public IOWorker { 70 | public: 71 | ReadWorker ( 72 | Database *database 73 | , Nan::Callback *callback 74 | , rocksdb::Slice key 75 | , bool asBuffer 76 | , bool fillCache 77 | , v8::Local &keyHandle 78 | ); 79 | 80 | virtual ~ReadWorker (); 81 | virtual void Execute (); 82 | virtual void HandleOKCallback (); 83 | 84 | private: 85 | bool asBuffer; 86 | rocksdb::ReadOptions* options; 87 | std::string value; 88 | }; 89 | 90 | class DeleteWorker : public IOWorker { 91 | public: 92 | DeleteWorker ( 93 | Database *database 94 | , Nan::Callback *callback 95 | , rocksdb::Slice key 96 | , bool sync 97 | , v8::Local &keyHandle 98 | ); 99 | 100 | virtual ~DeleteWorker (); 101 | virtual void Execute (); 102 | 103 | protected: 104 | rocksdb::WriteOptions* options; 105 | }; 106 | 107 | class WriteWorker : public DeleteWorker { 108 | public: 109 | WriteWorker ( 110 | Database *database 111 | , Nan::Callback *callback 112 | , rocksdb::Slice key 113 | , rocksdb::Slice value 114 | , bool sync 115 | , v8::Local &keyHandle 116 | , v8::Local &valueHandle 117 | ); 118 | 119 | virtual ~WriteWorker (); 120 | virtual void Execute (); 121 | virtual void WorkComplete (); 122 | 123 | private: 124 | rocksdb::Slice value; 125 | }; 126 | 127 | class BatchWorker : public AsyncWorker { 128 | public: 129 | BatchWorker ( 130 | Database *database 131 | , Nan::Callback *callback 132 | , rocksdb::WriteBatch* batch 133 | , bool sync 134 | ); 135 | 136 | virtual ~BatchWorker (); 137 | virtual void Execute (); 138 | 139 | private: 140 | rocksdb::WriteOptions* options; 141 | rocksdb::WriteBatch* batch; 142 | }; 143 | 144 | class ApproximateSizeWorker : public AsyncWorker { 145 | public: 146 | ApproximateSizeWorker ( 147 | Database *database 148 | , Nan::Callback *callback 149 | , rocksdb::Slice start 150 | , rocksdb::Slice end 151 | , v8::Local &startHandle 152 | , v8::Local &endHandle 153 | ); 154 | 155 | virtual ~ApproximateSizeWorker (); 156 | virtual void Execute (); 157 | virtual void HandleOKCallback (); 158 | virtual void WorkComplete (); 159 | 160 | private: 161 | rocksdb::Range range; 162 | uint64_t size; 163 | }; 164 | 165 | } // namespace flat_rocks 166 | 167 | #endif 168 | -------------------------------------------------------------------------------- /src/batch.cc: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | #include "database.h" 6 | #include "batch_async.h" 7 | #include "batch.h" 8 | #include "common.h" 9 | 10 | namespace flat_rocks { 11 | 12 | static Nan::Persistent batch_constructor; 13 | 14 | Batch::Batch (flat_rocks::Database* database, bool sync) : database(database) { 15 | options = new rocksdb::WriteOptions(); 16 | options->sync = sync; 17 | batch = new rocksdb::WriteBatch(); 18 | hasData = false; 19 | } 20 | 21 | Batch::~Batch () { 22 | delete options; 23 | delete batch; 24 | } 25 | 26 | rocksdb::Status Batch::Write () { 27 | return database->WriteBatchToDatabase(options, batch); 28 | } 29 | 30 | void Batch::Init () { 31 | v8::Local tpl = Nan::New(Batch::New); 32 | batch_constructor.Reset(tpl); 33 | tpl->SetClassName(Nan::New("Batch").ToLocalChecked()); 34 | tpl->InstanceTemplate()->SetInternalFieldCount(1); 35 | Nan::SetPrototypeMethod(tpl, "put", Batch::Put); 36 | Nan::SetPrototypeMethod(tpl, "del", Batch::Del); 37 | Nan::SetPrototypeMethod(tpl, "clear", Batch::Clear); 38 | Nan::SetPrototypeMethod(tpl, "write", Batch::Write); 39 | } 40 
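/* The prototype methods registered above make up the chained-batch API as
   seen from JavaScript. A minimal usage sketch, assuming `db` is an
   already-opened flat-rocks instance (keys and values are illustrative):

     var batch = db.batch()   // argument-less batch() returns a Batch object
     batch.put('foo', 'bar')
          .put('beep', 'boop')
          .del('stale-key')
          .write(function (err) {
            // all queued operations were applied as one rocksdb::WriteBatch;
            // an empty batch invokes the callback immediately
          })
*/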
| 41 | NAN_METHOD(Batch::New) { 42 | Database* database = Nan::ObjectWrap::Unwrap(info[0]->ToObject()); 43 | v8::Local optionsObj; 44 | 45 | if (info.Length() > 1 && info[1]->IsObject()) { 46 | optionsObj = v8::Local::Cast(info[1]); 47 | } 48 | 49 | bool sync = BooleanOptionValue(optionsObj, "sync"); 50 | 51 | Batch* batch = new Batch(database, sync); 52 | batch->Wrap(info.This()); 53 | 54 | info.GetReturnValue().Set(info.This()); 55 | } 56 | 57 | v8::Local Batch::NewInstance ( 58 | v8::Local database 59 | , v8::Local optionsObj 60 | ) { 61 | 62 | Nan::EscapableHandleScope scope; 63 | 64 | v8::Local instance; 65 | 66 | v8::Local constructorHandle = 67 | Nan::New(batch_constructor); 68 | 69 | if (optionsObj.IsEmpty()) { 70 | v8::Local argv[1] = { database }; 71 | instance = constructorHandle->GetFunction()->NewInstance(1, argv); 72 | } else { 73 | v8::Local argv[2] = { database, optionsObj }; 74 | instance = constructorHandle->GetFunction()->NewInstance(2, argv); 75 | } 76 | 77 | return scope.Escape(instance); 78 | } 79 | 80 | NAN_METHOD(Batch::Put) { 81 | Batch* batch = ObjectWrap::Unwrap(info.Holder()); 82 | v8::Local callback; // purely for the error macros 83 | 84 | v8::Local keyBuffer = info[0]; 85 | v8::Local valueBuffer = info[1]; 86 | LD_STRING_OR_BUFFER_TO_SLICE(key, keyBuffer, key) 87 | LD_STRING_OR_BUFFER_TO_SLICE(value, valueBuffer, value) 88 | 89 | batch->batch->Put(key, value); 90 | if (!batch->hasData) 91 | batch->hasData = true; 92 | 93 | DisposeStringOrBufferFromSlice(keyBuffer, key); 94 | DisposeStringOrBufferFromSlice(valueBuffer, value); 95 | 96 | info.GetReturnValue().Set(info.Holder()); 97 | } 98 | 99 | NAN_METHOD(Batch::Del) { 100 | Batch* batch = ObjectWrap::Unwrap(info.Holder()); 101 | 102 | v8::Local callback; // purely for the error macros 103 | 104 | v8::Local keyBuffer = info[0]; 105 | LD_STRING_OR_BUFFER_TO_SLICE(key, keyBuffer, key) 106 | 107 | batch->batch->Delete(key); 108 | if (!batch->hasData) 109 | batch->hasData = true; 110 | 111 | DisposeStringOrBufferFromSlice(keyBuffer, key); 112 | 113 | info.GetReturnValue().Set(info.Holder()); 114 | } 115 | 116 | NAN_METHOD(Batch::Clear) { 117 | Batch* batch = ObjectWrap::Unwrap(info.Holder()); 118 | 119 | batch->batch->Clear(); 120 | batch->hasData = false; 121 | 122 | info.GetReturnValue().Set(info.Holder()); 123 | } 124 | 125 | NAN_METHOD(Batch::Write) { 126 | Batch* batch = ObjectWrap::Unwrap(info.Holder()); 127 | 128 | if (batch->hasData) { 129 | Nan::Callback *callback = 130 | new Nan::Callback(v8::Local::Cast(info[0])); 131 | BatchWriteWorker* worker = new BatchWriteWorker(batch, callback); 132 | // persist to prevent accidental GC 133 | v8::Local _this = info.This(); 134 | worker->SaveToPersistent("batch", _this); 135 | Nan::AsyncQueueWorker(worker); 136 | } else { 137 | LD_RUN_CALLBACK(v8::Local::Cast(info[0]), 0, NULL); 138 | } 139 | } 140 | 141 | } // namespace flat_rocks 142 | -------------------------------------------------------------------------------- /src/flatrocks.h: -------------------------------------------------------------------------------- 1 | /* Copyright (c) 2012-2016 LevelDOWN contributors 2 | * See list at 3 | * MIT License 4 | */ 5 | #ifndef LD_FLATROCKS_H 6 | #define LD_FLATROCKS_H 7 | 8 | #include 9 | #include 10 | #include 11 | #include 12 | 13 | static inline size_t StringOrBufferLength(v8::Local obj) { 14 | Nan::HandleScope scope; 15 | 16 | return (!obj->ToObject().IsEmpty() 17 | && node::Buffer::HasInstance(obj->ToObject())) 18 | ? 
node::Buffer::Length(obj->ToObject()) 19 | : obj->ToString()->Utf8Length(); 20 | } 21 | 22 | // NOTE: this MUST be called on objects created by 23 | // LD_STRING_OR_BUFFER_TO_SLICE 24 | static inline void DisposeStringOrBufferFromSlice( 25 | Nan::Persistent &handle 26 | , rocksdb::Slice slice) { 27 | Nan::HandleScope scope; 28 | 29 | if (!slice.empty()) { 30 | v8::Local obj = Nan::New(handle)->Get(Nan::New("obj").ToLocalChecked()); 31 | if (!node::Buffer::HasInstance(obj)) 32 | delete[] slice.data(); 33 | } 34 | 35 | handle.Reset(); 36 | } 37 | 38 | static inline void DisposeStringOrBufferFromSlice( 39 | v8::Local handle 40 | , rocksdb::Slice slice) { 41 | 42 | if (!slice.empty() && !node::Buffer::HasInstance(handle)) 43 | delete[] slice.data(); 44 | } 45 | 46 | // NOTE: must call DisposeStringOrBufferFromSlice() on objects created here 47 | #define LD_STRING_OR_BUFFER_TO_SLICE(to, from, name) \ 48 | size_t to ## Sz_; \ 49 | char* to ## Ch_; \ 50 | if (from->IsNull() || from->IsUndefined()) { \ 51 | to ## Sz_ = 0; \ 52 | to ## Ch_ = 0; \ 53 | } else if (!from->ToObject().IsEmpty() \ 54 | && node::Buffer::HasInstance(from->ToObject())) { \ 55 | to ## Sz_ = node::Buffer::Length(from->ToObject()); \ 56 | to ## Ch_ = node::Buffer::Data(from->ToObject()); \ 57 | } else { \ 58 | v8::Local to ## Str = from->ToString(); \ 59 | to ## Sz_ = to ## Str->Utf8Length(); \ 60 | to ## Ch_ = new char[to ## Sz_]; \ 61 | to ## Str->WriteUtf8( \ 62 | to ## Ch_ \ 63 | , -1 \ 64 | , NULL, v8::String::NO_NULL_TERMINATION \ 65 | ); \ 66 | } \ 67 | rocksdb::Slice to(to ## Ch_, to ## Sz_); 68 | 69 | #define LD_RETURN_CALLBACK_OR_ERROR(callback, msg) \ 70 | if (!callback.IsEmpty() && callback->IsFunction()) { \ 71 | v8::Local argv[] = { \ 72 | Nan::Error(msg) \ 73 | }; \ 74 | LD_RUN_CALLBACK(callback, 1, argv) \ 75 | info.GetReturnValue().SetUndefined(); \ 76 | return; \ 77 | } \ 78 | return Nan::ThrowError(msg); 79 | 80 | #define LD_RUN_CALLBACK(callback, argc, argv) \ 81 | Nan::MakeCallback( \ 82 | Nan::GetCurrentContext()->Global(), callback, argc, argv); 83 | 84 | /* LD_METHOD_SETUP_COMMON setup the following objects: 85 | * - Database* database 86 | * - v8::Local optionsObj (may be empty) 87 | * - Nan::Persistent callback (won't be empty) 88 | * Will throw/return if there isn't a callback in arg 0 or 1 89 | */ 90 | #define LD_METHOD_SETUP_COMMON(name, optionPos, callbackPos) \ 91 | if (info.Length() == 0) \ 92 | return Nan::ThrowError(#name "() requires a callback argument"); \ 93 | flat_rocks::Database* database = \ 94 | Nan::ObjectWrap::Unwrap(info.This()); \ 95 | v8::Local optionsObj; \ 96 | v8::Local callback; \ 97 | if (optionPos == -1 && info[callbackPos]->IsFunction()) { \ 98 | callback = info[callbackPos].As(); \ 99 | } else if (optionPos != -1 && info[callbackPos - 1]->IsFunction()) { \ 100 | callback = info[callbackPos - 1].As(); \ 101 | } else if (optionPos != -1 \ 102 | && info[optionPos]->IsObject() \ 103 | && info[callbackPos]->IsFunction()) { \ 104 | optionsObj = info[optionPos].As(); \ 105 | callback = info[callbackPos].As(); \ 106 | } else { \ 107 | return Nan::ThrowError(#name "() requires a callback argument"); \ 108 | } 109 | 110 | #define LD_METHOD_SETUP_COMMON_ONEARG(name) LD_METHOD_SETUP_COMMON(name, -1, 0) 111 | 112 | #endif 113 | -------------------------------------------------------------------------------- /src/database_async.cc: -------------------------------------------------------------------------------- 1 | /* Copyright (c) 2012-2016 LevelDOWN contributors 2 | * See list at 3 | 
* MIT License 4 | */ 5 | 6 | #include 7 | #include 8 | 9 | #include 10 | #include 11 | #include 12 | 13 | #include "database.h" 14 | #include "flatrocks.h" 15 | #include "async.h" 16 | #include "database_async.h" 17 | 18 | namespace flat_rocks { 19 | 20 | /** OPEN WORKER **/ 21 | 22 | OpenWorker::OpenWorker ( 23 | Database *database 24 | , Nan::Callback *callback 25 | , std::shared_ptr blockCache 26 | , std::shared_ptr filterPolicy 27 | , bool createIfMissing 28 | , bool errorIfExists 29 | , bool compression 30 | , uint32_t writeBufferSize 31 | , uint32_t blockSize 32 | , uint32_t maxOpenFiles 33 | , uint32_t blockRestartInterval 34 | ) : AsyncWorker(database, callback) 35 | { 36 | options = new rocksdb::Options(); 37 | rocksdb::BlockBasedTableOptions table_options; 38 | 39 | table_options.block_cache = blockCache; 40 | table_options.filter_policy = filterPolicy; 41 | table_options.block_size = blockSize; 42 | table_options.block_restart_interval = blockRestartInterval; 43 | 44 | options->create_if_missing = createIfMissing; 45 | options->error_if_exists = errorIfExists; 46 | 47 | options->compression = compression 48 | ? rocksdb::kSnappyCompression 49 | : rocksdb::kNoCompression; 50 | 51 | options->write_buffer_size = writeBufferSize; 52 | options->max_open_files = maxOpenFiles; 53 | 54 | options->table_factory.reset(rocksdb::NewBlockBasedTableFactory(table_options)); 55 | }; 56 | 57 | OpenWorker::~OpenWorker () { 58 | delete options; 59 | } 60 | 61 | void OpenWorker::Execute () { 62 | SetStatus(database->OpenDatabase(options)); 63 | } 64 | 65 | /** CLOSE WORKER **/ 66 | 67 | CloseWorker::CloseWorker ( 68 | Database *database 69 | , Nan::Callback *callback 70 | ) : AsyncWorker(database, callback) 71 | {}; 72 | 73 | CloseWorker::~CloseWorker () {} 74 | 75 | void CloseWorker::Execute () { 76 | database->CloseDatabase(); 77 | } 78 | 79 | void CloseWorker::WorkComplete () { 80 | Nan::HandleScope scope; 81 | HandleOKCallback(); 82 | delete callback; 83 | callback = NULL; 84 | } 85 | 86 | /** IO WORKER (abstract) **/ 87 | 88 | IOWorker::IOWorker ( 89 | Database *database 90 | , Nan::Callback *callback 91 | , rocksdb::Slice key 92 | , v8::Local &keyHandle 93 | ) : AsyncWorker(database, callback) 94 | , key(key) 95 | { 96 | Nan::HandleScope scope; 97 | 98 | SaveToPersistent("key", keyHandle); 99 | }; 100 | 101 | IOWorker::~IOWorker () {} 102 | 103 | void IOWorker::WorkComplete () { 104 | Nan::HandleScope scope; 105 | 106 | DisposeStringOrBufferFromSlice(GetFromPersistent("key"), key); 107 | AsyncWorker::WorkComplete(); 108 | } 109 | 110 | /** READ WORKER **/ 111 | 112 | ReadWorker::ReadWorker ( 113 | Database *database 114 | , Nan::Callback *callback 115 | , rocksdb::Slice key 116 | , bool asBuffer 117 | , bool fillCache 118 | , v8::Local &keyHandle 119 | ) : IOWorker(database, callback, key, keyHandle) 120 | , asBuffer(asBuffer) 121 | { 122 | Nan::HandleScope scope; 123 | 124 | options = new rocksdb::ReadOptions(); 125 | options->fill_cache = fillCache; 126 | SaveToPersistent("key", keyHandle); 127 | }; 128 | 129 | ReadWorker::~ReadWorker () { 130 | delete options; 131 | } 132 | 133 | void ReadWorker::Execute () { 134 | SetStatus(database->GetFromDatabase(options, key, value)); 135 | } 136 | 137 | void ReadWorker::HandleOKCallback () { 138 | Nan::HandleScope scope; 139 | 140 | v8::Local returnValue; 141 | if (asBuffer) { 142 | //TODO: could use NewBuffer if we carefully manage the lifecycle of `value` 143 | //and avoid an an extra allocation. 
We'd have to clean up properly when not OK 144 | //and let the new Buffer manage the data when OK 145 | returnValue = Nan::CopyBuffer((char*)value.data(), value.size()).ToLocalChecked(); 146 | } else { 147 | returnValue = Nan::New((char*)value.data(), value.size()).ToLocalChecked(); 148 | } 149 | v8::Local argv[] = { 150 | Nan::Null() 151 | , returnValue 152 | }; 153 | callback->Call(2, argv); 154 | } 155 | 156 | /** DELETE WORKER **/ 157 | 158 | DeleteWorker::DeleteWorker ( 159 | Database *database 160 | , Nan::Callback *callback 161 | , rocksdb::Slice key 162 | , bool sync 163 | , v8::Local &keyHandle 164 | ) : IOWorker(database, callback, key, keyHandle) 165 | { 166 | Nan::HandleScope scope; 167 | 168 | options = new rocksdb::WriteOptions(); 169 | options->sync = sync; 170 | SaveToPersistent("key", keyHandle); 171 | }; 172 | 173 | DeleteWorker::~DeleteWorker () { 174 | delete options; 175 | } 176 | 177 | void DeleteWorker::Execute () { 178 | SetStatus(database->DeleteFromDatabase(options, key)); 179 | } 180 | 181 | /** WRITE WORKER **/ 182 | 183 | WriteWorker::WriteWorker ( 184 | Database *database 185 | , Nan::Callback *callback 186 | , rocksdb::Slice key 187 | , rocksdb::Slice value 188 | , bool sync 189 | , v8::Local &keyHandle 190 | , v8::Local &valueHandle 191 | ) : DeleteWorker(database, callback, key, sync, keyHandle) 192 | , value(value) 193 | { 194 | Nan::HandleScope scope; 195 | 196 | SaveToPersistent("value", valueHandle); 197 | }; 198 | 199 | WriteWorker::~WriteWorker () { } 200 | 201 | void WriteWorker::Execute () { 202 | SetStatus(database->PutToDatabase(options, key, value)); 203 | } 204 | 205 | void WriteWorker::WorkComplete () { 206 | Nan::HandleScope scope; 207 | 208 | DisposeStringOrBufferFromSlice(GetFromPersistent("value"), value); 209 | IOWorker::WorkComplete(); 210 | } 211 | 212 | /** BATCH WORKER **/ 213 | 214 | BatchWorker::BatchWorker ( 215 | Database *database 216 | , Nan::Callback *callback 217 | , rocksdb::WriteBatch* batch 218 | , bool sync 219 | ) : AsyncWorker(database, callback) 220 | , batch(batch) 221 | { 222 | options = new rocksdb::WriteOptions(); 223 | options->sync = sync; 224 | }; 225 | 226 | BatchWorker::~BatchWorker () { 227 | delete batch; 228 | delete options; 229 | } 230 | 231 | void BatchWorker::Execute () { 232 | SetStatus(database->WriteBatchToDatabase(options, batch)); 233 | } 234 | 235 | /** APPROXIMATE SIZE WORKER **/ 236 | 237 | ApproximateSizeWorker::ApproximateSizeWorker ( 238 | Database *database 239 | , Nan::Callback *callback 240 | , rocksdb::Slice start 241 | , rocksdb::Slice end 242 | , v8::Local &startHandle 243 | , v8::Local &endHandle 244 | ) : AsyncWorker(database, callback) 245 | , range(start, end) 246 | { 247 | Nan::HandleScope scope; 248 | 249 | SaveToPersistent("start", startHandle); 250 | SaveToPersistent("end", endHandle); 251 | }; 252 | 253 | ApproximateSizeWorker::~ApproximateSizeWorker () {} 254 | 255 | void ApproximateSizeWorker::Execute () { 256 | size = database->ApproximateSizeFromDatabase(&range); 257 | } 258 | 259 | void ApproximateSizeWorker::WorkComplete() { 260 | Nan::HandleScope scope; 261 | 262 | DisposeStringOrBufferFromSlice(GetFromPersistent("start"), range.start); 263 | DisposeStringOrBufferFromSlice(GetFromPersistent("end"), range.limit); 264 | AsyncWorker::WorkComplete(); 265 | } 266 | 267 | void ApproximateSizeWorker::HandleOKCallback () { 268 | Nan::HandleScope scope; 269 | 270 | v8::Local returnValue = Nan::New((double) size); 271 | v8::Local argv[] = { 272 | Nan::Null() 273 | , returnValue 274 
| }; 275 | callback->Call(2, argv); 276 | } 277 | 278 | } // namespace flat_rocks 279 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | Note that we have filtered out commits related to new tags, updating changelog and we're also not listing any merge commits, i.e. we are only listing things that changed between versions. 2 | 3 | ### 1.0.7 May 27 2015 4 | 5 | * [[`61398a0056`](https://github.com/level/leveldown/commit/61398a0056)] - link to level/community (Lars-Magnus Skog) 6 | * [[`382a1a7fa3`](https://github.com/level/leveldown/commit/382a1a7fa3)] - add compression test suite (Julian Gruber) 7 | * [[`139db7bc7f`](https://github.com/level/leveldown/commit/139db7bc7f)] - use const reference instead of by value in Database constructor (Lars-Magnus Skog) 8 | * [[`b56a86323e`](https://github.com/level/leveldown/commit/b56a86323e)] - refactor NanNew() on strings into option value functions (Lars-Magnus Skog) 9 | * [[`ca1f4746c4`](https://github.com/level/leveldown/commit/ca1f4746c4)] - refactor BooleanOptionValue (Lars-Magnus Skog) 10 | * [[`56def2d7c8`](https://github.com/level/leveldown/commit/56def2d7c8)] - NanUInt32OptionValue -> UInt32OptionValue (Lars-Magnus Skog) 11 | * [[`39c614a24f`](https://github.com/level/leveldown/commit/39c614a24f)] - NanBooleanOptionValue -> BooleanOptionValue (Lars-Magnus Skog) 12 | * [[`fcdc46183e`](https://github.com/level/leveldown/commit/fcdc46183e)] - simplify location logic, let Database take care of allocation (Lars-Magnus Skog) 13 | * [[`8cb90e6b6d`](https://github.com/level/leveldown/commit/8cb90e6b6d)] - update abstract-leveldown (Lars-Magnus Skog) 14 | * [[`f70b6576e7`](https://github.com/level/leveldown/commit/f70b6576e7)] - update .travis.yml (nvm works on travis now) (Lars-Magnus Skog) 15 | * [[`007550e7f7`](https://github.com/level/leveldown/commit/007550e7f7)] - 1.0.6 (Lars-Magnus Skog) 16 | 17 | ### 1.0.5/1.0.6 May 5 2015 18 | 19 | * [[`9064099fe7`](https://github.com/level/leveldown/commit/9064099fe7)] - pass db to abstractIterator so gc keeps it (Julian Gruber) 20 | 21 | ### 1.0.4 May 5 2015 22 | 23 | * [[`b550c98291`](https://github.com/level/leveldown/commit/b550c98291)] - update nan for iojs 2.0.0 (Lars-Magnus Skog) 24 | 25 | ### 1.0.3 May 2 2015 26 | 27 | * [[`82479b689f`](https://github.com/level/leveldown/commit/82479b689f)] - tap -> tape + faucet (Lars-Magnus Skog) 28 | * [[`ca9101542a`](https://github.com/level/leveldown/commit/ca9101542a)] - fix write-random.js, use leveldown instead of lmdb (Lars-Magnus Skog) 29 | * [[`03fbbfb99f`](https://github.com/level/leveldown/commit/03fbbfb99f)] - fix bench/db-bench.js (Lars-Magnus Skog) 30 | 31 | ### 1.0.2 Apr 26 2015 32 | * [[`8470a63678`](https://github.com/level/leveldown/commit/8470a63678)] - s/rvagg\/node-/level\// (Lars-Magnus Skog) 33 | * [[`9cbf592bea`](https://github.com/level/leveldown/commit/9cbf592bea)] - add documentation about snapshots (Max Ogden) 34 | * [[`b57827cd29`](https://github.com/level/leveldown/commit/b57827cd29)] - use n instead of nvm for working iojs support (Lars-Magnus Skog) 35 | * [[`a19927667a`](https://github.com/level/leveldown/commit/a19927667a)] - abstract-leveldown ~2.1.0 (ralphtheninja) 36 | * [[`95ccdf0850`](https://github.com/level/leveldown/commit/95ccdf0850)] - update logo and copyright (Lars-Magnus Skog) 37 | * [[`09e89d7abb`](https://github.com/level/leveldown/commit/09e89d7abb)] - updated my email (ralphtheninja) 38 
| 39 | ### 1.0.1 Jan 16 2015 40 | * [[`6df3ecd6f5`](https://github.com/level/leveldown/commit/6df3ecd6f5)] - nan 1.5 for io.js support (Rod Vagg) 41 | * [[`5198231a88`](https://github.com/level/leveldown/commit/5198231a88)] - Fix LevelDB builds for modern gcc versions (Sharvil Nanavati) 42 | 43 | ### 1.0.0 Aug 26 2014 44 | * NAN@1.3 for Node 0.11.13+ support (@rvagg) 45 | * Allow writing empty values: null, undefined, '', [] and Buffer(0). Entries come out as '' or Buffer(0) (@ggreer, @juliangruber, @rvagg) 46 | * Fix clang build (@thlorenz) 47 | * Massive speed up of iterators by chunking reads (@kesla) 48 | * Wrap in abstract-leveldown for consistent type-checking across *DOWNs (@kesla) 49 | * Upgrade to LevelDB 1.17.0 (@kesla) 50 | * Minor memory leaks 51 | * Remove compile option that borked EL5 compiles 52 | * Switch to plain MIT license 53 | 54 | ### 0.10.2 @ Nov 30 2013 55 | 56 | * Apply fix by @rescrv for long-standing OSX corruption bug, https://groups.google.com/forum/#!topic/leveldb/GXhx8YvFiig (@rvagg / @rescrv) 57 | 58 | ### 0.10.1 @ Nov 21 2013 59 | 60 | * NAN@0.6 for Node@0.11.6 support, v8::Local::New(val) rewritten to 61 | NanNewLocal(val) (@rvagg) 62 | 63 | ### 0.10.0 @ Nov 18 2013 64 | 65 | * Fix array-batch memory leak, levelup/#171 (@rvagg) 66 | * Fix chained-batch write() segfaults, details in #73, (@rvagg and 67 | @mcollina) 68 | * Remove `Persistent` references for all `batch()` operations as 69 | `WriteBatch` takes an explicit copy of the data (@mcollina and 70 | @rvagg) 71 | * Upgrade to Snappy 1.1.1 (@rvagg and @no9) 72 | * Upgrade to NAN@0.5.x (@rvagg) 73 | * Switch all `callback->Call()`s to `node::MakeCallback()` to properly 74 | support Node.js domains (@rvagg) 75 | * Properly enable compression by default (@Kyotoweb) 76 | * Enable LevelDB's BloomFilter (@Kyotoweb) 77 | * Upgrade to AbstractLevelDOWN@0.11.x for testing (@rvagg) 78 | * Add new simple batch() leak tester (@rvagg) 79 | 80 | ### 0.9.2 @ Nov 02 2013 81 | 82 | * Minor fixes to support Node 0.11.8 and new Linux gcc (warnings) (@rvagg) 83 | 84 | ### 0.9.1 @ Oct 03 2013 85 | 86 | * Include port_uv.h for Windows compile, added test to suite to make sure this happens every time LevelDB is upgraded (@rvagg) 87 | 88 | ### 0.9.0 @ Oct 01 2013 89 | 90 | * Upgrade from LevelDB@0.11.0 to LevelDB@0.14.0, includes change from .sst to .ldb file extension for SST files (@rvagg) 91 | 92 | ### 0.8.3 @ Sept 18 2013 93 | 94 | * Upgrade to nan@0.4.0, better support for latest Node master & support for installing within directory structures containing spaces in directory names (@rvagg) 95 | 96 | ### 0.8.2 @ Sept 2 2013 97 | 98 | * FreeBSD support (@rvagg, @kelexel) 99 | 100 | ### 0.8.1 @ Sept 1 2013 101 | 102 | * Fixed some minor V8-level leaks (@rvagg) 103 | 104 | ### 0.8.0 @ Aug 19 2013 105 | 106 | * Added `gt`, `lt`, `gte`, `lte` for iterators (@dominictarr) 107 | * Switch to NAN as an npm dependency (@rvagg) 108 | 109 | ### 0.7.0 @ Aug 11 2013 110 | 111 | * Added @pgte to contributors list 112 | * (very) Minor perf improvements in C++ (@mscdex) 113 | * Use NAN for Node 0.8->0.11 compatibility 114 | 115 | ### 0.6.2 @ Jul 07 2013 116 | 117 | * Compatibility for Node 0.11.3, breaks compatibility with 0.11.2 118 | 119 | ### 0.6.1 @ Jun 15 2013 120 | 121 | * Fix broken Windows compile, apply port-uv patch to LevelDB's port.h (@rvagg) 122 | 123 | ### 0.6.0 @ Jun 14 2013 124 | 125 | * Upgrade to LevelDB 1.11.0, some important bugfixes: https://groups.google.com/forum/#!topic/leveldb/vS1JvmGlp4E 126 | 127 | ### 0.5.0 @ May 
18 2013 128 | 129 | * Bumped major version for db.getProperty() addition (should have been done in 0.4.4) (@rvagg) 130 | * Disallow batch() operations after a write() (@rvagg) 131 | 132 | ### 0.4.4 @ May 18 2013 133 | 134 | * db.getProperty() implemented, see README for details (@rvagg) 135 | * More work on memory management, including late-creation of Persistent handles (@rvagg) 136 | 137 | ### 0.4.3 @ May 18 2013 138 | 139 | * Better memory leak fix (@rvagg) 140 | 141 | ### 0.2.2 @ May 17 2013 142 | 143 | * BACKPORT memory leak fixes (@rvagg) 144 | 145 | ### 0.4.2 @ May 17 2013 146 | 147 | * Same memory leak fixes as 0.4.1, properly applied to batch() operations too (@rvagg) 148 | 149 | ### 0.4.1 @ May 17 2013 150 | 151 | * Fix memory leak caused when passing String objects in as keys and values, conversion to Slice created new char[] but wasn't being disposed. Buffers are automatically disposed. (@rvagg, reported by @kylegetson levelup/#140) 152 | 153 | ### 0.4.0 @ May 15 2013 154 | 155 | * Upgrade to LevelDB 1.10.0, fairly minor changes, mostly bugfixes see https://groups.google.com/forum/#!topic/leveldb/O2Zdbi9Lrao for more info (@rvagg) 156 | 157 | ### 0.3.1 @ May 14 2013 158 | 159 | * Don't allow empty batch() operations through to LevelDB, on chained of array forms (@rvagg) 160 | 161 | ### 0.3.0 (& 0.2.2) @ May 14 2013 162 | 163 | * Pull API tests up into AbstractLevelDOWN, require it to run the tests. AbstractLevelDOWN can now be used to test LevelDOWN-compliant APIs. (@maxogden) 164 | * Change Iterator methods to return errors on the callbacks rather than throw (@mcollina & @rvagg) 165 | 166 | 0.2.1 @ Apr 8 2013 167 | ================== 168 | * Start on correct value when reverse=true, also handle end-of-store case #27 (@kesla) 169 | * Ignore empty string/buffer start/end options on iterators (@kesla) 170 | * Macro cleanup, replace some with static inline functions (@rvagg) 171 | 172 | ### 0.2.0 @ Mar 30 2013 173 | 174 | * Windows support--using a combination of libuv and Windows-specific code. See README for details about what's required (@rvagg) 175 | * leveldown.destroy(location, callback) to delete an existing LevelDB store, exposes LevelDB.DestroyDB() (@rvagg) 176 | * leveldown.repair(location, callback) to repair an existing LevelDB store, exposes LevelDB.RepairDB() (@rvagg) 177 | * advanced options: writeBufferSize, blockSize, maxOpenFiles, blockRestartInterval, exposes LevelDB options (@rvagg) 178 | * chained-batch operations. Argument-less db.batch() will return a new Batch object that can .put() and .del() and then .write(). API in flux so not documented yet. (@juliangruber / @rvagg) 179 | * auto-cleanup iterators that are left open when you close a database; any iterators left open when you close a database instance will kill your process so we now keep track of iterators and auto-close them before a db.close completes. 
180 | * Node 0.11 support (no compile warnings) 181 | 182 | ### 0.1.4 @ Mar 11 2013 183 | 184 | * return error when batch ops contain null or undefined (@rvagg / @ralphtheninja / @dominictarr) (additional tests in LevelUP for this) 185 | 186 | 0.1.3 @ Mar 9 2013 187 | ================== 188 | * add 'standalone_static_library':1 in dependency gyp files to fix SmartOS build problems (@wolfeidau) 189 | 190 | ### 0.1.2 @ Jan 25 2013 191 | 192 | * upgrade to LevelDB 1.9.0, fairly minor changes since 1.7.0 (@rvagg) 193 | * upgrade to Snappy 1.1.0, changes block size to improve compression ~3%, slight decrease in speed (@rvagg) 194 | 195 | ### 0.1.1 @ Jan 25 2013 196 | 197 | * compile error on Mac OS (@kesla / @rvagg) 198 | 199 | ### 0.1.0 @ Jan 24 2013 200 | 201 | * change API to export single function `levelup()` (@rvagg) 202 | * move `createIterator()` to `levelup#iterator()` (@rvagg) 203 | * make all `options` arguments optional (@rvagg) 204 | * argument number & type checking on all methods (@rvagg) 205 | * stricter checking on key & value types, String/Object.toString()/Buffer, non-zero-length (@rvagg) 206 | * remove `use namespace` and add `namespace leveldown` everywhere (@rvagg) 207 | * race condition in Iterator end()/next() fix, merged from LevelUP (@ralphtheninja / @rvagg) 208 | * add complete, independent test suite (@rvagg) 209 | 210 | ### 0.0.1 & 0.0.2 @ Jan 2013 211 | 212 | * finalise rename of internal components to LevelDOWN, removing LevelUP references (@rvagg) 213 | * complete documentation of current API (@rvagg) 214 | 215 | ### 0.0.0 @ Jan 06 2013 216 | 217 | * extracted from LevelUP as stand-alone package (@rvagg) 218 | -------------------------------------------------------------------------------- /src/database.cc: -------------------------------------------------------------------------------- 1 | /* Copyright (c) 2012-2016 LevelDOWN contributors 2 | * See list at 3 | * MIT License 4 | */ 5 | 6 | #include 7 | #include 8 | 9 | #include 10 | #include 11 | 12 | #include "flatrocks.h" 13 | #include "database.h" 14 | #include "async.h" 15 | #include "database_async.h" 16 | #include "batch.h" 17 | #include "iterator.h" 18 | #include "common.h" 19 | 20 | namespace flat_rocks { 21 | 22 | static Nan::Persistent database_constructor; 23 | 24 | Database::Database (const v8::Local& from) 25 | : location(new Nan::Utf8String(from)) 26 | , db(NULL) 27 | , currentIteratorId(0) 28 | , pendingCloseWorker(NULL) 29 | , blockCache(NULL) 30 | , filterPolicy(NULL) {}; 31 | 32 | Database::~Database () { 33 | if (db != NULL) 34 | delete db; 35 | delete location; 36 | }; 37 | 38 | /* Calls from worker threads, NO V8 HERE *****************************/ 39 | 40 | rocksdb::Status Database::OpenDatabase ( 41 | rocksdb::Options* options 42 | ) { 43 | return rocksdb::DB::Open(*options, **location, &db); 44 | } 45 | 46 | rocksdb::Status Database::PutToDatabase ( 47 | rocksdb::WriteOptions* options 48 | , rocksdb::Slice key 49 | , rocksdb::Slice value 50 | ) { 51 | return db->Put(*options, key, value); 52 | } 53 | 54 | rocksdb::Status Database::GetFromDatabase ( 55 | rocksdb::ReadOptions* options 56 | , rocksdb::Slice key 57 | , std::string& value 58 | ) { 59 | return db->Get(*options, key, &value); 60 | } 61 | 62 | rocksdb::Status Database::DeleteFromDatabase ( 63 | rocksdb::WriteOptions* options 64 | , rocksdb::Slice key 65 | ) { 66 | return db->Delete(*options, key); 67 | } 68 | 69 | rocksdb::Status Database::WriteBatchToDatabase ( 70 | rocksdb::WriteOptions* options 71 | , rocksdb::WriteBatch* batch 
72 | ) { 73 | return db->Write(*options, batch); 74 | } 75 | 76 | uint64_t Database::ApproximateSizeFromDatabase (const rocksdb::Range* range) { 77 | uint64_t size; 78 | db->GetApproximateSizes(range, 1, &size); 79 | return size; 80 | } 81 | 82 | void Database::GetPropertyFromDatabase ( 83 | const rocksdb::Slice& property 84 | , std::string* value) { 85 | 86 | db->GetProperty(property, value); 87 | } 88 | 89 | rocksdb::Iterator* Database::NewIterator (rocksdb::ReadOptions* options) { 90 | return db->NewIterator(*options); 91 | } 92 | 93 | const rocksdb::Snapshot* Database::NewSnapshot () { 94 | return db->GetSnapshot(); 95 | } 96 | 97 | void Database::ReleaseSnapshot (const rocksdb::Snapshot* snapshot) { 98 | return db->ReleaseSnapshot(snapshot); 99 | } 100 | 101 | void Database::ReleaseIterator (uint32_t id) { 102 | // called each time an Iterator is End()ed, in the main thread 103 | // we have to remove our reference to it and if it's the last iterator 104 | // we have to invoke a pending CloseWorker if there is one 105 | // if there is a pending CloseWorker it means that we're waiting for 106 | // iterators to end before we can close them 107 | iterators.erase(id); 108 | if (iterators.empty() && pendingCloseWorker != NULL) { 109 | Nan::AsyncQueueWorker((AsyncWorker*)pendingCloseWorker); 110 | pendingCloseWorker = NULL; 111 | } 112 | } 113 | 114 | void Database::CloseDatabase () { 115 | delete db; 116 | db = NULL; 117 | 118 | blockCache = NULL; 119 | filterPolicy = NULL; 120 | } 121 | 122 | /* V8 exposed functions *****************************/ 123 | 124 | NAN_METHOD(FlatRocks) { 125 | v8::Local location = info[0].As(); 126 | info.GetReturnValue().Set(Database::NewInstance(location)); 127 | } 128 | 129 | void Database::Init () { 130 | v8::Local tpl = Nan::New(Database::New); 131 | database_constructor.Reset(tpl); 132 | tpl->SetClassName(Nan::New("Database").ToLocalChecked()); 133 | tpl->InstanceTemplate()->SetInternalFieldCount(1); 134 | Nan::SetPrototypeMethod(tpl, "open", Database::Open); 135 | Nan::SetPrototypeMethod(tpl, "close", Database::Close); 136 | Nan::SetPrototypeMethod(tpl, "put", Database::Put); 137 | Nan::SetPrototypeMethod(tpl, "get", Database::Get); 138 | Nan::SetPrototypeMethod(tpl, "del", Database::Delete); 139 | Nan::SetPrototypeMethod(tpl, "batch", Database::Batch); 140 | Nan::SetPrototypeMethod(tpl, "approximateSize", Database::ApproximateSize); 141 | Nan::SetPrototypeMethod(tpl, "getProperty", Database::GetProperty); 142 | Nan::SetPrototypeMethod(tpl, "iterator", Database::Iterator); 143 | } 144 | 145 | NAN_METHOD(Database::New) { 146 | Database* obj = new Database(info[0]); 147 | obj->Wrap(info.This()); 148 | 149 | info.GetReturnValue().Set(info.This()); 150 | } 151 | 152 | v8::Local Database::NewInstance (v8::Local &location) { 153 | Nan::EscapableHandleScope scope; 154 | 155 | v8::Local instance; 156 | 157 | v8::Local constructorHandle = 158 | Nan::New(database_constructor); 159 | 160 | v8::Local argv[] = { location }; 161 | instance = constructorHandle->GetFunction()->NewInstance(1, argv); 162 | 163 | return scope.Escape(instance); 164 | } 165 | 166 | NAN_METHOD(Database::Open) { 167 | LD_METHOD_SETUP_COMMON(open, 0, 1) 168 | 169 | bool createIfMissing = BooleanOptionValue(optionsObj, "createIfMissing", true); 170 | bool errorIfExists = BooleanOptionValue(optionsObj, "errorIfExists"); 171 | bool compression = BooleanOptionValue(optionsObj, "compression", true); 172 | 173 | uint32_t cacheSize = UInt32OptionValue(optionsObj, "cacheSize", 8 << 20); 174 | uint32_t 
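/* A sketch of the open() call that ends up in this method, with every
   option spelled out at the default value read here (all fields are
   optional; only the callback is required):

     db.open({
         createIfMissing: true
       , errorIfExists: false
       , compression: true                // Snappy when enabled
       , cacheSize: 8 * 1024 * 1024
       , writeBufferSize: 4 * 1024 * 1024
       , blockSize: 4096
       , maxOpenFiles: 1000
       , blockRestartInterval: 16
     }, function (err) {
       // db is usable (or err is set) once OpenWorker completes
     })
*/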
writeBufferSize = UInt32OptionValue( 175 | optionsObj 176 | , "writeBufferSize" 177 | , 4 << 20 178 | ); 179 | uint32_t blockSize = UInt32OptionValue(optionsObj, "blockSize", 4096); 180 | uint32_t maxOpenFiles = UInt32OptionValue(optionsObj, "maxOpenFiles", 1000); 181 | uint32_t blockRestartInterval = UInt32OptionValue( 182 | optionsObj 183 | , "blockRestartInterval" 184 | , 16 185 | ); 186 | 187 | database->blockCache = rocksdb::NewLRUCache(cacheSize); 188 | database->filterPolicy.reset(rocksdb::NewBloomFilterPolicy(10)); 189 | 190 | OpenWorker* worker = new OpenWorker( 191 | database 192 | , new Nan::Callback(callback) 193 | , database->blockCache 194 | , database->filterPolicy 195 | , createIfMissing 196 | , errorIfExists 197 | , compression 198 | , writeBufferSize 199 | , blockSize 200 | , maxOpenFiles 201 | , blockRestartInterval 202 | ); 203 | // persist to prevent accidental GC 204 | v8::Local _this = info.This(); 205 | worker->SaveToPersistent("database", _this); 206 | Nan::AsyncQueueWorker(worker); 207 | } 208 | 209 | // for an empty callback to iterator.end() 210 | NAN_METHOD(EmptyMethod) { 211 | } 212 | 213 | NAN_METHOD(Database::Close) { 214 | LD_METHOD_SETUP_COMMON_ONEARG(close) 215 | 216 | CloseWorker* worker = new CloseWorker( 217 | database 218 | , new Nan::Callback(callback) 219 | ); 220 | // persist to prevent accidental GC 221 | v8::Local _this = info.This(); 222 | worker->SaveToPersistent("database", _this); 223 | 224 | if (!database->iterators.empty()) { 225 | // yikes, we still have iterators open! naughty naughty. 226 | // we have to queue up a CloseWorker and manually close each of them. 227 | // the CloseWorker will be invoked once they are all cleaned up 228 | database->pendingCloseWorker = worker; 229 | 230 | for ( 231 | std::map< uint32_t, flat_rocks::Iterator * >::iterator it 232 | = database->iterators.begin() 233 | ; it != database->iterators.end() 234 | ; ++it) { 235 | 236 | // for each iterator still open, first check if it's already in 237 | // the process of ending (ended==true means an async End() is 238 | // in progress), if not, then we call End() with an empty callback 239 | // function and wait for it to hit ReleaseIterator() where our 240 | // CloseWorker will be invoked 241 | 242 | flat_rocks::Iterator *iterator = it->second; 243 | 244 | if (!iterator->ended) { 245 | v8::Local end = 246 | v8::Local::Cast(iterator->handle()->Get( 247 | Nan::New("end").ToLocalChecked())); 248 | v8::Local argv[] = { 249 | Nan::New(EmptyMethod)->GetFunction() // empty callback 250 | }; 251 | Nan::MakeCallback( 252 | iterator->handle() 253 | , end 254 | , 1 255 | , argv 256 | ); 257 | } 258 | } 259 | } else { 260 | Nan::AsyncQueueWorker(worker); 261 | } 262 | } 263 | 264 | NAN_METHOD(Database::Put) { 265 | LD_METHOD_SETUP_COMMON(put, 2, 3) 266 | 267 | v8::Local keyHandle = info[0].As(); 268 | v8::Local valueHandle = info[1].As(); 269 | LD_STRING_OR_BUFFER_TO_SLICE(key, keyHandle, key); 270 | LD_STRING_OR_BUFFER_TO_SLICE(value, valueHandle, value); 271 | 272 | bool sync = BooleanOptionValue(optionsObj, "sync"); 273 | 274 | WriteWorker* worker = new WriteWorker( 275 | database 276 | , new Nan::Callback(callback) 277 | , key 278 | , value 279 | , sync 280 | , keyHandle 281 | , valueHandle 282 | ); 283 | 284 | // persist to prevent accidental GC 285 | v8::Local _this = info.This(); 286 | worker->SaveToPersistent("database", _this); 287 | Nan::AsyncQueueWorker(worker); 288 | } 289 | 290 | NAN_METHOD(Database::Get) { 291 | LD_METHOD_SETUP_COMMON(get, 1, 2) 292 | 293 | 
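/* How this method is reached from JavaScript, with both options shown at
   their defaults (the key is illustrative):

     db.get('foo', { asBuffer: true, fillCache: true }, function (err, value) {
       // value arrives as a Buffer unless asBuffer is set to false,
       // in which case it is converted to a String
     })

   The key may be a String or a Buffer; it is turned into a rocksdb::Slice
   just below and disposed again in IOWorker::WorkComplete(). */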
v8::Local keyHandle = info[0].As(); 294 | LD_STRING_OR_BUFFER_TO_SLICE(key, keyHandle, key); 295 | 296 | bool asBuffer = BooleanOptionValue(optionsObj, "asBuffer", true); 297 | bool fillCache = BooleanOptionValue(optionsObj, "fillCache", true); 298 | 299 | ReadWorker* worker = new ReadWorker( 300 | database 301 | , new Nan::Callback(callback) 302 | , key 303 | , asBuffer 304 | , fillCache 305 | , keyHandle 306 | ); 307 | // persist to prevent accidental GC 308 | v8::Local _this = info.This(); 309 | worker->SaveToPersistent("database", _this); 310 | Nan::AsyncQueueWorker(worker); 311 | } 312 | 313 | NAN_METHOD(Database::Delete) { 314 | LD_METHOD_SETUP_COMMON(del, 1, 2) 315 | 316 | v8::Local keyHandle = info[0].As(); 317 | LD_STRING_OR_BUFFER_TO_SLICE(key, keyHandle, key); 318 | 319 | bool sync = BooleanOptionValue(optionsObj, "sync"); 320 | 321 | DeleteWorker* worker = new DeleteWorker( 322 | database 323 | , new Nan::Callback(callback) 324 | , key 325 | , sync 326 | , keyHandle 327 | ); 328 | // persist to prevent accidental GC 329 | v8::Local _this = info.This(); 330 | worker->SaveToPersistent("database", _this); 331 | Nan::AsyncQueueWorker(worker); 332 | } 333 | 334 | NAN_METHOD(Database::Batch) { 335 | if ((info.Length() == 0 || info.Length() == 1) && !info[0]->IsArray()) { 336 | v8::Local optionsObj; 337 | if (info.Length() > 0 && info[0]->IsObject()) { 338 | optionsObj = info[0].As(); 339 | } 340 | info.GetReturnValue().Set(Batch::NewInstance(info.This(), optionsObj)); 341 | return; 342 | } 343 | 344 | LD_METHOD_SETUP_COMMON(batch, 1, 2); 345 | 346 | bool sync = BooleanOptionValue(optionsObj, "sync"); 347 | 348 | v8::Local array = v8::Local::Cast(info[0]); 349 | 350 | rocksdb::WriteBatch* batch = new rocksdb::WriteBatch(); 351 | bool hasData = false; 352 | 353 | for (unsigned int i = 0; i < array->Length(); i++) { 354 | if (!array->Get(i)->IsObject()) 355 | continue; 356 | 357 | v8::Local obj = v8::Local::Cast(array->Get(i)); 358 | v8::Local keyBuffer = obj->Get(Nan::New("key").ToLocalChecked()); 359 | v8::Local type = obj->Get(Nan::New("type").ToLocalChecked()); 360 | 361 | if (type->StrictEquals(Nan::New("del").ToLocalChecked())) { 362 | LD_STRING_OR_BUFFER_TO_SLICE(key, keyBuffer, key) 363 | 364 | batch->Delete(key); 365 | if (!hasData) 366 | hasData = true; 367 | 368 | DisposeStringOrBufferFromSlice(keyBuffer, key); 369 | } else if (type->StrictEquals(Nan::New("put").ToLocalChecked())) { 370 | v8::Local valueBuffer = obj->Get(Nan::New("value").ToLocalChecked()); 371 | 372 | LD_STRING_OR_BUFFER_TO_SLICE(key, keyBuffer, key) 373 | LD_STRING_OR_BUFFER_TO_SLICE(value, valueBuffer, value) 374 | batch->Put(key, value); 375 | if (!hasData) 376 | hasData = true; 377 | 378 | DisposeStringOrBufferFromSlice(keyBuffer, key); 379 | DisposeStringOrBufferFromSlice(valueBuffer, value); 380 | } 381 | } 382 | 383 | // don't allow an empty batch through 384 | if (hasData) { 385 | BatchWorker* worker = new BatchWorker( 386 | database 387 | , new Nan::Callback(callback) 388 | , batch 389 | , sync 390 | ); 391 | // persist to prevent accidental GC 392 | v8::Local _this = info.This(); 393 | worker->SaveToPersistent("database", _this); 394 | Nan::AsyncQueueWorker(worker); 395 | } else { 396 | LD_RUN_CALLBACK(callback, 0, NULL); 397 | } 398 | } 399 | 400 | NAN_METHOD(Database::ApproximateSize) { 401 | v8::Local startHandle = info[0].As(); 402 | v8::Local endHandle = info[1].As(); 403 | 404 | LD_METHOD_SETUP_COMMON(approximateSize, -1, 2) 405 | 406 | LD_STRING_OR_BUFFER_TO_SLICE(start, startHandle, start) 
407 | LD_STRING_OR_BUFFER_TO_SLICE(end, endHandle, end) 408 | 409 | ApproximateSizeWorker* worker = new ApproximateSizeWorker( 410 | database 411 | , new Nan::Callback(callback) 412 | , start 413 | , end 414 | , startHandle 415 | , endHandle 416 | ); 417 | // persist to prevent accidental GC 418 | v8::Local _this = info.This(); 419 | worker->SaveToPersistent("database", _this); 420 | Nan::AsyncQueueWorker(worker); 421 | } 422 | 423 | NAN_METHOD(Database::GetProperty) { 424 | v8::Local propertyHandle = info[0].As(); 425 | v8::Local callback; // for LD_STRING_OR_BUFFER_TO_SLICE 426 | 427 | LD_STRING_OR_BUFFER_TO_SLICE(property, propertyHandle, property) 428 | 429 | flat_rocks::Database* database = 430 | Nan::ObjectWrap::Unwrap(info.This()); 431 | 432 | std::string* value = new std::string(); 433 | database->GetPropertyFromDatabase(property, value); 434 | v8::Local returnValue 435 | = Nan::New(value->c_str(), value->length()).ToLocalChecked(); 436 | delete value; 437 | delete[] property.data(); 438 | 439 | info.GetReturnValue().Set(returnValue); 440 | } 441 | 442 | NAN_METHOD(Database::Iterator) { 443 | Database* database = Nan::ObjectWrap::Unwrap(info.This()); 444 | 445 | v8::Local optionsObj; 446 | if (info.Length() > 0 && info[0]->IsObject()) { 447 | optionsObj = v8::Local::Cast(info[0]); 448 | } 449 | 450 | // each iterator gets a unique id for this Database, so we can 451 | // easily store & lookup on our `iterators` map 452 | uint32_t id = database->currentIteratorId++; 453 | v8::TryCatch try_catch; 454 | v8::Local iteratorHandle = Iterator::NewInstance( 455 | info.This() 456 | , Nan::New(id) 457 | , optionsObj 458 | ); 459 | if (try_catch.HasCaught()) { 460 | // NB: node::FatalException can segfault here if there is no room on stack. 461 | return Nan::ThrowError("Fatal Error in Database::Iterator!"); 462 | } 463 | 464 | flat_rocks::Iterator *iterator = 465 | Nan::ObjectWrap::Unwrap(iteratorHandle); 466 | 467 | database->iterators[id] = iterator; 468 | 469 | // register our iterator 470 | /* 471 | v8::Local obj = Nan::New(); 472 | obj->Set(Nan::New("iterator"), iteratorHandle); 473 | Nan::Persistent persistent; 474 | persistent.Reset(nan_isolate, obj); 475 | database->iterators.insert(std::pair< uint32_t, Nan::Persistent & > 476 | (id, persistent)); 477 | */ 478 | 479 | info.GetReturnValue().Set(iteratorHandle); 480 | } 481 | 482 | 483 | } // namespace flat_rocks 484 | -------------------------------------------------------------------------------- /src/iterator.cc: -------------------------------------------------------------------------------- 1 | /* Copyright (c) 2012-2016 LevelDOWN contributors 2 | * See list at 3 | * MIT License 4 | */ 5 | 6 | #include 7 | #include 8 | 9 | #include "database.h" 10 | #include "iterator.h" 11 | #include "iterator_async.h" 12 | #include "common.h" 13 | 14 | namespace flat_rocks { 15 | 16 | static Nan::Persistent iterator_constructor; 17 | 18 | Iterator::Iterator ( 19 | Database* database 20 | , uint32_t id 21 | , rocksdb::Slice* start 22 | , std::string* end 23 | , bool reverse 24 | , bool keys 25 | , bool values 26 | , int limit 27 | , std::string* lt 28 | , std::string* lte 29 | , std::string* gt 30 | , std::string* gte 31 | , bool fillCache 32 | , bool keyAsBuffer 33 | , bool valueAsBuffer 34 | , v8::Local &startHandle 35 | , size_t highWaterMark 36 | ) : database(database) 37 | , id(id) 38 | , start(start) 39 | , end(end) 40 | , reverse(reverse) 41 | , keys(keys) 42 | , values(values) 43 | , limit(limit) 44 | , lt(lt) 45 | , lte(lte) 46 | , gt(gt) 
47 | , gte(gte) 48 | , highWaterMark(highWaterMark) 49 | , keyAsBuffer(keyAsBuffer) 50 | , valueAsBuffer(valueAsBuffer) 51 | { 52 | Nan::HandleScope scope; 53 | 54 | v8::Local obj = Nan::New(); 55 | if (!startHandle.IsEmpty()) 56 | obj->Set(Nan::New("start").ToLocalChecked(), startHandle); 57 | persistentHandle.Reset(obj); 58 | 59 | options = new rocksdb::ReadOptions(); 60 | options->fill_cache = fillCache; 61 | // get a snapshot of the current state 62 | options->snapshot = database->NewSnapshot(); 63 | dbIterator = NULL; 64 | count = 0; 65 | seeking = false; 66 | nexting = false; 67 | ended = false; 68 | endWorker = NULL; 69 | }; 70 | 71 | Iterator::~Iterator () { 72 | delete options; 73 | if (!persistentHandle.IsEmpty()) 74 | persistentHandle.Reset(); 75 | if (start != NULL) 76 | delete start; 77 | if (end != NULL) 78 | delete end; 79 | }; 80 | 81 | bool Iterator::GetIterator () { 82 | if (dbIterator == NULL) { 83 | dbIterator = database->NewIterator(options); 84 | 85 | if (start != NULL) { 86 | dbIterator->Seek(*start); 87 | 88 | if (reverse) { 89 | if (!dbIterator->Valid()) { 90 | // if it's past the last key, step back 91 | dbIterator->SeekToLast(); 92 | } else { 93 | std::string key_ = dbIterator->key().ToString(); 94 | 95 | if (lt != NULL) { 96 | if (lt->compare(key_) <= 0) 97 | dbIterator->Prev(); 98 | } else if (lte != NULL) { 99 | if (lte->compare(key_) < 0) 100 | dbIterator->Prev(); 101 | } else if (start != NULL) { 102 | if (start->compare(key_)) 103 | dbIterator->Prev(); 104 | } 105 | } 106 | 107 | if (dbIterator->Valid() && lt != NULL) { 108 | if (lt->compare(dbIterator->key().ToString()) <= 0) 109 | dbIterator->Prev(); 110 | } 111 | } else { 112 | if (dbIterator->Valid() && gt != NULL 113 | && gt->compare(dbIterator->key().ToString()) == 0) 114 | dbIterator->Next(); 115 | } 116 | } else if (reverse) { 117 | dbIterator->SeekToLast(); 118 | } else { 119 | dbIterator->SeekToFirst(); 120 | } 121 | 122 | return true; 123 | } 124 | return false; 125 | } 126 | 127 | bool Iterator::Read (std::string& key, std::string& value) { 128 | // if it's not the first call, move to next item. 129 | if (!GetIterator() && !seeking) { 130 | if (reverse) 131 | dbIterator->Prev(); 132 | else 133 | dbIterator->Next(); 134 | } 135 | 136 | seeking = false; 137 | 138 | // now check if this is the end or not, if not then return the key & value 139 | if (dbIterator->Valid()) { 140 | std::string key_ = dbIterator->key().ToString(); 141 | int isEnd = end == NULL ? 1 : end->compare(key_); 142 | 143 | if ((limit < 0 || ++count <= limit) 144 | && (end == NULL 145 | || (reverse && (isEnd <= 0)) 146 | || (!reverse && (isEnd >= 0))) 147 | && ( lt != NULL ? (lt->compare(key_) > 0) 148 | : lte != NULL ? (lte->compare(key_) >= 0) 149 | : true ) 150 | && ( gt != NULL ? (gt->compare(key_) < 0) 151 | : gte != NULL ? 
(gte->compare(key_) <= 0) 152 | : true ) 153 | ) { 154 | if (keys) 155 | key.assign(dbIterator->key().data(), dbIterator->key().size()); 156 | if (values) 157 | value.assign(dbIterator->value().data(), dbIterator->value().size()); 158 | return true; 159 | } 160 | } 161 | 162 | return false; 163 | } 164 | 165 | bool Iterator::IteratorNext (std::vector >& result) { 166 | size_t size = 0; 167 | while(true) { 168 | std::string key, value; 169 | bool ok = Read(key, value); 170 | 171 | if (ok) { 172 | result.push_back(std::make_pair(key, value)); 173 | size = size + key.size() + value.size(); 174 | 175 | if (size > highWaterMark) 176 | return true; 177 | 178 | } else { 179 | return false; 180 | } 181 | } 182 | } 183 | 184 | rocksdb::Status Iterator::IteratorStatus () { 185 | return dbIterator->status(); 186 | } 187 | 188 | void Iterator::IteratorEnd () { 189 | //TODO: could return it->status() 190 | delete dbIterator; 191 | dbIterator = NULL; 192 | } 193 | 194 | void Iterator::Release () { 195 | database->ReleaseIterator(id); 196 | } 197 | 198 | void checkEndCallback (Iterator* iterator) { 199 | iterator->nexting = false; 200 | if (iterator->endWorker != NULL) { 201 | Nan::AsyncQueueWorker(iterator->endWorker); 202 | iterator->endWorker = NULL; 203 | } 204 | } 205 | 206 | NAN_METHOD(Iterator::Seek) { 207 | Iterator* iterator = Nan::ObjectWrap::Unwrap(info.This()); 208 | iterator->GetIterator(); 209 | rocksdb::Iterator* dbIterator = iterator->dbIterator; 210 | Nan::Utf8String key(info[0]); 211 | 212 | dbIterator->Seek(*key); 213 | iterator->seeking = true; 214 | 215 | if (dbIterator->Valid()) { 216 | int cmp = dbIterator->key().compare(*key); 217 | if (cmp > 0 && iterator->reverse) { 218 | dbIterator->Prev(); 219 | } else if (cmp < 0 && !iterator->reverse) { 220 | dbIterator->Next(); 221 | } 222 | } else { 223 | if (iterator->reverse) { 224 | dbIterator->SeekToLast(); 225 | } else { 226 | dbIterator->SeekToFirst(); 227 | } 228 | if (dbIterator->Valid()) { 229 | int cmp = dbIterator->key().compare(*key); 230 | if (cmp > 0 && iterator->reverse) { 231 | dbIterator->SeekToFirst(); 232 | dbIterator->Prev(); 233 | } else if (cmp < 0 && !iterator->reverse) { 234 | dbIterator->SeekToLast(); 235 | dbIterator->Next(); 236 | } 237 | } 238 | } 239 | 240 | info.GetReturnValue().Set(info.Holder()); 241 | } 242 | 243 | NAN_METHOD(Iterator::Next) { 244 | Iterator* iterator = Nan::ObjectWrap::Unwrap(info.This()); 245 | 246 | if (!info[0]->IsFunction()) { 247 | return Nan::ThrowError("next() requires a callback argument"); 248 | } 249 | 250 | v8::Local callback = info[0].As(); 251 | 252 | NextWorker* worker = new NextWorker( 253 | iterator 254 | , new Nan::Callback(callback) 255 | , checkEndCallback 256 | ); 257 | // persist to prevent accidental GC 258 | v8::Local _this = info.This(); 259 | worker->SaveToPersistent("iterator", _this); 260 | iterator->nexting = true; 261 | Nan::AsyncQueueWorker(worker); 262 | 263 | info.GetReturnValue().Set(info.Holder()); 264 | } 265 | 266 | NAN_METHOD(Iterator::End) { 267 | Iterator* iterator = Nan::ObjectWrap::Unwrap(info.This()); 268 | 269 | if (!info[0]->IsFunction()) { 270 | return Nan::ThrowError("end() requires a callback argument"); 271 | } 272 | 273 | if (!iterator->ended) { 274 | v8::Local callback = v8::Local::Cast(info[0]); 275 | 276 | EndWorker* worker = new EndWorker( 277 | iterator 278 | , new Nan::Callback(callback) 279 | ); 280 | // persist to prevent accidental GC 281 | v8::Local _this = info.This(); 282 | worker->SaveToPersistent("iterator", _this); 283 | 
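/* Note that next() above completes with a chunk of entries rather than a
   single pair: NextWorker keeps calling Read() until roughly highWaterMark
   bytes have been collected and packs the chunk into one flat array in
   reverse order. A hypothetical sketch of draining such a chunk from
   JavaScript (names are illustrative, not the actual iterator.js
   internals):

     iterator.next(function onChunk (err, cache, finished) {
       while (cache.length) {
         var key = cache.pop()
         var value = cache.pop()
         // handle (key, value); pairs come back in iteration order
       }
       if (!finished)
         iterator.next(onChunk)   // fetch the following chunk
       else
         iterator.end(function (err) {})
     })
*/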
iterator->ended = true; 284 | 285 | if (iterator->nexting) { 286 | // waiting for a next() to return, queue the end 287 | iterator->endWorker = worker; 288 | } else { 289 | Nan::AsyncQueueWorker(worker); 290 | } 291 | } 292 | 293 | info.GetReturnValue().Set(info.Holder()); 294 | } 295 | 296 | void Iterator::Init () { 297 | v8::Local tpl = 298 | Nan::New(Iterator::New); 299 | iterator_constructor.Reset(tpl); 300 | tpl->SetClassName(Nan::New("Iterator").ToLocalChecked()); 301 | tpl->InstanceTemplate()->SetInternalFieldCount(1); 302 | Nan::SetPrototypeMethod(tpl, "seek", Iterator::Seek); 303 | Nan::SetPrototypeMethod(tpl, "next", Iterator::Next); 304 | Nan::SetPrototypeMethod(tpl, "end", Iterator::End); 305 | } 306 | 307 | v8::Local Iterator::NewInstance ( 308 | v8::Local database 309 | , v8::Local id 310 | , v8::Local optionsObj 311 | ) { 312 | 313 | Nan::EscapableHandleScope scope; 314 | 315 | v8::Local instance; 316 | v8::Local constructorHandle = 317 | Nan::New(iterator_constructor); 318 | 319 | if (optionsObj.IsEmpty()) { 320 | v8::Local argv[2] = { database, id }; 321 | instance = constructorHandle->GetFunction()->NewInstance(2, argv); 322 | } else { 323 | v8::Local argv[3] = { database, id, optionsObj }; 324 | instance = constructorHandle->GetFunction()->NewInstance(3, argv); 325 | } 326 | 327 | return scope.Escape(instance); 328 | } 329 | 330 | NAN_METHOD(Iterator::New) { 331 | Database* database = Nan::ObjectWrap::Unwrap(info[0]->ToObject()); 332 | 333 | //TODO: remove this, it's only here to make LD_STRING_OR_BUFFER_TO_SLICE happy 334 | v8::Local callback; 335 | 336 | v8::Local startHandle; 337 | rocksdb::Slice* start = NULL; 338 | std::string* end = NULL; 339 | int limit = -1; 340 | // default highWaterMark from Readble-streams 341 | size_t highWaterMark = 16 * 1024; 342 | 343 | v8::Local id = info[1]; 344 | 345 | v8::Local optionsObj; 346 | 347 | v8::Local ltHandle; 348 | v8::Local lteHandle; 349 | v8::Local gtHandle; 350 | v8::Local gteHandle; 351 | 352 | std::string* lt = NULL; 353 | std::string* lte = NULL; 354 | std::string* gt = NULL; 355 | std::string* gte = NULL; 356 | 357 | //default to forward. 
358 | bool reverse = false; 359 | 360 | if (info.Length() > 1 && info[2]->IsObject()) { 361 | optionsObj = v8::Local::Cast(info[2]); 362 | 363 | reverse = BooleanOptionValue(optionsObj, "reverse"); 364 | 365 | if (optionsObj->Has(Nan::New("start").ToLocalChecked()) 366 | && (node::Buffer::HasInstance(optionsObj->Get(Nan::New("start").ToLocalChecked())) 367 | || optionsObj->Get(Nan::New("start").ToLocalChecked())->IsString())) { 368 | 369 | startHandle = optionsObj->Get(Nan::New("start").ToLocalChecked()).As(); 370 | 371 | // ignore start if it has size 0 since a Slice can't have length 0 372 | if (StringOrBufferLength(startHandle) > 0) { 373 | LD_STRING_OR_BUFFER_TO_SLICE(_start, startHandle, start) 374 | start = new rocksdb::Slice(_start.data(), _start.size()); 375 | } 376 | } 377 | 378 | if (optionsObj->Has(Nan::New("end").ToLocalChecked()) 379 | && (node::Buffer::HasInstance(optionsObj->Get(Nan::New("end").ToLocalChecked())) 380 | || optionsObj->Get(Nan::New("end").ToLocalChecked())->IsString())) { 381 | 382 | v8::Local endBuffer = optionsObj->Get(Nan::New("end").ToLocalChecked()); 383 | 384 | // ignore end if it has size 0 since a Slice can't have length 0 385 | if (StringOrBufferLength(endBuffer) > 0) { 386 | LD_STRING_OR_BUFFER_TO_SLICE(_end, endBuffer, end) 387 | end = new std::string(_end.data(), _end.size()); 388 | } 389 | } 390 | 391 | if (!optionsObj.IsEmpty() && optionsObj->Has(Nan::New("limit").ToLocalChecked())) { 392 | limit = v8::Local::Cast(optionsObj->Get( 393 | Nan::New("limit").ToLocalChecked()))->Value(); 394 | } 395 | 396 | if (optionsObj->Has(Nan::New("highWaterMark").ToLocalChecked())) { 397 | highWaterMark = v8::Local::Cast(optionsObj->Get( 398 | Nan::New("highWaterMark").ToLocalChecked()))->Value(); 399 | } 400 | 401 | if (optionsObj->Has(Nan::New("lt").ToLocalChecked()) 402 | && (node::Buffer::HasInstance(optionsObj->Get(Nan::New("lt").ToLocalChecked())) 403 | || optionsObj->Get(Nan::New("lt").ToLocalChecked())->IsString())) { 404 | 405 | v8::Local ltBuffer = optionsObj->Get(Nan::New("lt").ToLocalChecked()); 406 | 407 | // ignore end if it has size 0 since a Slice can't have length 0 408 | if (StringOrBufferLength(ltBuffer) > 0) { 409 | LD_STRING_OR_BUFFER_TO_SLICE(_lt, ltBuffer, lt) 410 | lt = new std::string(_lt.data(), _lt.size()); 411 | if (reverse) 412 | start = new rocksdb::Slice(_lt.data(), _lt.size()); 413 | } 414 | } 415 | 416 | if (optionsObj->Has(Nan::New("lte").ToLocalChecked()) 417 | && (node::Buffer::HasInstance(optionsObj->Get(Nan::New("lte").ToLocalChecked())) 418 | || optionsObj->Get(Nan::New("lte").ToLocalChecked())->IsString())) { 419 | 420 | v8::Local lteBuffer = optionsObj->Get(Nan::New("lte").ToLocalChecked()); 421 | 422 | // ignore end if it has size 0 since a Slice can't have length 0 423 | if (StringOrBufferLength(lteBuffer) > 0) { 424 | LD_STRING_OR_BUFFER_TO_SLICE(_lte, lteBuffer, lte) 425 | lte = new std::string(_lte.data(), _lte.size()); 426 | if (reverse) 427 | start = new rocksdb::Slice(_lte.data(), _lte.size()); 428 | } 429 | } 430 | 431 | if (optionsObj->Has(Nan::New("gt").ToLocalChecked()) 432 | && (node::Buffer::HasInstance(optionsObj->Get(Nan::New("gt").ToLocalChecked())) 433 | || optionsObj->Get(Nan::New("gt").ToLocalChecked())->IsString())) { 434 | 435 | v8::Local gtBuffer = optionsObj->Get(Nan::New("gt").ToLocalChecked()); 436 | 437 | // ignore end if it has size 0 since a Slice can't have length 0 438 | if (StringOrBufferLength(gtBuffer) > 0) { 439 | LD_STRING_OR_BUFFER_TO_SLICE(_gt, gtBuffer, gt) 440 | gt = new 
std::string(_gt.data(), _gt.size()); 441 | if (!reverse) 442 | start = new rocksdb::Slice(_gt.data(), _gt.size()); 443 | } 444 | } 445 | 446 | if (optionsObj->Has(Nan::New("gte").ToLocalChecked()) 447 | && (node::Buffer::HasInstance(optionsObj->Get(Nan::New("gte").ToLocalChecked())) 448 | || optionsObj->Get(Nan::New("gte").ToLocalChecked())->IsString())) { 449 | 450 | v8::Local gteBuffer = optionsObj->Get(Nan::New("gte").ToLocalChecked()); 451 | 452 | // ignore end if it has size 0 since a Slice can't have length 0 453 | if (StringOrBufferLength(gteBuffer) > 0) { 454 | LD_STRING_OR_BUFFER_TO_SLICE(_gte, gteBuffer, gte) 455 | gte = new std::string(_gte.data(), _gte.size()); 456 | if (!reverse) 457 | start = new rocksdb::Slice(_gte.data(), _gte.size()); 458 | } 459 | } 460 | 461 | } 462 | 463 | bool keys = BooleanOptionValue(optionsObj, "keys", true); 464 | bool values = BooleanOptionValue(optionsObj, "values", true); 465 | bool keyAsBuffer = BooleanOptionValue(optionsObj, "keyAsBuffer", true); 466 | bool valueAsBuffer = BooleanOptionValue(optionsObj, "valueAsBuffer", true); 467 | bool fillCache = BooleanOptionValue(optionsObj, "fillCache"); 468 | 469 | Iterator* iterator = new Iterator( 470 | database 471 | , (uint32_t)id->Int32Value() 472 | , start 473 | , end 474 | , reverse 475 | , keys 476 | , values 477 | , limit 478 | , lt 479 | , lte 480 | , gt 481 | , gte 482 | , fillCache 483 | , keyAsBuffer 484 | , valueAsBuffer 485 | , startHandle 486 | , highWaterMark 487 | ); 488 | iterator->Wrap(info.This()); 489 | 490 | info.GetReturnValue().Set(info.This()); 491 | } 492 | 493 | } // namespace flat_rocks 494 | --------------------------------------------------------------------------------
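Note: the options parsed in Iterator::New above (reverse, limit, highWaterMark, keys/values, the gt/gte/lt/lte and legacy start/end bounds, and the *AsBuffer flags) are supplied from JavaScript through the leveldown-style iterator API. The sketch below is an assumed usage example, not part of the repository: it presumes the package's main export is the abstract-leveldown-compatible constructor (as the tests' require('../') suggests), and the package name and database location are hypothetical.

    const leveldown = require('flat-rocks')          // assumed package name
    const db = leveldown('/tmp/flat-rocks-example')  // hypothetical location

    db.open(function (err) {
      if (err) throw err

      // these option names are the ones Iterator::New reads from optionsObj;
      // highWaterMark caps the byte size of each batch collected by IteratorNext
      const it = db.iterator({
          gt: 'a'
        , lt: 'n'
        , reverse: false
        , limit: 10
        , keyAsBuffer: false
        , valueAsBuffer: false
      })

      it.next(function (err, key, value) {
        if (err) throw err
        // key and value are undefined once the range is exhausted
        it.end(function (err) {
          if (err) throw err
          db.close(function () {})
        })
      })
    })
--------------------------------------------------------------------------------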