├── .gitignore ├── README.md ├── conf ├── bench.json ├── settings.json └── xjdrew.conf ├── deactivate.zsh ├── env.sh ├── env.zsh ├── include └── leveldb │ ├── c.h │ ├── cache.h │ ├── comparator.h │ ├── db.h │ ├── env.h │ ├── filter_policy.h │ ├── iterator.h │ ├── options.h │ ├── slice.h │ ├── status.h │ ├── table.h │ ├── table_builder.h │ └── write_batch.h ├── src ├── app │ ├── agent.go │ ├── command.go │ ├── context.go │ ├── leveldb.go │ ├── log.go │ ├── main.go │ ├── monitor.go │ ├── storer.go │ └── util.go ├── conf │ ├── AUTHORS │ ├── COPYRIGHT │ ├── conf.go │ ├── conf_test.go │ ├── get.go │ ├── read.go │ └── write.go ├── kmonitor │ └── kmonitor.go ├── levigo │ ├── .gitignore │ ├── LICENSE │ ├── README.md │ ├── batch.go │ ├── cache.go │ ├── comparator.go │ ├── conv.go │ ├── db.go │ ├── doc.go │ ├── env.go │ ├── examples │ │ └── comparator_example.go │ ├── filterpolicy.go │ ├── iterator.go │ ├── leveldb_test.go │ ├── options.go │ └── version.go ├── redis │ ├── redis.go │ └── redis_test.go └── unqlitego │ ├── .gitignore │ ├── .travis.yml │ ├── LICENSE │ ├── README.md │ ├── unqlite.c │ ├── unqlite.go │ ├── unqlite.h │ └── unqlite_test.go └── tests └── test_read_leveldb_in_json_rpc.py /.gitignore: -------------------------------------------------------------------------------- 1 | pkg/ 2 | bin/ 3 | data/ 4 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## env 2 | Depends on leveldb, snappy, levigo. 3 | 4 | Please refer to [this blog](http://xjdrew.github.io/blog/2014/09/09/build-static-leveldb-with-snappy/) for how to build a snappy-enabled leveldb. 
5 | 6 | ## Build 7 | ``` 8 | source env.sh 9 | go install app 10 | ``` 11 | 12 | ## Test 13 | * start a redis-server listen on 127.0.0.1:6300 14 | * bin/app conf/settings.json 15 | * use a redis client, run command as follow 16 | 17 | ``` 18 | hset key1 v1 1 19 | rename key1 key2 20 | ``` 21 | 22 | -------------------------------------------------------------------------------- /conf/bench.json: -------------------------------------------------------------------------------- 1 | { 2 | "redis":{ 3 | "host":"172.16.100.205:3536", 4 | "password":"foobared", 5 | "db":2, 6 | "event":"rename_to", 7 | "expire": false 8 | }, 9 | 10 | "leveldb":{ 11 | "dbname":"./data/redis6" 12 | }, 13 | 14 | "manager":{ 15 | "addr":"0.0.0.0:3580" 16 | }, 17 | 18 | "log":{ 19 | "file":"/tmp/land1.log", 20 | "level" : 3 21 | }, 22 | 23 | "agent":{ 24 | "addr":"0.0.0.0:5200" 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /conf/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "redis":{ 3 | "host":"127.0.0.1:6400", 4 | "password":"foobared", 5 | "db":0, 6 | "event":"rename_to" 7 | }, 8 | 9 | "leveldb":{ 10 | "dbname":"./data/redis_mirror" 11 | }, 12 | 13 | "manager":{ 14 | "addr":"0.0.0.0:3580" 15 | }, 16 | 17 | "log":{ 18 | "file":"/tmp/land1.log" 19 | }, 20 | 21 | "zinc":{ 22 | "addr":"0.0.0.0:5200" 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /conf/xjdrew.conf: -------------------------------------------------------------------------------- 1 | [redis] 2 | host = 127.0.0.1:6400 3 | password = foobared 4 | db = 2 5 | 6 | # pub/sub 7 | events = gE 8 | channel = __keyevent@2__:rename_to 9 | 10 | [leveldb] 11 | dbname = ./redis1 12 | 13 | [manager] 14 | addr = 0.0.0.0:3580 15 | 16 | [log] 17 | file = /tmp/land1.log 18 | 19 | -------------------------------------------------------------------------------- /deactivate.zsh: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env zsh 2 | 3 | export GOPATH=$OLDGOPATH 4 | unset OLDGOPATH 5 | export CGO_CFLAGS=-I$GOPATH/include 6 | export CGO_LDFLAGS=-L$GOPATH/bin 7 | export LD_LIBRARY_PATH=$GOPATH/bin 8 | -------------------------------------------------------------------------------- /env.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export GOPATH=`pwd` 4 | export GOMAXPROCS=3 5 | export CGO_CFLAGS=-I$GOPATH/include 6 | export CGO_LDFLAGS=-L$GOPATH/bin 7 | export LD_LIBRARY_PATH=$GOPATH/bin 8 | 9 | 10 | -------------------------------------------------------------------------------- /env.zsh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env zsh 2 | 3 | export OLDGOPATH=$GOPATH 4 | export GOPATH=`pwd` 5 | export CGO_CFLAGS=-I$GOPATH/include 6 | export CGO_LDFLAGS=-L$GOPATH/bin 7 | export LD_LIBRARY_PATH=$GOPATH/bin 8 | export GOMAXPROCS=3 9 | -------------------------------------------------------------------------------- /include/leveldb/c.h: -------------------------------------------------------------------------------- 1 | /* Copyright (c) 2011 The LevelDB Authors. All rights reserved. 2 | Use of this source code is governed by a BSD-style license that can be 3 | found in the LICENSE file. See the AUTHORS file for names of contributors. 4 | 5 | C bindings for leveldb. May be useful as a stable ABI that can be 6 | used by programs that keep leveldb in a shared library, or for 7 | a JNI api. 8 | 9 | Does not support: 10 | . getters for the option types 11 | . custom comparators that implement key shortening 12 | . custom iter, db, env, cache implementations using just the C bindings 13 | 14 | Some conventions: 15 | 16 | (1) We expose just opaque struct pointers and functions to clients. 
17 | This allows us to change internal representations without having to 18 | recompile clients. 19 | 20 | (2) For simplicity, there is no equivalent to the Slice type. Instead, 21 | the caller has to pass the pointer and length as separate 22 | arguments. 23 | 24 | (3) Errors are represented by a null-terminated c string. NULL 25 | means no error. All operations that can raise an error are passed 26 | a "char** errptr" as the last argument. One of the following must 27 | be true on entry: 28 | *errptr == NULL 29 | *errptr points to a malloc()ed null-terminated error message 30 | (On Windows, *errptr must have been malloc()-ed by this library.) 31 | On success, a leveldb routine leaves *errptr unchanged. 32 | On failure, leveldb frees the old value of *errptr and 33 | set *errptr to a malloc()ed error message. 34 | 35 | (4) Bools have the type unsigned char (0 == false; rest == true) 36 | 37 | (5) All of the pointer arguments must be non-NULL. 38 | */ 39 | 40 | #ifndef STORAGE_LEVELDB_INCLUDE_C_H_ 41 | #define STORAGE_LEVELDB_INCLUDE_C_H_ 42 | 43 | #ifdef __cplusplus 44 | extern "C" { 45 | #endif 46 | 47 | #include 48 | #include 49 | #include 50 | 51 | /* Exported types */ 52 | 53 | typedef struct leveldb_t leveldb_t; 54 | typedef struct leveldb_cache_t leveldb_cache_t; 55 | typedef struct leveldb_comparator_t leveldb_comparator_t; 56 | typedef struct leveldb_env_t leveldb_env_t; 57 | typedef struct leveldb_filelock_t leveldb_filelock_t; 58 | typedef struct leveldb_filterpolicy_t leveldb_filterpolicy_t; 59 | typedef struct leveldb_iterator_t leveldb_iterator_t; 60 | typedef struct leveldb_logger_t leveldb_logger_t; 61 | typedef struct leveldb_options_t leveldb_options_t; 62 | typedef struct leveldb_randomfile_t leveldb_randomfile_t; 63 | typedef struct leveldb_readoptions_t leveldb_readoptions_t; 64 | typedef struct leveldb_seqfile_t leveldb_seqfile_t; 65 | typedef struct leveldb_snapshot_t leveldb_snapshot_t; 66 | typedef struct leveldb_writablefile_t 
leveldb_writablefile_t; 67 | typedef struct leveldb_writebatch_t leveldb_writebatch_t; 68 | typedef struct leveldb_writeoptions_t leveldb_writeoptions_t; 69 | 70 | /* DB operations */ 71 | 72 | extern leveldb_t* leveldb_open( 73 | const leveldb_options_t* options, 74 | const char* name, 75 | char** errptr); 76 | 77 | extern void leveldb_close(leveldb_t* db); 78 | 79 | extern void leveldb_put( 80 | leveldb_t* db, 81 | const leveldb_writeoptions_t* options, 82 | const char* key, size_t keylen, 83 | const char* val, size_t vallen, 84 | char** errptr); 85 | 86 | extern void leveldb_delete( 87 | leveldb_t* db, 88 | const leveldb_writeoptions_t* options, 89 | const char* key, size_t keylen, 90 | char** errptr); 91 | 92 | extern void leveldb_write( 93 | leveldb_t* db, 94 | const leveldb_writeoptions_t* options, 95 | leveldb_writebatch_t* batch, 96 | char** errptr); 97 | 98 | /* Returns NULL if not found. A malloc()ed array otherwise. 99 | Stores the length of the array in *vallen. */ 100 | extern char* leveldb_get( 101 | leveldb_t* db, 102 | const leveldb_readoptions_t* options, 103 | const char* key, size_t keylen, 104 | size_t* vallen, 105 | char** errptr); 106 | 107 | extern leveldb_iterator_t* leveldb_create_iterator( 108 | leveldb_t* db, 109 | const leveldb_readoptions_t* options); 110 | 111 | extern const leveldb_snapshot_t* leveldb_create_snapshot( 112 | leveldb_t* db); 113 | 114 | extern void leveldb_release_snapshot( 115 | leveldb_t* db, 116 | const leveldb_snapshot_t* snapshot); 117 | 118 | /* Returns NULL if property name is unknown. 119 | Else returns a pointer to a malloc()-ed null-terminated value. 
*/ 120 | extern char* leveldb_property_value( 121 | leveldb_t* db, 122 | const char* propname); 123 | 124 | extern void leveldb_approximate_sizes( 125 | leveldb_t* db, 126 | int num_ranges, 127 | const char* const* range_start_key, const size_t* range_start_key_len, 128 | const char* const* range_limit_key, const size_t* range_limit_key_len, 129 | uint64_t* sizes); 130 | 131 | extern void leveldb_compact_range( 132 | leveldb_t* db, 133 | const char* start_key, size_t start_key_len, 134 | const char* limit_key, size_t limit_key_len); 135 | 136 | /* Management operations */ 137 | 138 | extern void leveldb_destroy_db( 139 | const leveldb_options_t* options, 140 | const char* name, 141 | char** errptr); 142 | 143 | extern void leveldb_repair_db( 144 | const leveldb_options_t* options, 145 | const char* name, 146 | char** errptr); 147 | 148 | /* Iterator */ 149 | 150 | extern void leveldb_iter_destroy(leveldb_iterator_t*); 151 | extern unsigned char leveldb_iter_valid(const leveldb_iterator_t*); 152 | extern void leveldb_iter_seek_to_first(leveldb_iterator_t*); 153 | extern void leveldb_iter_seek_to_last(leveldb_iterator_t*); 154 | extern void leveldb_iter_seek(leveldb_iterator_t*, const char* k, size_t klen); 155 | extern void leveldb_iter_next(leveldb_iterator_t*); 156 | extern void leveldb_iter_prev(leveldb_iterator_t*); 157 | extern const char* leveldb_iter_key(const leveldb_iterator_t*, size_t* klen); 158 | extern const char* leveldb_iter_value(const leveldb_iterator_t*, size_t* vlen); 159 | extern void leveldb_iter_get_error(const leveldb_iterator_t*, char** errptr); 160 | 161 | /* Write batch */ 162 | 163 | extern leveldb_writebatch_t* leveldb_writebatch_create(); 164 | extern void leveldb_writebatch_destroy(leveldb_writebatch_t*); 165 | extern void leveldb_writebatch_clear(leveldb_writebatch_t*); 166 | extern void leveldb_writebatch_put( 167 | leveldb_writebatch_t*, 168 | const char* key, size_t klen, 169 | const char* val, size_t vlen); 170 | extern void 
leveldb_writebatch_delete( 171 | leveldb_writebatch_t*, 172 | const char* key, size_t klen); 173 | extern void leveldb_writebatch_iterate( 174 | leveldb_writebatch_t*, 175 | void* state, 176 | void (*put)(void*, const char* k, size_t klen, const char* v, size_t vlen), 177 | void (*deleted)(void*, const char* k, size_t klen)); 178 | 179 | /* Options */ 180 | 181 | extern leveldb_options_t* leveldb_options_create(); 182 | extern void leveldb_options_destroy(leveldb_options_t*); 183 | extern void leveldb_options_set_comparator( 184 | leveldb_options_t*, 185 | leveldb_comparator_t*); 186 | extern void leveldb_options_set_filter_policy( 187 | leveldb_options_t*, 188 | leveldb_filterpolicy_t*); 189 | extern void leveldb_options_set_create_if_missing( 190 | leveldb_options_t*, unsigned char); 191 | extern void leveldb_options_set_error_if_exists( 192 | leveldb_options_t*, unsigned char); 193 | extern void leveldb_options_set_paranoid_checks( 194 | leveldb_options_t*, unsigned char); 195 | extern void leveldb_options_set_env(leveldb_options_t*, leveldb_env_t*); 196 | extern void leveldb_options_set_info_log(leveldb_options_t*, leveldb_logger_t*); 197 | extern void leveldb_options_set_write_buffer_size(leveldb_options_t*, size_t); 198 | extern void leveldb_options_set_max_open_files(leveldb_options_t*, int); 199 | extern void leveldb_options_set_cache(leveldb_options_t*, leveldb_cache_t*); 200 | extern void leveldb_options_set_block_size(leveldb_options_t*, size_t); 201 | extern void leveldb_options_set_block_restart_interval(leveldb_options_t*, int); 202 | 203 | enum { 204 | leveldb_no_compression = 0, 205 | leveldb_snappy_compression = 1 206 | }; 207 | extern void leveldb_options_set_compression(leveldb_options_t*, int); 208 | 209 | /* Comparator */ 210 | 211 | extern leveldb_comparator_t* leveldb_comparator_create( 212 | void* state, 213 | void (*destructor)(void*), 214 | int (*compare)( 215 | void*, 216 | const char* a, size_t alen, 217 | const char* b, size_t blen), 
218 | const char* (*name)(void*)); 219 | extern void leveldb_comparator_destroy(leveldb_comparator_t*); 220 | 221 | /* Filter policy */ 222 | 223 | extern leveldb_filterpolicy_t* leveldb_filterpolicy_create( 224 | void* state, 225 | void (*destructor)(void*), 226 | char* (*create_filter)( 227 | void*, 228 | const char* const* key_array, const size_t* key_length_array, 229 | int num_keys, 230 | size_t* filter_length), 231 | unsigned char (*key_may_match)( 232 | void*, 233 | const char* key, size_t length, 234 | const char* filter, size_t filter_length), 235 | const char* (*name)(void*)); 236 | extern void leveldb_filterpolicy_destroy(leveldb_filterpolicy_t*); 237 | 238 | extern leveldb_filterpolicy_t* leveldb_filterpolicy_create_bloom( 239 | int bits_per_key); 240 | 241 | /* Read options */ 242 | 243 | extern leveldb_readoptions_t* leveldb_readoptions_create(); 244 | extern void leveldb_readoptions_destroy(leveldb_readoptions_t*); 245 | extern void leveldb_readoptions_set_verify_checksums( 246 | leveldb_readoptions_t*, 247 | unsigned char); 248 | extern void leveldb_readoptions_set_fill_cache( 249 | leveldb_readoptions_t*, unsigned char); 250 | extern void leveldb_readoptions_set_snapshot( 251 | leveldb_readoptions_t*, 252 | const leveldb_snapshot_t*); 253 | 254 | /* Write options */ 255 | 256 | extern leveldb_writeoptions_t* leveldb_writeoptions_create(); 257 | extern void leveldb_writeoptions_destroy(leveldb_writeoptions_t*); 258 | extern void leveldb_writeoptions_set_sync( 259 | leveldb_writeoptions_t*, unsigned char); 260 | 261 | /* Cache */ 262 | 263 | extern leveldb_cache_t* leveldb_cache_create_lru(size_t capacity); 264 | extern void leveldb_cache_destroy(leveldb_cache_t* cache); 265 | 266 | /* Env */ 267 | 268 | extern leveldb_env_t* leveldb_create_default_env(); 269 | extern void leveldb_env_destroy(leveldb_env_t*); 270 | 271 | /* Utility */ 272 | 273 | /* Calls free(ptr). 
274 | REQUIRES: ptr was malloc()-ed and returned by one of the routines 275 | in this file. Note that in certain cases (typically on Windows), you 276 | may need to call this routine instead of free(ptr) to dispose of 277 | malloc()-ed memory returned by this library. */ 278 | extern void leveldb_free(void* ptr); 279 | 280 | /* Return the major version number for this release. */ 281 | extern int leveldb_major_version(); 282 | 283 | /* Return the minor version number for this release. */ 284 | extern int leveldb_minor_version(); 285 | 286 | #ifdef __cplusplus 287 | } /* end extern "C" */ 288 | #endif 289 | 290 | #endif /* STORAGE_LEVELDB_INCLUDE_C_H_ */ 291 | -------------------------------------------------------------------------------- /include/leveldb/cache.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2011 The LevelDB Authors. All rights reserved. 2 | // Use of this source code is governed by a BSD-style license that can be 3 | // found in the LICENSE file. See the AUTHORS file for names of contributors. 4 | // 5 | // A Cache is an interface that maps keys to values. It has internal 6 | // synchronization and may be safely accessed concurrently from 7 | // multiple threads. It may automatically evict entries to make room 8 | // for new entries. Values have a specified charge against the cache 9 | // capacity. For example, a cache where the values are variable 10 | // length strings, may use the length of the string as the charge for 11 | // the string. 12 | // 13 | // A builtin cache implementation with a least-recently-used eviction 14 | // policy is provided. Clients may use their own implementations if 15 | // they want something more sophisticated (like scan-resistance, a 16 | // custom eviction policy, variable cache sizing, etc.) 
17 | 18 | #ifndef STORAGE_LEVELDB_INCLUDE_CACHE_H_ 19 | #define STORAGE_LEVELDB_INCLUDE_CACHE_H_ 20 | 21 | #include 22 | #include "leveldb/slice.h" 23 | 24 | namespace leveldb { 25 | 26 | class Cache; 27 | 28 | // Create a new cache with a fixed size capacity. This implementation 29 | // of Cache uses a least-recently-used eviction policy. 30 | extern Cache* NewLRUCache(size_t capacity); 31 | 32 | class Cache { 33 | public: 34 | Cache() { } 35 | 36 | // Destroys all existing entries by calling the "deleter" 37 | // function that was passed to the constructor. 38 | virtual ~Cache(); 39 | 40 | // Opaque handle to an entry stored in the cache. 41 | struct Handle { }; 42 | 43 | // Insert a mapping from key->value into the cache and assign it 44 | // the specified charge against the total cache capacity. 45 | // 46 | // Returns a handle that corresponds to the mapping. The caller 47 | // must call this->Release(handle) when the returned mapping is no 48 | // longer needed. 49 | // 50 | // When the inserted entry is no longer needed, the key and 51 | // value will be passed to "deleter". 52 | virtual Handle* Insert(const Slice& key, void* value, size_t charge, 53 | void (*deleter)(const Slice& key, void* value)) = 0; 54 | 55 | // If the cache has no mapping for "key", returns NULL. 56 | // 57 | // Else return a handle that corresponds to the mapping. The caller 58 | // must call this->Release(handle) when the returned mapping is no 59 | // longer needed. 60 | virtual Handle* Lookup(const Slice& key) = 0; 61 | 62 | // Release a mapping returned by a previous Lookup(). 63 | // REQUIRES: handle must not have been released yet. 64 | // REQUIRES: handle must have been returned by a method on *this. 65 | virtual void Release(Handle* handle) = 0; 66 | 67 | // Return the value encapsulated in a handle returned by a 68 | // successful Lookup(). 69 | // REQUIRES: handle must not have been released yet. 70 | // REQUIRES: handle must have been returned by a method on *this. 
71 | virtual void* Value(Handle* handle) = 0; 72 | 73 | // If the cache contains entry for key, erase it. Note that the 74 | // underlying entry will be kept around until all existing handles 75 | // to it have been released. 76 | virtual void Erase(const Slice& key) = 0; 77 | 78 | // Return a new numeric id. May be used by multiple clients who are 79 | // sharing the same cache to partition the key space. Typically the 80 | // client will allocate a new id at startup and prepend the id to 81 | // its cache keys. 82 | virtual uint64_t NewId() = 0; 83 | 84 | private: 85 | void LRU_Remove(Handle* e); 86 | void LRU_Append(Handle* e); 87 | void Unref(Handle* e); 88 | 89 | struct Rep; 90 | Rep* rep_; 91 | 92 | // No copying allowed 93 | Cache(const Cache&); 94 | void operator=(const Cache&); 95 | }; 96 | 97 | } // namespace leveldb 98 | 99 | #endif // STORAGE_LEVELDB_UTIL_CACHE_H_ 100 | -------------------------------------------------------------------------------- /include/leveldb/comparator.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2011 The LevelDB Authors. All rights reserved. 2 | // Use of this source code is governed by a BSD-style license that can be 3 | // found in the LICENSE file. See the AUTHORS file for names of contributors. 4 | 5 | #ifndef STORAGE_LEVELDB_INCLUDE_COMPARATOR_H_ 6 | #define STORAGE_LEVELDB_INCLUDE_COMPARATOR_H_ 7 | 8 | #include 9 | 10 | namespace leveldb { 11 | 12 | class Slice; 13 | 14 | // A Comparator object provides a total order across slices that are 15 | // used as keys in an sstable or a database. A Comparator implementation 16 | // must be thread-safe since leveldb may invoke its methods concurrently 17 | // from multiple threads. 18 | class Comparator { 19 | public: 20 | virtual ~Comparator(); 21 | 22 | // Three-way comparison. 
Returns value: 23 | // < 0 iff "a" < "b", 24 | // == 0 iff "a" == "b", 25 | // > 0 iff "a" > "b" 26 | virtual int Compare(const Slice& a, const Slice& b) const = 0; 27 | 28 | // The name of the comparator. Used to check for comparator 29 | // mismatches (i.e., a DB created with one comparator is 30 | // accessed using a different comparator. 31 | // 32 | // The client of this package should switch to a new name whenever 33 | // the comparator implementation changes in a way that will cause 34 | // the relative ordering of any two keys to change. 35 | // 36 | // Names starting with "leveldb." are reserved and should not be used 37 | // by any clients of this package. 38 | virtual const char* Name() const = 0; 39 | 40 | // Advanced functions: these are used to reduce the space requirements 41 | // for internal data structures like index blocks. 42 | 43 | // If *start < limit, changes *start to a short string in [start,limit). 44 | // Simple comparator implementations may return with *start unchanged, 45 | // i.e., an implementation of this method that does nothing is correct. 46 | virtual void FindShortestSeparator( 47 | std::string* start, 48 | const Slice& limit) const = 0; 49 | 50 | // Changes *key to a short string >= *key. 51 | // Simple comparator implementations may return with *key unchanged, 52 | // i.e., an implementation of this method that does nothing is correct. 53 | virtual void FindShortSuccessor(std::string* key) const = 0; 54 | }; 55 | 56 | // Return a builtin comparator that uses lexicographic byte-wise 57 | // ordering. The result remains the property of this module and 58 | // must not be deleted. 
59 | extern const Comparator* BytewiseComparator(); 60 | 61 | } // namespace leveldb 62 | 63 | #endif // STORAGE_LEVELDB_INCLUDE_COMPARATOR_H_ 64 | -------------------------------------------------------------------------------- /include/leveldb/db.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2011 The LevelDB Authors. All rights reserved. 2 | // Use of this source code is governed by a BSD-style license that can be 3 | // found in the LICENSE file. See the AUTHORS file for names of contributors. 4 | 5 | #ifndef STORAGE_LEVELDB_INCLUDE_DB_H_ 6 | #define STORAGE_LEVELDB_INCLUDE_DB_H_ 7 | 8 | #include 9 | #include 10 | #include "leveldb/iterator.h" 11 | #include "leveldb/options.h" 12 | 13 | namespace leveldb { 14 | 15 | // Update Makefile if you change these 16 | static const int kMajorVersion = 1; 17 | static const int kMinorVersion = 16; 18 | 19 | struct Options; 20 | struct ReadOptions; 21 | struct WriteOptions; 22 | class WriteBatch; 23 | 24 | // Abstract handle to particular state of a DB. 25 | // A Snapshot is an immutable object and can therefore be safely 26 | // accessed from multiple threads without any external synchronization. 27 | class Snapshot { 28 | protected: 29 | virtual ~Snapshot(); 30 | }; 31 | 32 | // A range of keys 33 | struct Range { 34 | Slice start; // Included in the range 35 | Slice limit; // Not included in the range 36 | 37 | Range() { } 38 | Range(const Slice& s, const Slice& l) : start(s), limit(l) { } 39 | }; 40 | 41 | // A DB is a persistent ordered map from keys to values. 42 | // A DB is safe for concurrent access from multiple threads without 43 | // any external synchronization. 44 | class DB { 45 | public: 46 | // Open the database with the specified "name". 47 | // Stores a pointer to a heap-allocated database in *dbptr and returns 48 | // OK on success. 49 | // Stores NULL in *dbptr and returns a non-OK status on error. 
50 | // Caller should delete *dbptr when it is no longer needed. 51 | static Status Open(const Options& options, 52 | const std::string& name, 53 | DB** dbptr); 54 | 55 | DB() { } 56 | virtual ~DB(); 57 | 58 | // Set the database entry for "key" to "value". Returns OK on success, 59 | // and a non-OK status on error. 60 | // Note: consider setting options.sync = true. 61 | virtual Status Put(const WriteOptions& options, 62 | const Slice& key, 63 | const Slice& value) = 0; 64 | 65 | // Remove the database entry (if any) for "key". Returns OK on 66 | // success, and a non-OK status on error. It is not an error if "key" 67 | // did not exist in the database. 68 | // Note: consider setting options.sync = true. 69 | virtual Status Delete(const WriteOptions& options, const Slice& key) = 0; 70 | 71 | // Apply the specified updates to the database. 72 | // Returns OK on success, non-OK on failure. 73 | // Note: consider setting options.sync = true. 74 | virtual Status Write(const WriteOptions& options, WriteBatch* updates) = 0; 75 | 76 | // If the database contains an entry for "key" store the 77 | // corresponding value in *value and return OK. 78 | // 79 | // If there is no entry for "key" leave *value unchanged and return 80 | // a status for which Status::IsNotFound() returns true. 81 | // 82 | // May return some other Status on an error. 83 | virtual Status Get(const ReadOptions& options, 84 | const Slice& key, std::string* value) = 0; 85 | 86 | // Return a heap-allocated iterator over the contents of the database. 87 | // The result of NewIterator() is initially invalid (caller must 88 | // call one of the Seek methods on the iterator before using it). 89 | // 90 | // Caller should delete the iterator when it is no longer needed. 91 | // The returned iterator should be deleted before this db is deleted. 92 | virtual Iterator* NewIterator(const ReadOptions& options) = 0; 93 | 94 | // Return a handle to the current DB state. 
Iterators created with 95 | // this handle will all observe a stable snapshot of the current DB 96 | // state. The caller must call ReleaseSnapshot(result) when the 97 | // snapshot is no longer needed. 98 | virtual const Snapshot* GetSnapshot() = 0; 99 | 100 | // Release a previously acquired snapshot. The caller must not 101 | // use "snapshot" after this call. 102 | virtual void ReleaseSnapshot(const Snapshot* snapshot) = 0; 103 | 104 | // DB implementations can export properties about their state 105 | // via this method. If "property" is a valid property understood by this 106 | // DB implementation, fills "*value" with its current value and returns 107 | // true. Otherwise returns false. 108 | // 109 | // 110 | // Valid property names include: 111 | // 112 | // "leveldb.num-files-at-level" - return the number of files at level , 113 | // where is an ASCII representation of a level number (e.g. "0"). 114 | // "leveldb.stats" - returns a multi-line string that describes statistics 115 | // about the internal operation of the DB. 116 | // "leveldb.sstables" - returns a multi-line string that describes all 117 | // of the sstables that make up the db contents. 118 | virtual bool GetProperty(const Slice& property, std::string* value) = 0; 119 | 120 | // For each i in [0,n-1], store in "sizes[i]", the approximate 121 | // file system space used by keys in "[range[i].start .. range[i].limit)". 122 | // 123 | // Note that the returned sizes measure file system space usage, so 124 | // if the user data compresses by a factor of ten, the returned 125 | // sizes will be one-tenth the size of the corresponding user data size. 126 | // 127 | // The results may not include the sizes of recently written data. 128 | virtual void GetApproximateSizes(const Range* range, int n, 129 | uint64_t* sizes) = 0; 130 | 131 | // Compact the underlying storage for the key range [*begin,*end]. 
132 | // In particular, deleted and overwritten versions are discarded, 133 | // and the data is rearranged to reduce the cost of operations 134 | // needed to access the data. This operation should typically only 135 | // be invoked by users who understand the underlying implementation. 136 | // 137 | // begin==NULL is treated as a key before all keys in the database. 138 | // end==NULL is treated as a key after all keys in the database. 139 | // Therefore the following call will compact the entire database: 140 | // db->CompactRange(NULL, NULL); 141 | virtual void CompactRange(const Slice* begin, const Slice* end) = 0; 142 | 143 | private: 144 | // No copying allowed 145 | DB(const DB&); 146 | void operator=(const DB&); 147 | }; 148 | 149 | // Destroy the contents of the specified database. 150 | // Be very careful using this method. 151 | Status DestroyDB(const std::string& name, const Options& options); 152 | 153 | // If a DB cannot be opened, you may attempt to call this method to 154 | // resurrect as much of the contents of the database as possible. 155 | // Some data may be lost, so be careful when calling this function 156 | // on a database that contains important information. 157 | Status RepairDB(const std::string& dbname, const Options& options); 158 | 159 | } // namespace leveldb 160 | 161 | #endif // STORAGE_LEVELDB_INCLUDE_DB_H_ 162 | -------------------------------------------------------------------------------- /include/leveldb/env.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2011 The LevelDB Authors. All rights reserved. 2 | // Use of this source code is governed by a BSD-style license that can be 3 | // found in the LICENSE file. See the AUTHORS file for names of contributors. 4 | // 5 | // An Env is an interface used by the leveldb implementation to access 6 | // operating system functionality like the filesystem etc. 
Callers 7 | // may wish to provide a custom Env object when opening a database to 8 | // get fine gain control; e.g., to rate limit file system operations. 9 | // 10 | // All Env implementations are safe for concurrent access from 11 | // multiple threads without any external synchronization. 12 | 13 | #ifndef STORAGE_LEVELDB_INCLUDE_ENV_H_ 14 | #define STORAGE_LEVELDB_INCLUDE_ENV_H_ 15 | 16 | #include 17 | #include 18 | #include 19 | #include 20 | #include "leveldb/status.h" 21 | 22 | namespace leveldb { 23 | 24 | class FileLock; 25 | class Logger; 26 | class RandomAccessFile; 27 | class SequentialFile; 28 | class Slice; 29 | class WritableFile; 30 | 31 | class Env { 32 | public: 33 | Env() { } 34 | virtual ~Env(); 35 | 36 | // Return a default environment suitable for the current operating 37 | // system. Sophisticated users may wish to provide their own Env 38 | // implementation instead of relying on this default environment. 39 | // 40 | // The result of Default() belongs to leveldb and must never be deleted. 41 | static Env* Default(); 42 | 43 | // Create a brand new sequentially-readable file with the specified name. 44 | // On success, stores a pointer to the new file in *result and returns OK. 45 | // On failure stores NULL in *result and returns non-OK. If the file does 46 | // not exist, returns a non-OK status. 47 | // 48 | // The returned file will only be accessed by one thread at a time. 49 | virtual Status NewSequentialFile(const std::string& fname, 50 | SequentialFile** result) = 0; 51 | 52 | // Create a brand new random access read-only file with the 53 | // specified name. On success, stores a pointer to the new file in 54 | // *result and returns OK. On failure stores NULL in *result and 55 | // returns non-OK. If the file does not exist, returns a non-OK 56 | // status. 57 | // 58 | // The returned file may be concurrently accessed by multiple threads. 
59 | virtual Status NewRandomAccessFile(const std::string& fname, 60 | RandomAccessFile** result) = 0; 61 | 62 | // Create an object that writes to a new file with the specified 63 | // name. Deletes any existing file with the same name and creates a 64 | // new file. On success, stores a pointer to the new file in 65 | // *result and returns OK. On failure stores NULL in *result and 66 | // returns non-OK. 67 | // 68 | // The returned file will only be accessed by one thread at a time. 69 | virtual Status NewWritableFile(const std::string& fname, 70 | WritableFile** result) = 0; 71 | 72 | // Returns true iff the named file exists. 73 | virtual bool FileExists(const std::string& fname) = 0; 74 | 75 | // Store in *result the names of the children of the specified directory. 76 | // The names are relative to "dir". 77 | // Original contents of *results are dropped. 78 | virtual Status GetChildren(const std::string& dir, 79 | std::vector* result) = 0; 80 | 81 | // Delete the named file. 82 | virtual Status DeleteFile(const std::string& fname) = 0; 83 | 84 | // Create the specified directory. 85 | virtual Status CreateDir(const std::string& dirname) = 0; 86 | 87 | // Delete the specified directory. 88 | virtual Status DeleteDir(const std::string& dirname) = 0; 89 | 90 | // Store the size of fname in *file_size. 91 | virtual Status GetFileSize(const std::string& fname, uint64_t* file_size) = 0; 92 | 93 | // Rename file src to target. 94 | virtual Status RenameFile(const std::string& src, 95 | const std::string& target) = 0; 96 | 97 | // Lock the specified file. Used to prevent concurrent access to 98 | // the same db by multiple processes. On failure, stores NULL in 99 | // *lock and returns non-OK. 100 | // 101 | // On success, stores a pointer to the object that represents the 102 | // acquired lock in *lock and returns OK. The caller should call 103 | // UnlockFile(*lock) to release the lock. If the process exits, 104 | // the lock will be automatically released. 
105 | // 106 | // If somebody else already holds the lock, finishes immediately 107 | // with a failure. I.e., this call does not wait for existing locks 108 | // to go away. 109 | // 110 | // May create the named file if it does not already exist. 111 | virtual Status LockFile(const std::string& fname, FileLock** lock) = 0; 112 | 113 | // Release the lock acquired by a previous successful call to LockFile. 114 | // REQUIRES: lock was returned by a successful LockFile() call 115 | // REQUIRES: lock has not already been unlocked. 116 | virtual Status UnlockFile(FileLock* lock) = 0; 117 | 118 | // Arrange to run "(*function)(arg)" once in a background thread. 119 | // 120 | // "function" may run in an unspecified thread. Multiple functions 121 | // added to the same Env may run concurrently in different threads. 122 | // I.e., the caller may not assume that background work items are 123 | // serialized. 124 | virtual void Schedule( 125 | void (*function)(void* arg), 126 | void* arg) = 0; 127 | 128 | // Start a new thread, invoking "function(arg)" within the new thread. 129 | // When "function(arg)" returns, the thread will be destroyed. 130 | virtual void StartThread(void (*function)(void* arg), void* arg) = 0; 131 | 132 | // *path is set to a temporary directory that can be used for testing. It may 133 | // or many not have just been created. The directory may or may not differ 134 | // between runs of the same process, but subsequent calls will return the 135 | // same directory. 136 | virtual Status GetTestDirectory(std::string* path) = 0; 137 | 138 | // Create and return a log file for storing informational messages. 139 | virtual Status NewLogger(const std::string& fname, Logger** result) = 0; 140 | 141 | // Returns the number of micro-seconds since some fixed point in time. Only 142 | // useful for computing deltas of time. 143 | virtual uint64_t NowMicros() = 0; 144 | 145 | // Sleep/delay the thread for the perscribed number of micro-seconds. 
146 | virtual void SleepForMicroseconds(int micros) = 0; 147 | 148 | private: 149 | // No copying allowed 150 | Env(const Env&); 151 | void operator=(const Env&); 152 | }; 153 | 154 | // A file abstraction for reading sequentially through a file 155 | class SequentialFile { 156 | public: 157 | SequentialFile() { } 158 | virtual ~SequentialFile(); 159 | 160 | // Read up to "n" bytes from the file. "scratch[0..n-1]" may be 161 | // written by this routine. Sets "*result" to the data that was 162 | // read (including if fewer than "n" bytes were successfully read). 163 | // May set "*result" to point at data in "scratch[0..n-1]", so 164 | // "scratch[0..n-1]" must be live when "*result" is used. 165 | // If an error was encountered, returns a non-OK status. 166 | // 167 | // REQUIRES: External synchronization 168 | virtual Status Read(size_t n, Slice* result, char* scratch) = 0; 169 | 170 | // Skip "n" bytes from the file. This is guaranteed to be no 171 | // slower that reading the same data, but may be faster. 172 | // 173 | // If end of file is reached, skipping will stop at the end of the 174 | // file, and Skip will return OK. 175 | // 176 | // REQUIRES: External synchronization 177 | virtual Status Skip(uint64_t n) = 0; 178 | 179 | private: 180 | // No copying allowed 181 | SequentialFile(const SequentialFile&); 182 | void operator=(const SequentialFile&); 183 | }; 184 | 185 | // A file abstraction for randomly reading the contents of a file. 186 | class RandomAccessFile { 187 | public: 188 | RandomAccessFile() { } 189 | virtual ~RandomAccessFile(); 190 | 191 | // Read up to "n" bytes from the file starting at "offset". 192 | // "scratch[0..n-1]" may be written by this routine. Sets "*result" 193 | // to the data that was read (including if fewer than "n" bytes were 194 | // successfully read). May set "*result" to point at data in 195 | // "scratch[0..n-1]", so "scratch[0..n-1]" must be live when 196 | // "*result" is used. 
If an error was encountered, returns a non-OK 197 | // status. 198 | // 199 | // Safe for concurrent use by multiple threads. 200 | virtual Status Read(uint64_t offset, size_t n, Slice* result, 201 | char* scratch) const = 0; 202 | 203 | private: 204 | // No copying allowed 205 | RandomAccessFile(const RandomAccessFile&); 206 | void operator=(const RandomAccessFile&); 207 | }; 208 | 209 | // A file abstraction for sequential writing. The implementation 210 | // must provide buffering since callers may append small fragments 211 | // at a time to the file. 212 | class WritableFile { 213 | public: 214 | WritableFile() { } 215 | virtual ~WritableFile(); 216 | 217 | virtual Status Append(const Slice& data) = 0; 218 | virtual Status Close() = 0; 219 | virtual Status Flush() = 0; 220 | virtual Status Sync() = 0; 221 | 222 | private: 223 | // No copying allowed 224 | WritableFile(const WritableFile&); 225 | void operator=(const WritableFile&); 226 | }; 227 | 228 | // An interface for writing log messages. 229 | class Logger { 230 | public: 231 | Logger() { } 232 | virtual ~Logger(); 233 | 234 | // Write an entry to the log file with the specified format. 235 | virtual void Logv(const char* format, va_list ap) = 0; 236 | 237 | private: 238 | // No copying allowed 239 | Logger(const Logger&); 240 | void operator=(const Logger&); 241 | }; 242 | 243 | 244 | // Identifies a locked file. 245 | class FileLock { 246 | public: 247 | FileLock() { } 248 | virtual ~FileLock(); 249 | private: 250 | // No copying allowed 251 | FileLock(const FileLock&); 252 | void operator=(const FileLock&); 253 | }; 254 | 255 | // Log the specified data to *info_log if info_log is non-NULL. 256 | extern void Log(Logger* info_log, const char* format, ...) 257 | # if defined(__GNUC__) || defined(__clang__) 258 | __attribute__((__format__ (__printf__, 2, 3))) 259 | # endif 260 | ; 261 | 262 | // A utility routine: write "data" to the named file. 
263 | extern Status WriteStringToFile(Env* env, const Slice& data, 264 | const std::string& fname); 265 | 266 | // A utility routine: read contents of named file into *data 267 | extern Status ReadFileToString(Env* env, const std::string& fname, 268 | std::string* data); 269 | 270 | // An implementation of Env that forwards all calls to another Env. 271 | // May be useful to clients who wish to override just part of the 272 | // functionality of another Env. 273 | class EnvWrapper : public Env { 274 | public: 275 | // Initialize an EnvWrapper that delegates all calls to *t 276 | explicit EnvWrapper(Env* t) : target_(t) { } 277 | virtual ~EnvWrapper(); 278 | 279 | // Return the target to which this Env forwards all calls 280 | Env* target() const { return target_; } 281 | 282 | // The following text is boilerplate that forwards all methods to target() 283 | Status NewSequentialFile(const std::string& f, SequentialFile** r) { 284 | return target_->NewSequentialFile(f, r); 285 | } 286 | Status NewRandomAccessFile(const std::string& f, RandomAccessFile** r) { 287 | return target_->NewRandomAccessFile(f, r); 288 | } 289 | Status NewWritableFile(const std::string& f, WritableFile** r) { 290 | return target_->NewWritableFile(f, r); 291 | } 292 | bool FileExists(const std::string& f) { return target_->FileExists(f); } 293 | Status GetChildren(const std::string& dir, std::vector* r) { 294 | return target_->GetChildren(dir, r); 295 | } 296 | Status DeleteFile(const std::string& f) { return target_->DeleteFile(f); } 297 | Status CreateDir(const std::string& d) { return target_->CreateDir(d); } 298 | Status DeleteDir(const std::string& d) { return target_->DeleteDir(d); } 299 | Status GetFileSize(const std::string& f, uint64_t* s) { 300 | return target_->GetFileSize(f, s); 301 | } 302 | Status RenameFile(const std::string& s, const std::string& t) { 303 | return target_->RenameFile(s, t); 304 | } 305 | Status LockFile(const std::string& f, FileLock** l) { 306 | return 
target_->LockFile(f, l); 307 | } 308 | Status UnlockFile(FileLock* l) { return target_->UnlockFile(l); } 309 | void Schedule(void (*f)(void*), void* a) { 310 | return target_->Schedule(f, a); 311 | } 312 | void StartThread(void (*f)(void*), void* a) { 313 | return target_->StartThread(f, a); 314 | } 315 | virtual Status GetTestDirectory(std::string* path) { 316 | return target_->GetTestDirectory(path); 317 | } 318 | virtual Status NewLogger(const std::string& fname, Logger** result) { 319 | return target_->NewLogger(fname, result); 320 | } 321 | uint64_t NowMicros() { 322 | return target_->NowMicros(); 323 | } 324 | void SleepForMicroseconds(int micros) { 325 | target_->SleepForMicroseconds(micros); 326 | } 327 | private: 328 | Env* target_; 329 | }; 330 | 331 | } // namespace leveldb 332 | 333 | #endif // STORAGE_LEVELDB_INCLUDE_ENV_H_ 334 | -------------------------------------------------------------------------------- /include/leveldb/filter_policy.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2012 The LevelDB Authors. All rights reserved. 2 | // Use of this source code is governed by a BSD-style license that can be 3 | // found in the LICENSE file. See the AUTHORS file for names of contributors. 4 | // 5 | // A database can be configured with a custom FilterPolicy object. 6 | // This object is responsible for creating a small filter from a set 7 | // of keys. These filters are stored in leveldb and are consulted 8 | // automatically by leveldb to decide whether or not to read some 9 | // information from disk. In many cases, a filter can cut down the 10 | // number of disk seeks form a handful to a single disk seek per 11 | // DB::Get() call. 12 | // 13 | // Most people will want to use the builtin bloom filter support (see 14 | // NewBloomFilterPolicy() below). 
15 | 16 | #ifndef STORAGE_LEVELDB_INCLUDE_FILTER_POLICY_H_ 17 | #define STORAGE_LEVELDB_INCLUDE_FILTER_POLICY_H_ 18 | 19 | #include 20 | 21 | namespace leveldb { 22 | 23 | class Slice; 24 | 25 | class FilterPolicy { 26 | public: 27 | virtual ~FilterPolicy(); 28 | 29 | // Return the name of this policy. Note that if the filter encoding 30 | // changes in an incompatible way, the name returned by this method 31 | // must be changed. Otherwise, old incompatible filters may be 32 | // passed to methods of this type. 33 | virtual const char* Name() const = 0; 34 | 35 | // keys[0,n-1] contains a list of keys (potentially with duplicates) 36 | // that are ordered according to the user supplied comparator. 37 | // Append a filter that summarizes keys[0,n-1] to *dst. 38 | // 39 | // Warning: do not change the initial contents of *dst. Instead, 40 | // append the newly constructed filter to *dst. 41 | virtual void CreateFilter(const Slice* keys, int n, std::string* dst) 42 | const = 0; 43 | 44 | // "filter" contains the data appended by a preceding call to 45 | // CreateFilter() on this class. This method must return true if 46 | // the key was in the list of keys passed to CreateFilter(). 47 | // This method may return true or false if the key was not on the 48 | // list, but it should aim to return false with a high probability. 49 | virtual bool KeyMayMatch(const Slice& key, const Slice& filter) const = 0; 50 | }; 51 | 52 | // Return a new filter policy that uses a bloom filter with approximately 53 | // the specified number of bits per key. A good value for bits_per_key 54 | // is 10, which yields a filter with ~ 1% false positive rate. 55 | // 56 | // Callers must delete the result after any database that is using the 57 | // result has been closed. 
58 | // 59 | // Note: if you are using a custom comparator that ignores some parts 60 | // of the keys being compared, you must not use NewBloomFilterPolicy() 61 | // and must provide your own FilterPolicy that also ignores the 62 | // corresponding parts of the keys. For example, if the comparator 63 | // ignores trailing spaces, it would be incorrect to use a 64 | // FilterPolicy (like NewBloomFilterPolicy) that does not ignore 65 | // trailing spaces in keys. 66 | extern const FilterPolicy* NewBloomFilterPolicy(int bits_per_key); 67 | 68 | } 69 | 70 | #endif // STORAGE_LEVELDB_INCLUDE_FILTER_POLICY_H_ 71 | -------------------------------------------------------------------------------- /include/leveldb/iterator.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2011 The LevelDB Authors. All rights reserved. 2 | // Use of this source code is governed by a BSD-style license that can be 3 | // found in the LICENSE file. See the AUTHORS file for names of contributors. 4 | // 5 | // An iterator yields a sequence of key/value pairs from a source. 6 | // The following class defines the interface. Multiple implementations 7 | // are provided by this library. In particular, iterators are provided 8 | // to access the contents of a Table or a DB. 9 | // 10 | // Multiple threads can invoke const methods on an Iterator without 11 | // external synchronization, but if any of the threads may call a 12 | // non-const method, all threads accessing the same Iterator must use 13 | // external synchronization. 14 | 15 | #ifndef STORAGE_LEVELDB_INCLUDE_ITERATOR_H_ 16 | #define STORAGE_LEVELDB_INCLUDE_ITERATOR_H_ 17 | 18 | #include "leveldb/slice.h" 19 | #include "leveldb/status.h" 20 | 21 | namespace leveldb { 22 | 23 | class Iterator { 24 | public: 25 | Iterator(); 26 | virtual ~Iterator(); 27 | 28 | // An iterator is either positioned at a key/value pair, or 29 | // not valid. 
This method returns true iff the iterator is valid. 30 | virtual bool Valid() const = 0; 31 | 32 | // Position at the first key in the source. The iterator is Valid() 33 | // after this call iff the source is not empty. 34 | virtual void SeekToFirst() = 0; 35 | 36 | // Position at the last key in the source. The iterator is 37 | // Valid() after this call iff the source is not empty. 38 | virtual void SeekToLast() = 0; 39 | 40 | // Position at the first key in the source that at or past target 41 | // The iterator is Valid() after this call iff the source contains 42 | // an entry that comes at or past target. 43 | virtual void Seek(const Slice& target) = 0; 44 | 45 | // Moves to the next entry in the source. After this call, Valid() is 46 | // true iff the iterator was not positioned at the last entry in the source. 47 | // REQUIRES: Valid() 48 | virtual void Next() = 0; 49 | 50 | // Moves to the previous entry in the source. After this call, Valid() is 51 | // true iff the iterator was not positioned at the first entry in source. 52 | // REQUIRES: Valid() 53 | virtual void Prev() = 0; 54 | 55 | // Return the key for the current entry. The underlying storage for 56 | // the returned slice is valid only until the next modification of 57 | // the iterator. 58 | // REQUIRES: Valid() 59 | virtual Slice key() const = 0; 60 | 61 | // Return the value for the current entry. The underlying storage for 62 | // the returned slice is valid only until the next modification of 63 | // the iterator. 64 | // REQUIRES: !AtEnd() && !AtStart() 65 | virtual Slice value() const = 0; 66 | 67 | // If an error has occurred, return it. Else return an ok status. 68 | virtual Status status() const = 0; 69 | 70 | // Clients are allowed to register function/arg1/arg2 triples that 71 | // will be invoked when this iterator is destroyed. 72 | // 73 | // Note that unlike all of the preceding methods, this method is 74 | // not abstract and therefore clients should not override it. 
75 | typedef void (*CleanupFunction)(void* arg1, void* arg2); 76 | void RegisterCleanup(CleanupFunction function, void* arg1, void* arg2); 77 | 78 | private: 79 | struct Cleanup { 80 | CleanupFunction function; 81 | void* arg1; 82 | void* arg2; 83 | Cleanup* next; 84 | }; 85 | Cleanup cleanup_; 86 | 87 | // No copying allowed 88 | Iterator(const Iterator&); 89 | void operator=(const Iterator&); 90 | }; 91 | 92 | // Return an empty iterator (yields nothing). 93 | extern Iterator* NewEmptyIterator(); 94 | 95 | // Return an empty iterator with the specified status. 96 | extern Iterator* NewErrorIterator(const Status& status); 97 | 98 | } // namespace leveldb 99 | 100 | #endif // STORAGE_LEVELDB_INCLUDE_ITERATOR_H_ 101 | -------------------------------------------------------------------------------- /include/leveldb/options.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2011 The LevelDB Authors. All rights reserved. 2 | // Use of this source code is governed by a BSD-style license that can be 3 | // found in the LICENSE file. See the AUTHORS file for names of contributors. 4 | 5 | #ifndef STORAGE_LEVELDB_INCLUDE_OPTIONS_H_ 6 | #define STORAGE_LEVELDB_INCLUDE_OPTIONS_H_ 7 | 8 | #include 9 | 10 | namespace leveldb { 11 | 12 | class Cache; 13 | class Comparator; 14 | class Env; 15 | class FilterPolicy; 16 | class Logger; 17 | class Snapshot; 18 | 19 | // DB contents are stored in a set of blocks, each of which holds a 20 | // sequence of key,value pairs. Each block may be compressed before 21 | // being stored in a file. The following enum describes which 22 | // compression method (if any) is used to compress a block. 23 | enum CompressionType { 24 | // NOTE: do not change the values of existing entries, as these are 25 | // part of the persistent format on disk. 
26 | kNoCompression = 0x0, 27 | kSnappyCompression = 0x1 28 | }; 29 | 30 | // Options to control the behavior of a database (passed to DB::Open) 31 | struct Options { 32 | // ------------------- 33 | // Parameters that affect behavior 34 | 35 | // Comparator used to define the order of keys in the table. 36 | // Default: a comparator that uses lexicographic byte-wise ordering 37 | // 38 | // REQUIRES: The client must ensure that the comparator supplied 39 | // here has the same name and orders keys *exactly* the same as the 40 | // comparator provided to previous open calls on the same DB. 41 | const Comparator* comparator; 42 | 43 | // If true, the database will be created if it is missing. 44 | // Default: false 45 | bool create_if_missing; 46 | 47 | // If true, an error is raised if the database already exists. 48 | // Default: false 49 | bool error_if_exists; 50 | 51 | // If true, the implementation will do aggressive checking of the 52 | // data it is processing and will stop early if it detects any 53 | // errors. This may have unforeseen ramifications: for example, a 54 | // corruption of one DB entry may cause a large number of entries to 55 | // become unreadable or for the entire DB to become unopenable. 56 | // Default: false 57 | bool paranoid_checks; 58 | 59 | // Use the specified object to interact with the environment, 60 | // e.g. to read/write files, schedule background work, etc. 61 | // Default: Env::Default() 62 | Env* env; 63 | 64 | // Any internal progress/error information generated by the db will 65 | // be written to info_log if it is non-NULL, or to a file stored 66 | // in the same directory as the DB contents if info_log is NULL. 67 | // Default: NULL 68 | Logger* info_log; 69 | 70 | // ------------------- 71 | // Parameters that affect performance 72 | 73 | // Amount of data to build up in memory (backed by an unsorted log 74 | // on disk) before converting to a sorted on-disk file. 
75 | // 76 | // Larger values increase performance, especially during bulk loads. 77 | // Up to two write buffers may be held in memory at the same time, 78 | // so you may wish to adjust this parameter to control memory usage. 79 | // Also, a larger write buffer will result in a longer recovery time 80 | // the next time the database is opened. 81 | // 82 | // Default: 4MB 83 | size_t write_buffer_size; 84 | 85 | // Number of open files that can be used by the DB. You may need to 86 | // increase this if your database has a large working set (budget 87 | // one open file per 2MB of working set). 88 | // 89 | // Default: 1000 90 | int max_open_files; 91 | 92 | // Control over blocks (user data is stored in a set of blocks, and 93 | // a block is the unit of reading from disk). 94 | 95 | // If non-NULL, use the specified cache for blocks. 96 | // If NULL, leveldb will automatically create and use an 8MB internal cache. 97 | // Default: NULL 98 | Cache* block_cache; 99 | 100 | // Approximate size of user data packed per block. Note that the 101 | // block size specified here corresponds to uncompressed data. The 102 | // actual size of the unit read from disk may be smaller if 103 | // compression is enabled. This parameter can be changed dynamically. 104 | // 105 | // Default: 4K 106 | size_t block_size; 107 | 108 | // Number of keys between restart points for delta encoding of keys. 109 | // This parameter can be changed dynamically. Most clients should 110 | // leave this parameter alone. 111 | // 112 | // Default: 16 113 | int block_restart_interval; 114 | 115 | // Compress blocks using the specified compression algorithm. This 116 | // parameter can be changed dynamically. 117 | // 118 | // Default: kSnappyCompression, which gives lightweight but fast 119 | // compression. 
120 | // 121 | // Typical speeds of kSnappyCompression on an Intel(R) Core(TM)2 2.4GHz: 122 | // ~200-500MB/s compression 123 | // ~400-800MB/s decompression 124 | // Note that these speeds are significantly faster than most 125 | // persistent storage speeds, and therefore it is typically never 126 | // worth switching to kNoCompression. Even if the input data is 127 | // incompressible, the kSnappyCompression implementation will 128 | // efficiently detect that and will switch to uncompressed mode. 129 | CompressionType compression; 130 | 131 | // If non-NULL, use the specified filter policy to reduce disk reads. 132 | // Many applications will benefit from passing the result of 133 | // NewBloomFilterPolicy() here. 134 | // 135 | // Default: NULL 136 | const FilterPolicy* filter_policy; 137 | 138 | // Create an Options object with default values for all fields. 139 | Options(); 140 | }; 141 | 142 | // Options that control read operations 143 | struct ReadOptions { 144 | // If true, all data read from underlying storage will be 145 | // verified against corresponding checksums. 146 | // Default: false 147 | bool verify_checksums; 148 | 149 | // Should the data read for this iteration be cached in memory? 150 | // Callers may wish to set this field to false for bulk scans. 151 | // Default: true 152 | bool fill_cache; 153 | 154 | // If "snapshot" is non-NULL, read as of the supplied snapshot 155 | // (which must belong to the DB that is being read and which must 156 | // not have been released). If "snapshot" is NULL, use an impliicit 157 | // snapshot of the state at the beginning of this read operation. 
158 | // Default: NULL 159 | const Snapshot* snapshot; 160 | 161 | ReadOptions() 162 | : verify_checksums(false), 163 | fill_cache(true), 164 | snapshot(NULL) { 165 | } 166 | }; 167 | 168 | // Options that control write operations 169 | struct WriteOptions { 170 | // If true, the write will be flushed from the operating system 171 | // buffer cache (by calling WritableFile::Sync()) before the write 172 | // is considered complete. If this flag is true, writes will be 173 | // slower. 174 | // 175 | // If this flag is false, and the machine crashes, some recent 176 | // writes may be lost. Note that if it is just the process that 177 | // crashes (i.e., the machine does not reboot), no writes will be 178 | // lost even if sync==false. 179 | // 180 | // In other words, a DB write with sync==false has similar 181 | // crash semantics as the "write()" system call. A DB write 182 | // with sync==true has similar crash semantics to a "write()" 183 | // system call followed by "fsync()". 184 | // 185 | // Default: false 186 | bool sync; 187 | 188 | WriteOptions() 189 | : sync(false) { 190 | } 191 | }; 192 | 193 | } // namespace leveldb 194 | 195 | #endif // STORAGE_LEVELDB_INCLUDE_OPTIONS_H_ 196 | -------------------------------------------------------------------------------- /include/leveldb/slice.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2011 The LevelDB Authors. All rights reserved. 2 | // Use of this source code is governed by a BSD-style license that can be 3 | // found in the LICENSE file. See the AUTHORS file for names of contributors. 4 | // 5 | // Slice is a simple structure containing a pointer into some external 6 | // storage and a size. The user of a Slice must ensure that the slice 7 | // is not used after the corresponding external storage has been 8 | // deallocated. 
9 | // 10 | // Multiple threads can invoke const methods on a Slice without 11 | // external synchronization, but if any of the threads may call a 12 | // non-const method, all threads accessing the same Slice must use 13 | // external synchronization. 14 | 15 | #ifndef STORAGE_LEVELDB_INCLUDE_SLICE_H_ 16 | #define STORAGE_LEVELDB_INCLUDE_SLICE_H_ 17 | 18 | #include 19 | #include 20 | #include 21 | #include 22 | 23 | namespace leveldb { 24 | 25 | class Slice { 26 | public: 27 | // Create an empty slice. 28 | Slice() : data_(""), size_(0) { } 29 | 30 | // Create a slice that refers to d[0,n-1]. 31 | Slice(const char* d, size_t n) : data_(d), size_(n) { } 32 | 33 | // Create a slice that refers to the contents of "s" 34 | Slice(const std::string& s) : data_(s.data()), size_(s.size()) { } 35 | 36 | // Create a slice that refers to s[0,strlen(s)-1] 37 | Slice(const char* s) : data_(s), size_(strlen(s)) { } 38 | 39 | // Return a pointer to the beginning of the referenced data 40 | const char* data() const { return data_; } 41 | 42 | // Return the length (in bytes) of the referenced data 43 | size_t size() const { return size_; } 44 | 45 | // Return true iff the length of the referenced data is zero 46 | bool empty() const { return size_ == 0; } 47 | 48 | // Return the ith byte in the referenced data. 49 | // REQUIRES: n < size() 50 | char operator[](size_t n) const { 51 | assert(n < size()); 52 | return data_[n]; 53 | } 54 | 55 | // Change this slice to refer to an empty array 56 | void clear() { data_ = ""; size_ = 0; } 57 | 58 | // Drop the first "n" bytes from this slice. 59 | void remove_prefix(size_t n) { 60 | assert(n <= size()); 61 | data_ += n; 62 | size_ -= n; 63 | } 64 | 65 | // Return a string that contains the copy of the referenced data. 66 | std::string ToString() const { return std::string(data_, size_); } 67 | 68 | // Three-way comparison. 
Returns value: 69 | // < 0 iff "*this" < "b", 70 | // == 0 iff "*this" == "b", 71 | // > 0 iff "*this" > "b" 72 | int compare(const Slice& b) const; 73 | 74 | // Return true iff "x" is a prefix of "*this" 75 | bool starts_with(const Slice& x) const { 76 | return ((size_ >= x.size_) && 77 | (memcmp(data_, x.data_, x.size_) == 0)); 78 | } 79 | 80 | private: 81 | const char* data_; 82 | size_t size_; 83 | 84 | // Intentionally copyable 85 | }; 86 | 87 | inline bool operator==(const Slice& x, const Slice& y) { 88 | return ((x.size() == y.size()) && 89 | (memcmp(x.data(), y.data(), x.size()) == 0)); 90 | } 91 | 92 | inline bool operator!=(const Slice& x, const Slice& y) { 93 | return !(x == y); 94 | } 95 | 96 | inline int Slice::compare(const Slice& b) const { 97 | const int min_len = (size_ < b.size_) ? size_ : b.size_; 98 | int r = memcmp(data_, b.data_, min_len); 99 | if (r == 0) { 100 | if (size_ < b.size_) r = -1; 101 | else if (size_ > b.size_) r = +1; 102 | } 103 | return r; 104 | } 105 | 106 | } // namespace leveldb 107 | 108 | 109 | #endif // STORAGE_LEVELDB_INCLUDE_SLICE_H_ 110 | -------------------------------------------------------------------------------- /include/leveldb/status.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2011 The LevelDB Authors. All rights reserved. 2 | // Use of this source code is governed by a BSD-style license that can be 3 | // found in the LICENSE file. See the AUTHORS file for names of contributors. 4 | // 5 | // A Status encapsulates the result of an operation. It may indicate success, 6 | // or it may indicate an error with an associated error message. 7 | // 8 | // Multiple threads can invoke const methods on a Status without 9 | // external synchronization, but if any of the threads may call a 10 | // non-const method, all threads accessing the same Status must use 11 | // external synchronization. 
12 | 13 | #ifndef STORAGE_LEVELDB_INCLUDE_STATUS_H_ 14 | #define STORAGE_LEVELDB_INCLUDE_STATUS_H_ 15 | 16 | #include 17 | #include "leveldb/slice.h" 18 | 19 | namespace leveldb { 20 | 21 | class Status { 22 | public: 23 | // Create a success status. 24 | Status() : state_(NULL) { } 25 | ~Status() { delete[] state_; } 26 | 27 | // Copy the specified status. 28 | Status(const Status& s); 29 | void operator=(const Status& s); 30 | 31 | // Return a success status. 32 | static Status OK() { return Status(); } 33 | 34 | // Return error status of an appropriate type. 35 | static Status NotFound(const Slice& msg, const Slice& msg2 = Slice()) { 36 | return Status(kNotFound, msg, msg2); 37 | } 38 | static Status Corruption(const Slice& msg, const Slice& msg2 = Slice()) { 39 | return Status(kCorruption, msg, msg2); 40 | } 41 | static Status NotSupported(const Slice& msg, const Slice& msg2 = Slice()) { 42 | return Status(kNotSupported, msg, msg2); 43 | } 44 | static Status InvalidArgument(const Slice& msg, const Slice& msg2 = Slice()) { 45 | return Status(kInvalidArgument, msg, msg2); 46 | } 47 | static Status IOError(const Slice& msg, const Slice& msg2 = Slice()) { 48 | return Status(kIOError, msg, msg2); 49 | } 50 | 51 | // Returns true iff the status indicates success. 52 | bool ok() const { return (state_ == NULL); } 53 | 54 | // Returns true iff the status indicates a NotFound error. 55 | bool IsNotFound() const { return code() == kNotFound; } 56 | 57 | // Returns true iff the status indicates a Corruption error. 58 | bool IsCorruption() const { return code() == kCorruption; } 59 | 60 | // Returns true iff the status indicates an IOError. 61 | bool IsIOError() const { return code() == kIOError; } 62 | 63 | // Return a string representation of this status suitable for printing. 64 | // Returns the string "OK" for success. 65 | std::string ToString() const; 66 | 67 | private: 68 | // OK status has a NULL state_. 
Otherwise, state_ is a new[] array 69 | // of the following form: 70 | // state_[0..3] == length of message 71 | // state_[4] == code 72 | // state_[5..] == message 73 | const char* state_; 74 | 75 | enum Code { 76 | kOk = 0, 77 | kNotFound = 1, 78 | kCorruption = 2, 79 | kNotSupported = 3, 80 | kInvalidArgument = 4, 81 | kIOError = 5 82 | }; 83 | 84 | Code code() const { 85 | return (state_ == NULL) ? kOk : static_cast(state_[4]); 86 | } 87 | 88 | Status(Code code, const Slice& msg, const Slice& msg2); 89 | static const char* CopyState(const char* s); 90 | }; 91 | 92 | inline Status::Status(const Status& s) { 93 | state_ = (s.state_ == NULL) ? NULL : CopyState(s.state_); 94 | } 95 | inline void Status::operator=(const Status& s) { 96 | // The following condition catches both aliasing (when this == &s), 97 | // and the common case where both s and *this are ok. 98 | if (state_ != s.state_) { 99 | delete[] state_; 100 | state_ = (s.state_ == NULL) ? NULL : CopyState(s.state_); 101 | } 102 | } 103 | 104 | } // namespace leveldb 105 | 106 | #endif // STORAGE_LEVELDB_INCLUDE_STATUS_H_ 107 | -------------------------------------------------------------------------------- /include/leveldb/table.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2011 The LevelDB Authors. All rights reserved. 2 | // Use of this source code is governed by a BSD-style license that can be 3 | // found in the LICENSE file. See the AUTHORS file for names of contributors. 4 | 5 | #ifndef STORAGE_LEVELDB_INCLUDE_TABLE_H_ 6 | #define STORAGE_LEVELDB_INCLUDE_TABLE_H_ 7 | 8 | #include 9 | #include "leveldb/iterator.h" 10 | 11 | namespace leveldb { 12 | 13 | class Block; 14 | class BlockHandle; 15 | class Footer; 16 | struct Options; 17 | class RandomAccessFile; 18 | struct ReadOptions; 19 | class TableCache; 20 | 21 | // A Table is a sorted map from strings to strings. Tables are 22 | // immutable and persistent. 
A Table may be safely accessed from 23 | // multiple threads without external synchronization. 24 | class Table { 25 | public: 26 | // Attempt to open the table that is stored in bytes [0..file_size) 27 | // of "file", and read the metadata entries necessary to allow 28 | // retrieving data from the table. 29 | // 30 | // If successful, returns ok and sets "*table" to the newly opened 31 | // table. The client should delete "*table" when no longer needed. 32 | // If there was an error while initializing the table, sets "*table" 33 | // to NULL and returns a non-ok status. Does not take ownership of 34 | // "*source", but the client must ensure that "source" remains live 35 | // for the duration of the returned table's lifetime. 36 | // 37 | // *file must remain live while this Table is in use. 38 | static Status Open(const Options& options, 39 | RandomAccessFile* file, 40 | uint64_t file_size, 41 | Table** table); 42 | 43 | ~Table(); 44 | 45 | // Returns a new iterator over the table contents. 46 | // The result of NewIterator() is initially invalid (caller must 47 | // call one of the Seek methods on the iterator before using it). 48 | Iterator* NewIterator(const ReadOptions&) const; 49 | 50 | // Given a key, return an approximate byte offset in the file where 51 | // the data for that key begins (or would begin if the key were 52 | // present in the file). The returned value is in terms of file 53 | // bytes, and so includes effects like compression of the underlying data. 54 | // E.g., the approximate offset of the last key in the table will 55 | // be close to the file length. 56 | uint64_t ApproximateOffsetOf(const Slice& key) const; 57 | 58 | private: 59 | struct Rep; 60 | Rep* rep_; 61 | 62 | explicit Table(Rep* rep) { rep_ = rep; } 63 | static Iterator* BlockReader(void*, const ReadOptions&, const Slice&); 64 | 65 | // Calls (*handle_result)(arg, ...) with the entry found after a call 66 | // to Seek(key). 
May not make such a call if filter policy says 67 | // that key is not present. 68 | friend class TableCache; 69 | Status InternalGet( 70 | const ReadOptions&, const Slice& key, 71 | void* arg, 72 | void (*handle_result)(void* arg, const Slice& k, const Slice& v)); 73 | 74 | 75 | void ReadMeta(const Footer& footer); 76 | void ReadFilter(const Slice& filter_handle_value); 77 | 78 | // No copying allowed 79 | Table(const Table&); 80 | void operator=(const Table&); 81 | }; 82 | 83 | } // namespace leveldb 84 | 85 | #endif // STORAGE_LEVELDB_INCLUDE_TABLE_H_ 86 | -------------------------------------------------------------------------------- /include/leveldb/table_builder.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2011 The LevelDB Authors. All rights reserved. 2 | // Use of this source code is governed by a BSD-style license that can be 3 | // found in the LICENSE file. See the AUTHORS file for names of contributors. 4 | // 5 | // TableBuilder provides the interface used to build a Table 6 | // (an immutable and sorted map from keys to values). 7 | // 8 | // Multiple threads can invoke const methods on a TableBuilder without 9 | // external synchronization, but if any of the threads may call a 10 | // non-const method, all threads accessing the same TableBuilder must use 11 | // external synchronization. 12 | 13 | #ifndef STORAGE_LEVELDB_INCLUDE_TABLE_BUILDER_H_ 14 | #define STORAGE_LEVELDB_INCLUDE_TABLE_BUILDER_H_ 15 | 16 | #include 17 | #include "leveldb/options.h" 18 | #include "leveldb/status.h" 19 | 20 | namespace leveldb { 21 | 22 | class BlockBuilder; 23 | class BlockHandle; 24 | class WritableFile; 25 | 26 | class TableBuilder { 27 | public: 28 | // Create a builder that will store the contents of the table it is 29 | // building in *file. Does not close the file. It is up to the 30 | // caller to close the file after calling Finish(). 
31 | TableBuilder(const Options& options, WritableFile* file); 32 | 33 | // REQUIRES: Either Finish() or Abandon() has been called. 34 | ~TableBuilder(); 35 | 36 | // Change the options used by this builder. Note: only some of the 37 | // option fields can be changed after construction. If a field is 38 | // not allowed to change dynamically and its value in the structure 39 | // passed to the constructor is different from its value in the 40 | // structure passed to this method, this method will return an error 41 | // without changing any fields. 42 | Status ChangeOptions(const Options& options); 43 | 44 | // Add key,value to the table being constructed. 45 | // REQUIRES: key is after any previously added key according to comparator. 46 | // REQUIRES: Finish(), Abandon() have not been called 47 | void Add(const Slice& key, const Slice& value); 48 | 49 | // Advanced operation: flush any buffered key/value pairs to file. 50 | // Can be used to ensure that two adjacent entries never live in 51 | // the same data block. Most clients should not need to use this method. 52 | // REQUIRES: Finish(), Abandon() have not been called 53 | void Flush(); 54 | 55 | // Return non-ok iff some error has been detected. 56 | Status status() const; 57 | 58 | // Finish building the table. Stops using the file passed to the 59 | // constructor after this function returns. 60 | // REQUIRES: Finish(), Abandon() have not been called 61 | Status Finish(); 62 | 63 | // Indicate that the contents of this builder should be abandoned. Stops 64 | // using the file passed to the constructor after this function returns. 65 | // If the caller is not going to call Finish(), it must call Abandon() 66 | // before destroying this builder. 67 | // REQUIRES: Finish(), Abandon() have not been called 68 | void Abandon(); 69 | 70 | // Number of calls to Add() so far. 71 | uint64_t NumEntries() const; 72 | 73 | // Size of the file generated so far. 
If invoked after a successful 74 | // Finish() call, returns the size of the final generated file. 75 | uint64_t FileSize() const; 76 | 77 | private: 78 | bool ok() const { return status().ok(); } 79 | void WriteBlock(BlockBuilder* block, BlockHandle* handle); 80 | void WriteRawBlock(const Slice& data, CompressionType, BlockHandle* handle); 81 | 82 | struct Rep; 83 | Rep* rep_; 84 | 85 | // No copying allowed 86 | TableBuilder(const TableBuilder&); 87 | void operator=(const TableBuilder&); 88 | }; 89 | 90 | } // namespace leveldb 91 | 92 | #endif // STORAGE_LEVELDB_INCLUDE_TABLE_BUILDER_H_ 93 | -------------------------------------------------------------------------------- /include/leveldb/write_batch.h: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2011 The LevelDB Authors. All rights reserved. 2 | // Use of this source code is governed by a BSD-style license that can be 3 | // found in the LICENSE file. See the AUTHORS file for names of contributors. 4 | // 5 | // WriteBatch holds a collection of updates to apply atomically to a DB. 6 | // 7 | // The updates are applied in the order in which they are added 8 | // to the WriteBatch. For example, the value of "key" will be "v3" 9 | // after the following batch is written: 10 | // 11 | // batch.Put("key", "v1"); 12 | // batch.Delete("key"); 13 | // batch.Put("key", "v2"); 14 | // batch.Put("key", "v3"); 15 | // 16 | // Multiple threads can invoke const methods on a WriteBatch without 17 | // external synchronization, but if any of the threads may call a 18 | // non-const method, all threads accessing the same WriteBatch must use 19 | // external synchronization. 
20 | 21 | #ifndef STORAGE_LEVELDB_INCLUDE_WRITE_BATCH_H_ 22 | #define STORAGE_LEVELDB_INCLUDE_WRITE_BATCH_H_ 23 | 24 | #include 25 | #include "leveldb/status.h" 26 | 27 | namespace leveldb { 28 | 29 | class Slice; 30 | 31 | class WriteBatch { 32 | public: 33 | WriteBatch(); 34 | ~WriteBatch(); 35 | 36 | // Store the mapping "key->value" in the database. 37 | void Put(const Slice& key, const Slice& value); 38 | 39 | // If the database contains a mapping for "key", erase it. Else do nothing. 40 | void Delete(const Slice& key); 41 | 42 | // Clear all updates buffered in this batch. 43 | void Clear(); 44 | 45 | // Support for iterating over the contents of a batch. 46 | class Handler { 47 | public: 48 | virtual ~Handler(); 49 | virtual void Put(const Slice& key, const Slice& value) = 0; 50 | virtual void Delete(const Slice& key) = 0; 51 | }; 52 | Status Iterate(Handler* handler) const; 53 | 54 | private: 55 | friend class WriteBatchInternal; 56 | 57 | std::string rep_; // See comment in write_batch.cc for the format of rep_ 58 | 59 | // Intentionally copyable 60 | }; 61 | 62 | } // namespace leveldb 63 | 64 | #endif // STORAGE_LEVELDB_INCLUDE_WRITE_BATCH_H_ 65 | -------------------------------------------------------------------------------- /src/app/agent.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "encoding/binary" 6 | "encoding/json" 7 | "io" 8 | "net" 9 | "sync" 10 | ) 11 | 12 | type AgentHandler func(ud interface{}, params interface{}) (interface{}, error) 13 | 14 | type AgentSvr struct { 15 | ln net.Listener 16 | db *Leveldb 17 | handers map[string][]interface{} 18 | wg sync.WaitGroup 19 | } 20 | 21 | type Request struct { 22 | Id uint32 23 | Method string 24 | Params interface{} 25 | } 26 | 27 | type Response struct { 28 | Id uint32 `json:"id"` 29 | Result interface{} `json:"result"` 30 | Error interface{} `json:"error"` 31 | } 32 | 33 | func (self *AgentSvr) 
dispatchRequst(conn *net.TCPConn, req *Request) { 34 | defer func() { 35 | if err := recover(); err != nil { 36 | Error("handle agent connection:%v failed:%v", conn.RemoteAddr(), err) 37 | } 38 | }() 39 | cb, ok := self.handers[req.Method] 40 | if ok { 41 | ud := cb[0] 42 | handler := cb[1].(AgentHandler) 43 | var resp Response 44 | resp.Id = req.Id 45 | if result, err := handler(ud, req.Params); err != nil { 46 | resp.Error = err 47 | } else { 48 | resp.Result = result 49 | } 50 | body, err := json.Marshal(resp) 51 | if err != nil { 52 | Panic("marshal response conn:%v, failed:%v", conn.RemoteAddr(), err) 53 | } 54 | 55 | length := uint32(len(body)) 56 | buf := bytes.NewBuffer(nil) 57 | binary.Write(buf, binary.BigEndian, length) 58 | buf.Write(body) 59 | chunk := buf.Bytes() 60 | if _, err = conn.Write(chunk); err != nil { 61 | Panic("write response conn:%v, failed:%v", conn.RemoteAddr(), err) 62 | } 63 | } else { 64 | Error("unknown request:%v", req) 65 | } 66 | } 67 | 68 | func (self *AgentSvr) handleConnection(conn *net.TCPConn) { 69 | defer conn.Close() 70 | defer self.wg.Done() 71 | defer func() { 72 | if err := recover(); err != nil { 73 | Error("handle agent connection:%v failed:%v", conn.RemoteAddr(), err) 74 | } 75 | }() 76 | 77 | Info("new agent connection:%v", conn.RemoteAddr()) 78 | for { 79 | var sz uint32 80 | err := binary.Read(conn, binary.BigEndian, &sz) 81 | if err != nil { 82 | Error("read conn failed:%v, err:%v", conn.RemoteAddr(), err) 83 | break 84 | } 85 | buf := make([]byte, sz) 86 | _, err = io.ReadFull(conn, buf) 87 | if err != nil { 88 | Error("read conn failed:%v, err:%v", conn.RemoteAddr(), err) 89 | break 90 | } 91 | var req Request 92 | if err = json.Unmarshal(buf, &req); err != nil { 93 | Error("parse request failed:%v, err:%v", conn.RemoteAddr(), err) 94 | } 95 | 96 | go self.dispatchRequst(conn, &req) 97 | } 98 | } 99 | 100 | func (self *AgentSvr) Start() { 101 | self.wg.Add(1) 102 | defer self.wg.Done() 103 | 104 | ln, err := 
net.Listen("tcp", setting.Agent.Addr) 105 | if err != nil { 106 | Panic("resolve local addr failed:%s", err.Error()) 107 | } 108 | Info("start agent succeed:%s", setting.Agent.Addr) 109 | 110 | // register handler 111 | self.Register("Get", self, handlerGet) 112 | 113 | self.ln = ln 114 | for { 115 | conn, err := self.ln.Accept() 116 | if err != nil { 117 | Error("accept failed:%v", err) 118 | if opErr, ok := err.(*net.OpError); ok { 119 | if !opErr.Temporary() { 120 | break 121 | } 122 | } 123 | continue 124 | } 125 | self.wg.Add(1) 126 | go self.handleConnection(conn.(*net.TCPConn)) 127 | } 128 | } 129 | 130 | func (self *AgentSvr) Stop() { 131 | if self.ln != nil { 132 | self.ln.Close() 133 | } 134 | self.wg.Wait() 135 | } 136 | 137 | func (self *AgentSvr) Register(cmd string, ud interface{}, handler AgentHandler) { 138 | self.handers[cmd] = []interface{}{ud, handler} 139 | } 140 | 141 | func handlerGet(ud interface{}, params interface{}) (result interface{}, err error) { 142 | agent := ud.(*AgentSvr) 143 | key := params.(string) 144 | Info("agent get:%v", key) 145 | chunk, err := agent.db.Get([]byte(key)) 146 | if chunk == nil || err != nil { 147 | Error("query key:%s failed:%v", key, err) 148 | return 149 | } 150 | var data map[string]string 151 | if err = json.Unmarshal(chunk, &data); err != nil { 152 | Error("unmarshal key:%s failed:%v", key, err) 153 | return 154 | } 155 | 156 | result = data 157 | return 158 | } 159 | 160 | func NewAgent(db *Leveldb) *AgentSvr { 161 | agent := new(AgentSvr) 162 | agent.db = db 163 | agent.handers = make(map[string][]interface{}) 164 | return agent 165 | } 166 | -------------------------------------------------------------------------------- /src/app/command.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bufio" 5 | "fmt" 6 | "net" 7 | "strings" 8 | "sync" 9 | "time" 10 | ) 11 | 12 | type CmdHandler func(ud interface{}, args []string) (string, 
error) 13 | 14 | type CmdService struct { 15 | ln net.Listener 16 | addr string 17 | handlers map[string][]interface{} 18 | wg sync.WaitGroup 19 | } 20 | 21 | func (c *CmdService) handleConnection(conn net.Conn) { 22 | defer conn.Close() 23 | defer c.wg.Done() 24 | defer func() { 25 | if err := recover(); err != nil { 26 | Error("handle connection:%v failed:%v", conn.RemoteAddr(), err) 27 | } 28 | }() 29 | 30 | Info("handle conn:%v", conn) 31 | reader := bufio.NewReader(conn) 32 | for { 33 | s, err := reader.ReadString('\n') 34 | if err != nil { 35 | Error("read conn:%v failed, err:%v", conn, err) 36 | break 37 | } 38 | s = strings.Trim(s, "\r\n ") 39 | args := strings.Split(s, " ") 40 | if len(args) == 0 { 41 | continue 42 | } 43 | 44 | var response string 45 | 46 | from := time.Now() 47 | cmd := args[0] 48 | cb, ok := c.handlers[cmd] 49 | if ok { 50 | Info("recv command: %s", cmd) 51 | ud := cb[0] 52 | handle := cb[1].(CmdHandler) 53 | result, err := handle(ud, args[1:]) 54 | if err != nil { 55 | response = "- " + err.Error() 56 | } else { 57 | response = "+ " + result 58 | } 59 | } else { 60 | response = "- unknown command: " + cmd 61 | } 62 | duration := time.Now().Sub(from) 63 | response = fmt.Sprintf("%s\nelapsed %f sec\n", response, duration.Seconds()) 64 | conn.Write([]byte(response)) 65 | } 66 | Info("end handle conn:%v", conn) 67 | } 68 | 69 | func (c *CmdService) Register(cmd string, ud interface{}, handler CmdHandler) { 70 | _, ok := c.handlers[cmd] 71 | if handler == nil && ok { 72 | delete(c.handlers, cmd) 73 | } else { 74 | Info("register cmd:%s", cmd) 75 | c.handlers[cmd] = []interface{}{ud, handler} 76 | } 77 | } 78 | 79 | func (c *CmdService) Start() { 80 | // no need add one, save one for the last connection 81 | c.wg.Add(1) 82 | defer c.wg.Done() 83 | 84 | ln, err := net.Listen("tcp", c.addr) 85 | if err != nil { 86 | Panic("start manager failed:%v", err) 87 | } 88 | 89 | Info("start manager succeed:%s", c.addr) 90 | 91 | c.ln = ln 92 | for { 93 
| conn, err := c.ln.Accept() 94 | if err != nil { 95 | Error("accept failed:%v", err) 96 | if opErr, ok := err.(*net.OpError); ok { 97 | if !opErr.Temporary() { 98 | break 99 | } 100 | } 101 | continue 102 | } 103 | c.wg.Add(1) 104 | go c.handleConnection(conn) 105 | } 106 | } 107 | 108 | func (c *CmdService) Stop() { 109 | if c.ln != nil { 110 | c.ln.Close() 111 | } 112 | c.wg.Wait() 113 | } 114 | 115 | func NewCmdService() *CmdService { 116 | cmdService := new(CmdService) 117 | cmdService.addr = setting.Manager.Addr 118 | cmdService.handlers = make(map[string][]interface{}) 119 | return cmdService 120 | } 121 | -------------------------------------------------------------------------------- /src/app/context.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "errors" 7 | "fmt" 8 | "redis" 9 | "reflect" 10 | "runtime" 11 | "sort" 12 | "strconv" 13 | "strings" 14 | ) 15 | 16 | func help(ud interface{}, args []string) (result string, err error) { 17 | context := ud.(*Context) 18 | c := context.c 19 | 20 | for cmd := range c.handlers { 21 | result = result + cmd + "\n" 22 | } 23 | return 24 | } 25 | 26 | func procs(ud interface{}, args []string) (result string, err error) { 27 | count := 0 28 | if len(args) > 0 { 29 | if count, err = strconv.Atoi(args[0]); err != nil { 30 | Error("illegal parameter: %v", err) 31 | return 32 | } 33 | } 34 | old := runtime.GOMAXPROCS(count) 35 | if count < 1 { 36 | result = fmt.Sprintf("max procs:%d", old) 37 | } else { 38 | result = fmt.Sprintf("modify max procs:%d -> %d", old, count) 39 | } 40 | return 41 | } 42 | 43 | func shutdown(ud interface{}, args []string) (result string, err error) { 44 | passwd := "" 45 | if len(args) > 0 { 46 | passwd = args[0] 47 | } 48 | 49 | if passwd != "confirm" { 50 | err = errors.New("wrong password") 51 | return 52 | } 53 | 54 | context := ud.(*Context) 55 | go safeQuit(context) 56 | result = 
"please close the connection to quit the process" 57 | return 58 | } 59 | 60 | func info(ud interface{}, args []string) (result string, err error) { 61 | context := ud.(*Context) 62 | db := context.db 63 | 64 | key := "" 65 | if len(args) > 0 { 66 | key = args[0] 67 | } 68 | result = db.Info(key) 69 | return 70 | } 71 | 72 | func sync_one(ud interface{}, args []string) (result string, err error) { 73 | context := ud.(*Context) 74 | sync_queue := context.sync_queue 75 | 76 | key := "" 77 | if len(args) > 0 { 78 | key = args[0] 79 | } 80 | sync_queue <- key 81 | return 82 | } 83 | 84 | func sync_all(ud interface{}, args []string) (result string, err error) { 85 | context := ud.(*Context) 86 | sync_queue := context.sync_queue 87 | cli, err := GetRedisConnection() 88 | if err != nil { 89 | return 90 | } 91 | defer cli.Close() 92 | 93 | all_key_strings, err := cli.Exec("keys", "*") 94 | if err != nil { 95 | Error("sync_all cmd service failed:%v", err) 96 | return 97 | } 98 | keys := all_key_strings.([]string) 99 | sort.Strings(keys) 100 | sz := len(keys) 101 | cur := 0 102 | for _, key := range keys { 103 | sync_queue <- key 104 | cur += 1 105 | if cur%100 == 0 { 106 | Info("sync progress: %d/%d, queue:%d", cur, sz, len(sync_queue)) 107 | } 108 | } 109 | Info("sync finish: %d/%d", cur, sz) 110 | result = strconv.Itoa(sz) 111 | return 112 | } 113 | 114 | func count(ud interface{}, args []string) (result string, err error) { 115 | context := ud.(*Context) 116 | db := context.db 117 | it := db.NewIterator() 118 | defer it.Close() 119 | 120 | i := 0 121 | for it.Seek(INDEX_KEY_START); it.Valid() && bytes.Compare(it.Key(), INDEX_KEY_END) <= 0; it.Next() { 122 | i++ 123 | } 124 | result = strconv.Itoa(i) 125 | return 126 | } 127 | 128 | func check(ud interface{}, args []string) (result string, err error) { 129 | context := ud.(*Context) 130 | cli, err := GetRedisConnection() 131 | if err != nil { 132 | return 133 | } 134 | defer cli.Close() 135 | db := context.db 136 | 137 | 
detail := false 138 | if len(args) > 0 && args[0] == "detail" { 139 | detail = true 140 | } 141 | 142 | var miss []string 143 | var mismatch []string 144 | if detail { 145 | miss = make([]string, 0) 146 | mismatch = make([]string, 0) 147 | } 148 | miss_count := 0 149 | match_count := 0 150 | mismatch_count := 0 151 | ret, err := cli.Exec("keys", "*") 152 | if err != nil { 153 | return 154 | } 155 | keys := ret.([]string) 156 | sort.Strings(keys) 157 | total := len(keys) 158 | 159 | for i, key := range keys { 160 | redis_data := make(map[string]string) 161 | err = cli.Hgetall(key, redis_data) 162 | if err != nil { 163 | Error("hgetall key %s failed:%v", key, err) 164 | return 165 | } 166 | var chunk []byte 167 | var leveldb_data map[string]string 168 | if chunk, err = db.Get([]byte(key)); err != nil { 169 | Error("leveldb.Get failed on key %s failed:%v", key, err) 170 | return 171 | } 172 | if chunk == nil { 173 | if miss != nil { 174 | miss = append(miss, key) 175 | } 176 | miss_count++ 177 | continue 178 | } 179 | err = json.Unmarshal(chunk, &leveldb_data) 180 | if err != nil { 181 | Error("json.Unmarshal failed on key %s failed:%v", key, err) 182 | continue 183 | } 184 | if !reflect.DeepEqual(redis_data, leveldb_data) { 185 | if mismatch != nil { 186 | mismatch = append(mismatch, key) 187 | } 188 | mismatch_count++ 189 | } else { 190 | match_count++ 191 | } 192 | 193 | if i%1000 == 0 { 194 | Info("check progress:%d/%d\n", i, total) 195 | } 196 | } 197 | 198 | buf := bytes.NewBufferString("check results:\n") 199 | fmt.Fprintf(buf, "total: %d\n", total) 200 | fmt.Fprintf(buf, "miss: %d\n", miss_count) 201 | fmt.Fprintf(buf, "mismatch: %d\n", mismatch_count) 202 | fmt.Fprintf(buf, "match: %d\n", match_count) 203 | if detail { 204 | if mismatch_count > 0 { 205 | fmt.Fprintf(buf, "mismatch keys: %s\n", strings.Join(mismatch, ", ")) 206 | } 207 | if miss_count > 0 { 208 | fmt.Fprintf(buf, "miss keys: %s\n", strings.Join(miss, ", ")) 209 | } 210 | } 211 | result = 
buf.String() 212 | return 213 | } 214 | 215 | func fast_check(ud interface{}, args []string) (result string, err error) { 216 | context := ud.(*Context) 217 | cli, err := GetRedisConnection() 218 | if err != nil { 219 | return 220 | } 221 | defer cli.Close() 222 | db := context.db 223 | 224 | detail := false 225 | if len(args) > 0 && args[0] == "detail" { 226 | detail = true 227 | } 228 | 229 | var miss []string 230 | var mismatch []string 231 | if detail { 232 | miss = make([]string, 0) 233 | mismatch = make([]string, 0) 234 | } 235 | miss_count := 0 236 | match_count := 0 237 | mismatch_count := 0 238 | ret, err := cli.Exec("keys", "*") 239 | if err != nil { 240 | return 241 | } 242 | keys := ret.([]string) 243 | total := len(keys) 244 | 245 | var cur_version string // redis 246 | var bak_version []byte // leveldb 247 | for i, key := range keys { 248 | if ret, err = cli.Hget(key, "version"); err != nil { 249 | return 250 | } 251 | cur_version = ret.(string) 252 | index_key := indexKey(key) 253 | if bak_version, err = db.Get([]byte(index_key)); err != nil { 254 | return 255 | } 256 | if bak_version == nil { 257 | if miss != nil { 258 | miss = append(miss, key) 259 | } 260 | miss_count++ 261 | } else if cur_version != string(bak_version) { 262 | if mismatch != nil { 263 | mismatch = append(mismatch, key) 264 | } 265 | mismatch_count++ 266 | } else { 267 | match_count++ 268 | } 269 | 270 | if i%1000 == 0 { 271 | Info("fast check progress:%d/%d\n", i, total) 272 | } 273 | } 274 | 275 | buf := bytes.NewBufferString("fast check results:\n") 276 | fmt.Fprintf(buf, "total: %d\n", total) 277 | fmt.Fprintf(buf, "miss: %d\n", miss_count) 278 | fmt.Fprintf(buf, "mismatch: %d\n", mismatch_count) 279 | fmt.Fprintf(buf, "match: %d\n", match_count) 280 | if detail { 281 | if mismatch_count > 0 { 282 | fmt.Fprintf(buf, "mismatch keys: %s\n", strings.Join(mismatch, ", ")) 283 | } 284 | if miss_count > 0 { 285 | fmt.Fprintf(buf, "miss keys: %s\n", strings.Join(miss, ", ")) 286 | } 
287 | } 288 | result = buf.String() 289 | return 290 | } 291 | 292 | func dump(ud interface{}, args []string) (result string, err error) { 293 | if len(args) == 0 { 294 | err = errors.New("no key") 295 | return 296 | } 297 | 298 | key := args[0] 299 | context := ud.(*Context) 300 | db := context.db 301 | 302 | chunk, err := db.Get([]byte(key)) 303 | if chunk == nil || err != nil { 304 | Error("fetch data failed:%v", err) 305 | return 306 | } 307 | 308 | Info("dump key:%s(%d)", key, len(chunk)) 309 | var data map[string]string 310 | err = json.Unmarshal(chunk, &data) 311 | if err != nil { 312 | Error("unmarshal chunk failed:%v", err) 313 | return 314 | } 315 | 316 | buf := bytes.NewBufferString("content:\n") 317 | for key, val := range data { 318 | fmt.Fprintf(buf, "%v:\t%v\n", key, val) 319 | } 320 | result = buf.String() 321 | return 322 | } 323 | 324 | func restore(ud interface{}, key string, cli *redis.Redis) (err error) { 325 | context := ud.(*Context) 326 | db := context.db 327 | var chunk []byte 328 | if chunk, err = db.Get([]byte(key)); err != nil { 329 | Error("query key %s failed:%v", key, err) 330 | return 331 | } 332 | 333 | if chunk == nil { 334 | err = errors.New("key doesn't exist on leveldb") 335 | return 336 | } 337 | 338 | var leveldb_data map[string]string 339 | err = json.Unmarshal(chunk, &leveldb_data) 340 | if err != nil { 341 | return 342 | } 343 | redis_data := make(map[string]string) 344 | err = cli.Hgetall(key, redis_data) 345 | if err != nil { 346 | Error("hgetall key %s failed:%v", key, err) 347 | return 348 | } 349 | 350 | if redis_data["version"] >= leveldb_data["version"] && len(redis_data) > 0 { 351 | Info("redis_data[version]:%s >= leveldb_data[version]:%s", redis_data["version"], leveldb_data["version"]) 352 | return 353 | } 354 | leveldb_array := make([]interface{}, len(leveldb_data)*2+1) 355 | leveldb_array[0] = key 356 | i := 1 357 | for k, v := range leveldb_data { 358 | leveldb_array[i] = k 359 | leveldb_array[i+1] = v 360 | i 
= i + 2 361 | } 362 | _, err = cli.Exec("hmset", leveldb_array...) 363 | if err != nil { 364 | Error("hmset key %s failed:%v", key, err) 365 | return 366 | } 367 | return 368 | } 369 | 370 | func restore_one(ud interface{}, args []string) (result string, err error) { 371 | if len(args) < 1 { 372 | err = errors.New("restore need one argument") 373 | return 374 | } 375 | key := args[0] 376 | cli, err := GetRedisConnection() 377 | if err != nil { 378 | return 379 | } 380 | defer cli.Close() 381 | 382 | err = restore(ud, key, cli) 383 | if err != nil { 384 | return 385 | } 386 | result = fmt.Sprintf("set key:%s", key) 387 | return 388 | } 389 | 390 | func restore_all(ud interface{}, args []string) (result string, err error) { 391 | context := ud.(*Context) 392 | db := context.db 393 | it := db.NewIterator() 394 | count := 0 395 | restore_count := 0 396 | cli, err := GetRedisConnection() 397 | if err != nil { 398 | return 399 | } 400 | defer cli.Close() 401 | 402 | for it.Seek(KEY_START); it.Valid() && bytes.Compare(it.Key(), KEY_END) <= 0; it.Next() { 403 | err = restore(ud, string(it.Key()), cli) 404 | if err != nil { 405 | return 406 | } else { 407 | restore_count++ 408 | } 409 | count++ 410 | if count%100 == 0 { 411 | Info("progress:%d, restore:%d", count, restore_count) 412 | } 413 | } 414 | result = fmt.Sprintf("restore key %d, total %d\n", restore_count, count) 415 | return 416 | } 417 | 418 | func keys(ud interface{}, args []string) (result string, err error) { 419 | start := 0 420 | count := 10 421 | if len(args) > 0 { 422 | if start, err = strconv.Atoi(args[0]); err != nil { 423 | Error("iter start error: %v", err) 424 | return 425 | } 426 | } 427 | 428 | if len(args) > 1 { 429 | if count, err = strconv.Atoi(args[1]); err != nil { 430 | Error("iter start error: %v", err) 431 | return 432 | } 433 | } 434 | 435 | context := ud.(*Context) 436 | db := context.db 437 | it := db.NewIterator() 438 | defer it.Close() 439 | 440 | buf := bytes.NewBufferString("keys:\n") 
441 | i := 0 442 | for it.Seek(INDEX_KEY_START); it.Valid() && bytes.Compare(it.Key(), INDEX_KEY_END) <= 0; it.Next() { 443 | if start <= i && i <= start+count { 444 | fmt.Fprintf(buf, "%s\n", string(it.Key()[INDEX_KEY_LEN:])) 445 | } 446 | i++ 447 | } 448 | result = buf.String() 449 | return 450 | } 451 | 452 | func diff(ud interface{}, args []string) (result string, err error) { 453 | if len(args) == 0 { 454 | err = errors.New("no key") 455 | return 456 | } 457 | 458 | key := args[0] 459 | context := ud.(*Context) 460 | cli, err := GetRedisConnection() 461 | if err != nil { 462 | return 463 | } 464 | defer cli.Close() 465 | db := context.db 466 | // query redis 467 | left := make(map[string]string) 468 | err = cli.Hgetall(key, left) 469 | if err != nil { 470 | return 471 | } 472 | 473 | chunk, err := db.Get([]byte(key)) 474 | if chunk == nil || err != nil { 475 | Error("fetch data from leveldb failed:%v", err) 476 | return 477 | } 478 | 479 | var right map[string]string 480 | err = json.Unmarshal(chunk, &right) 481 | if err != nil { 482 | Error("unmarshal chunk failed:%v", err) 483 | return 484 | } 485 | 486 | buf := bytes.NewBufferString("left:redis, right:leveldb\n") 487 | buf_len := buf.Len() 488 | for k, v1 := range left { 489 | if v2, ok := right[k]; ok { 490 | if v1 != v2 { 491 | fmt.Fprintf(buf, "%s < %s, %s\n", k, v1, v2) 492 | } 493 | } else { 494 | fmt.Fprintf(buf, "%s, only in left\n", k) 495 | } 496 | } 497 | 498 | for k, _ := range right { 499 | if _, ok := left[k]; !ok { 500 | fmt.Fprintf(buf, "%s, only in right\n", k) 501 | } 502 | } 503 | 504 | if buf_len == buf.Len() { 505 | fmt.Fprintf(buf, "perfect match\n") 506 | } 507 | 508 | result = buf.String() 509 | return 510 | } 511 | 512 | func (context *Context) Register(c *CmdService) { 513 | Info("register command service") 514 | c.Register("help", context, help) 515 | c.Register("procs", context, procs) 516 | c.Register("info", context, info) 517 | c.Register("sync", context, sync_one) 518 | 
c.Register("sync_all", context, sync_all) 519 | c.Register("dump", context, dump) 520 | c.Register("count", context, count) 521 | c.Register("diff", context, diff) 522 | c.Register("shutdown", context, shutdown) 523 | c.Register("keys", context, keys) 524 | c.Register("check_all", context, check) 525 | c.Register("fast_check", context, fast_check) 526 | c.Register("restore_one", context, restore_one) 527 | c.Register("restore_all", context, restore_all) 528 | } 529 | 530 | func GetRedisConnection() (cli *redis.Redis, err error) { 531 | cli = redis.NewRedis(setting.Redis.Host, setting.Redis.Password, setting.Redis.Db) 532 | err = cli.Connect() 533 | return 534 | } 535 | 536 | func NewContext() *Context { 537 | context := new(Context) 538 | context.quit_chan = make(chan bool) 539 | return context 540 | } 541 | -------------------------------------------------------------------------------- /src/app/leveldb.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "errors" 5 | "levigo" 6 | ) 7 | 8 | type Leveldb struct { 9 | env *levigo.Env 10 | options *levigo.Options 11 | roptions *levigo.ReadOptions 12 | woptions *levigo.WriteOptions 13 | db *levigo.DB 14 | } 15 | 16 | func (self *Leveldb) Open(dbname string) (err error) { 17 | if self.db != nil { 18 | return 19 | } 20 | 21 | self.db, err = levigo.Open(dbname, self.options) 22 | return 23 | } 24 | 25 | func (self *Leveldb) BatchPut(args ...[]byte) error { 26 | sz := len(args) 27 | if sz == 0 || sz%2 != 0 { 28 | return errors.New("illegal parameters") 29 | } 30 | 31 | batch := levigo.NewWriteBatch() 32 | defer batch.Close() 33 | 34 | for i := 0; i < sz-1; i = i + 2 { 35 | batch.Put(args[i], args[i+1]) 36 | } 37 | return self.db.Write(self.woptions, batch) 38 | } 39 | 40 | func (self *Leveldb) Put(key, value []byte) error { 41 | return self.db.Put(self.woptions, key, value) 42 | } 43 | 44 | func (self *Leveldb) Get(key []byte) ([]byte, error) { 45 | 
return self.db.Get(self.roptions, key) 46 | } 47 | 48 | func (self *Leveldb) Info(key string) string { 49 | property := "leveldb." + key 50 | prop := self.db.PropertyValue(property) 51 | if prop == "" { 52 | return "valid key:\n\tnum-files-at-level\n\tstats\n\tsstables\n" 53 | } 54 | return prop 55 | } 56 | 57 | func (self *Leveldb) Close() { 58 | if self.db != nil { 59 | self.db.Close() 60 | } 61 | 62 | if self.options != nil { 63 | self.options.Close() 64 | } 65 | 66 | if self.env != nil { 67 | self.env.Close() 68 | } 69 | } 70 | 71 | func (self *Leveldb) NewIterator() *levigo.Iterator { 72 | return self.db.NewIterator(self.roptions) 73 | } 74 | 75 | func NewLeveldb(name string) *Leveldb { 76 | options := levigo.NewOptions() 77 | 78 | // options.SetComparator(cmp) 79 | options.SetCreateIfMissing(true) 80 | options.SetErrorIfExists(false) 81 | 82 | // set env 83 | env := levigo.NewDefaultEnv() 84 | options.SetEnv(env) 85 | 86 | // set cache 87 | cache := levigo.NewLRUCache(16 << 20) 88 | options.SetCache(cache) 89 | 90 | options.SetInfoLog(nil) 91 | options.SetParanoidChecks(false) 92 | options.SetWriteBufferSize(128 << 20) 93 | options.SetMaxOpenFiles(2000) 94 | options.SetBlockSize(4 * 1024) 95 | options.SetBlockRestartInterval(16) 96 | options.SetCompression(levigo.SnappyCompression) 97 | 98 | // set filter 99 | filter := levigo.NewBloomFilter(10) 100 | options.SetFilterPolicy(filter) 101 | 102 | roptions := levigo.NewReadOptions() 103 | roptions.SetVerifyChecksums(false) 104 | roptions.SetFillCache(true) 105 | 106 | woptions := levigo.NewWriteOptions() 107 | // set sync false 108 | woptions.SetSync(false) 109 | 110 | db := &Leveldb{env, 111 | options, 112 | roptions, 113 | woptions, 114 | nil} 115 | if err := db.Open(name); err != nil { 116 | Panic("open db failed, err:%v", err) 117 | } else { 118 | Info("open db succeed, dbname:%v", name) 119 | } 120 | return db 121 | } 122 | -------------------------------------------------------------------------------- 
/src/app/log.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | "log" 7 | "os" 8 | "runtime" 9 | ) 10 | 11 | func finiLog(fp *os.File) { 12 | fmt.Print("finilog\n") 13 | fp.Close() 14 | } 15 | 16 | func initLog() { 17 | fp, err := os.OpenFile(setting.Log.File, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0666) 18 | if err != nil { 19 | fmt.Fprintf(os.Stderr, "open log file failed:%s", err) 20 | os.Exit(1) 21 | } 22 | log.SetOutput(io.MultiWriter(fp, os.Stderr)) 23 | runtime.SetFinalizer(fp, finiLog) 24 | } 25 | 26 | func _print(format string, a ...interface{}) { 27 | log.Printf(format, a...) 28 | } 29 | 30 | func Debug(format string, a ...interface{}) { 31 | if setting.Log.Level > 2 { 32 | _print(format, a...) 33 | } 34 | } 35 | 36 | func Info(format string, a ...interface{}) { 37 | if setting.Log.Level > 1 { 38 | _print(format, a...) 39 | } 40 | } 41 | 42 | func Error(format string, a ...interface{}) { 43 | if setting.Log.Level > 0 { 44 | _print(format, a...) 45 | } 46 | } 47 | 48 | func Panic(format string, a ...interface{}) { 49 | _print(format, a...) 
50 | panic("!!") 51 | } 52 | -------------------------------------------------------------------------------- /src/app/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/json" 5 | "flag" 6 | "fmt" 7 | "io/ioutil" 8 | "os" 9 | "os/signal" 10 | "syscall" 11 | ) 12 | 13 | type Context struct { 14 | db *Leveldb 15 | m *Monitor 16 | s *StorerMgr 17 | c *CmdService 18 | agent *AgentSvr 19 | quit_chan chan bool 20 | sync_queue chan string 21 | } 22 | 23 | type Redis struct { 24 | Host string 25 | Password string 26 | Db int 27 | NotificationConfig string 28 | Event string 29 | Expire bool 30 | } 31 | 32 | type LeveldbConfig struct { 33 | Dbname string 34 | } 35 | 36 | type Manager struct { 37 | Addr string 38 | } 39 | 40 | type Log struct { 41 | File string 42 | Level int 43 | } 44 | 45 | type Agent struct { 46 | Addr string 47 | } 48 | 49 | type Setting struct { 50 | Redis Redis 51 | Leveldb LeveldbConfig 52 | Manager Manager 53 | Log Log 54 | Agent Agent 55 | } 56 | 57 | func usage() { 58 | fmt.Fprintf(os.Stderr, "usage: %s [config]\n", os.Args[0]) 59 | flag.PrintDefaults() 60 | os.Exit(2) 61 | } 62 | 63 | func safeQuit(context *Context) { 64 | context.c.Stop() 65 | Error("wait context") 66 | context.agent.Stop() 67 | Error("wait agent") 68 | context.m.Stop() 69 | Error("wait monitor") 70 | context.s.Stop() 71 | Error("wait storer") 72 | context.quit_chan <- true 73 | } 74 | 75 | func handleSignal(context *Context) { 76 | c := make(chan os.Signal, 1) 77 | signal.Notify(c, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT, syscall.SIGHUP) 78 | for sig := range c { 79 | switch sig { 80 | case syscall.SIGHUP: 81 | Error("catch sighup, ignore") 82 | default: 83 | safeQuit(context) 84 | } 85 | } 86 | } 87 | 88 | var setting Setting 89 | 90 | func main() { 91 | flag.Usage = usage 92 | flag.Parse() 93 | 94 | args := flag.Args() 95 | if len(args) < 1 { 96 | fmt.Println("config file is 
missing.") 97 | os.Exit(1) 98 | } 99 | 100 | content, err := ioutil.ReadFile(args[0]) 101 | if err != nil { 102 | panic(err) 103 | } 104 | 105 | if err = json.Unmarshal([]byte(content), &setting); err != nil { 106 | panic(err) 107 | } 108 | 109 | // init log 110 | initLog() 111 | 112 | database := NewLeveldb(setting.Leveldb.Dbname) 113 | defer database.Close() 114 | 115 | m := NewMonitor() 116 | s := NewStorerMgr(database, 5) 117 | c := NewCmdService() 118 | agent := NewAgent(database) 119 | 120 | context := NewContext() 121 | context.db = database 122 | context.m = m 123 | context.s = s 124 | context.c = c 125 | context.agent = agent 126 | context.Register(c) 127 | context.sync_queue = make(chan string, 1) 128 | 129 | go handleSignal(context) 130 | go m.Start(context.sync_queue) 131 | go s.Start(context.sync_queue) 132 | go c.Start() 133 | go agent.Start() 134 | 135 | Info("start succeed") 136 | Error("catch signal %v, program will exit", <-context.quit_chan) 137 | } 138 | -------------------------------------------------------------------------------- /src/app/monitor.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | 7 | "redis" 8 | ) 9 | 10 | type Monitor struct { 11 | cli *redis.Redis 12 | notification_config string 13 | event string 14 | qlen int 15 | quit_flag bool 16 | quit_chan chan int 17 | } 18 | 19 | func (m *Monitor) subscribe() error { 20 | config_key := "notify-keyspace-events" 21 | _, err := m.cli.Exec("config", "set", config_key, m.notification_config) 22 | if err != nil { 23 | return err 24 | } 25 | Info("config set %s = %s", config_key, m.notification_config) 26 | 27 | _, err = m.cli.Exec("subscribe", m.event) 28 | if err != nil { 29 | return err 30 | } 31 | 32 | Info("subscribe: %s", m.event) 33 | return nil 34 | } 35 | 36 | func (m *Monitor) reconnect() bool { 37 | times := 0 38 | for { 39 | if m.quit_flag { 40 | Error("close redis connection, monitor 
will exit") 41 | return false 42 | } 43 | 44 | wait := times 45 | times = times + 1 46 | 47 | if wait > 30 { 48 | wait = 30 49 | } 50 | Info("try to reconnect monitor, times:%d, wait:%d", times, wait) 51 | time.Sleep(time.Duration(wait) * time.Second) 52 | 53 | err := m.cli.ReConnect() 54 | if err != nil { 55 | Error("reconnect monitor failed:%v", err) 56 | continue 57 | } 58 | err = m.subscribe() 59 | if err != nil { 60 | Error("subscribe monitor failed:%v", err) 61 | continue 62 | } else { 63 | break 64 | } 65 | } 66 | return true 67 | } 68 | 69 | func (m *Monitor) Start(queue chan string) { 70 | err := m.cli.Connect() 71 | if err != nil { 72 | Panic("start monitor failed:%v", err) 73 | } 74 | err = m.subscribe() 75 | if err != nil { 76 | Panic("start monitor failed:%v", err) 77 | } 78 | Info("start monitor succeed") 79 | 80 | for { 81 | resp, err := m.cli.ReadResponse() 82 | if err != nil { 83 | Error("recv message failed, try to reconnect to redis:%v", err) 84 | if m.reconnect() { 85 | continue 86 | } else { 87 | close(queue) 88 | break 89 | } 90 | } 91 | if data, ok := resp.([]string); ok { 92 | if len(data) != 3 || data[0] != "message" { 93 | Error("receive unexpected message, %v", data) 94 | } else { 95 | event := data[1] 96 | key := data[2] 97 | Info("receive [%s], value[%s]", event, key) 98 | queue <- key 99 | 100 | qlen := len(queue) 101 | if qlen > m.qlen { 102 | Error("queue grow, current length:%d", qlen) 103 | } 104 | m.qlen = qlen 105 | } 106 | } else { 107 | Error("receive unexpected message, %v", resp) 108 | } 109 | } 110 | m.quit_chan <- 1 111 | } 112 | 113 | func (m *Monitor) Stop() { 114 | m.quit_flag = true 115 | if m.cli != nil { 116 | m.cli.Close() 117 | } 118 | <-m.quit_chan 119 | } 120 | 121 | func NewMonitor() *Monitor { 122 | cli := redis.NewRedis(setting.Redis.Host, setting.Redis.Password, setting.Redis.Db) 123 | notification_config := "gE" 124 | event := fmt.Sprintf("__keyevent@%d__:%s", setting.Redis.Db, setting.Redis.Event) 125 | 
return &Monitor{cli, notification_config, event, 0, false, make(chan int)} 126 | } 127 | -------------------------------------------------------------------------------- /src/app/storer.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/json" 5 | "strconv" 6 | "sync" 7 | "time" 8 | 9 | "redis" 10 | ) 11 | 12 | type Storer struct { 13 | cli *redis.Redis 14 | db *Leveldb 15 | } 16 | 17 | func (s *Storer) reconnect() { 18 | times := 0 19 | for { 20 | wait := times 21 | times = times + 1 22 | 23 | if wait > 30 { 24 | wait = 30 25 | } 26 | Info("try to reconnect storer, times:%d, wait:%d", times, wait) 27 | time.Sleep(time.Duration(wait) * time.Second) 28 | 29 | err := s.cli.ReConnect() 30 | if err != nil { 31 | Error("reconnect storer failed:%v", err) 32 | continue 33 | } else { 34 | break 35 | } 36 | } 37 | } 38 | 39 | func (s *Storer) retry(key string, err error) { 40 | Error("recv message failed, try to reconnect to redis:%v", err) 41 | s.reconnect() 42 | s.save(key) 43 | } 44 | 45 | func (s *Storer) expire(key string, resp map[string]string) { 46 | value, ok := resp["expire"] 47 | if !ok { 48 | return 49 | } 50 | seconds, err := strconv.Atoi(value) 51 | if err != nil { 52 | return 53 | } 54 | if seconds > 0 { 55 | Info("expire key:%s, seconds:%d", key, seconds) 56 | s.cli.Exec("expire", key, seconds) 57 | } 58 | } 59 | 60 | func (s *Storer) save(key string) { 61 | name, err := s.cli.Type(key) 62 | if err != nil { 63 | s.retry(key, err) 64 | return 65 | } 66 | 67 | if name != "hash" { 68 | Error("unexpected key type, key:%s, type:%s", key, name) 69 | return 70 | } 71 | 72 | resp := make(map[string]string) 73 | err = s.cli.Hgetall(key, resp) 74 | if err != nil { 75 | s.retry(key, err) 76 | return 77 | } 78 | 79 | chunk, err := json.Marshal(resp) 80 | if err != nil { 81 | Error("marshal obj failed, key:%s, obj:%v, err:%v", key, resp, err) 82 | return 83 | } 84 | 85 | index_key := 
indexKey(key) 86 | version := []byte(resp["version"]) 87 | err = s.db.BatchPut([]byte(index_key), version, []byte(key), chunk) 88 | if err != nil { 89 | Error("save key:%s failed, err:%v", key, err) 90 | return 91 | } 92 | 93 | // expire key 94 | if setting.Redis.Expire { 95 | s.expire(key, resp) 96 | } 97 | 98 | Info("save key:%s, data len:%d", key, len(chunk)) 99 | return 100 | } 101 | 102 | func (s *Storer) Start(queue chan string, wg *sync.WaitGroup) { 103 | defer wg.Done() 104 | 105 | err := s.cli.Connect() 106 | if err != nil { 107 | Panic("start Storer failed:%v", err) 108 | } 109 | 110 | Info("start storer succeed") 111 | 112 | for key := range queue { 113 | s.save(key) 114 | } 115 | Info("queue is closed, storer will exit") 116 | } 117 | 118 | func NewStorer(db *Leveldb) *Storer { 119 | cli := redis.NewRedis(setting.Redis.Host, setting.Redis.Password, setting.Redis.Db) 120 | return &Storer{cli, db} 121 | } 122 | 123 | type StorerMgr struct { 124 | instances []*Storer 125 | queues []chan string 126 | wg sync.WaitGroup 127 | } 128 | 129 | func _hash(str string) int { 130 | h := 0 131 | for _, c := range str { 132 | h += int(c) 133 | } 134 | return h 135 | } 136 | 137 | func (m *StorerMgr) Start(queue chan string) { 138 | m.wg.Add(1) 139 | defer m.wg.Done() 140 | 141 | for i, instance := range m.instances { 142 | m.wg.Add(1) 143 | go instance.Start(m.queues[i], &m.wg) 144 | } 145 | 146 | // dispatch msg 147 | max := len(m.queues) 148 | for key := range queue { 149 | i := _hash(key) % max 150 | m.queues[i] <- key 151 | } 152 | 153 | Info("queue is closed, all storer will exit") 154 | for _, queue := range m.queues { 155 | close(queue) 156 | } 157 | } 158 | 159 | func (m *StorerMgr) Stop() { 160 | m.wg.Wait() 161 | } 162 | 163 | func NewStorerMgr(db *Leveldb, numInstances int) *StorerMgr { 164 | m := new(StorerMgr) 165 | m.instances = make([]*Storer, numInstances) 166 | m.queues = make([]chan string, numInstances) 167 | for i := 0; i < numInstances; i++ { 168 | 
m.instances[i] = NewStorer(db) 169 | m.queues[i] = make(chan string, 256) 170 | } 171 | return m 172 | } 173 | -------------------------------------------------------------------------------- /src/app/util.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | const INDEX_KEY_PREFIX string = "|" 4 | const INDEX_KEY_LEN int = len("|") 5 | 6 | var INDEX_KEY_START = []byte("|") 7 | var INDEX_KEY_END = []byte{'|', 0xff} 8 | 9 | const KEY_PREFIX string = "uid:" 10 | 11 | var KEY_START = []byte("uid:") 12 | var KEY_END = []byte{'u', 'i', 'd', ':', 0xff} 13 | 14 | func indexKey(key string) string { 15 | return INDEX_KEY_PREFIX + key 16 | } 17 | -------------------------------------------------------------------------------- /src/conf/AUTHORS: -------------------------------------------------------------------------------- 1 | Stephen Weinberg 2 | -------------------------------------------------------------------------------- /src/conf/COPYRIGHT: -------------------------------------------------------------------------------- 1 | Copyright (c) 2010-2012, Stephen Weinberg 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 5 | 6 | * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 7 | * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 8 | * Neither the name of goconf nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
9 | 10 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 11 | -------------------------------------------------------------------------------- /src/conf/conf.go: -------------------------------------------------------------------------------- 1 | // This package implements a parser for configuration files. 2 | // This allows easy reading and writing of structured configuration files. 3 | // 4 | // Given the configuration file: 5 | // 6 | // [default] 7 | // host = example.com 8 | // port = 443 9 | // php = on 10 | // 11 | // [service-1] 12 | // host = s1.example.com 13 | // allow-writing = false 14 | // 15 | // To read this configuration file, do: 16 | // 17 | // c, err := conf.ReadConfigFile("server.conf") 18 | // c.GetString("default", "host") // returns example.com 19 | // c.GetInt("", "port") // returns 443 (assumes "default") 20 | // c.GetBool("", "php") // returns true 21 | // c.GetString("service-1", "host") // returns s1.example.com 22 | // c.GetBool("service-1","allow-writing") // returns false 23 | // c.GetInt("service-1", "port") // returns 0 and a GetError 24 | // 25 | // Note that all section and option names are case insensitive. All values are case 26 | // sensitive. 27 | // 28 | // Goconfig's string substitution syntax has not been removed. 
However, it may be 29 | // taken out or modified in the future. 30 | package conf 31 | 32 | import ( 33 | "fmt" 34 | "regexp" 35 | "strings" 36 | ) 37 | 38 | // ConfigFile is the representation of configuration settings. 39 | // The public interface is entirely through methods. 40 | type ConfigFile struct { 41 | data map[string]map[string]string // Maps sections to options to values. 42 | } 43 | 44 | const ( 45 | // Get Errors 46 | SectionNotFound = iota 47 | OptionNotFound 48 | MaxDepthReached 49 | 50 | // Read Errors 51 | BlankSection 52 | 53 | // Get and Read Errors 54 | CouldNotParse 55 | ) 56 | 57 | var ( 58 | DefaultSection = "default" // Default section name (must be lower-case). 59 | DepthValues = 200 // Maximum allowed depth when recursively substituing variable names. 60 | 61 | // Strings accepted as bool. 62 | BoolStrings = map[string]bool{ 63 | "t": true, 64 | "true": true, 65 | "y": true, 66 | "yes": true, 67 | "on": true, 68 | "1": true, 69 | "f": false, 70 | "false": false, 71 | "n": false, 72 | "no": false, 73 | "off": false, 74 | "0": false, 75 | } 76 | 77 | varRegExp = regexp.MustCompile(`%\(([a-zA-Z0-9_.\-]+)\)s`) 78 | ) 79 | 80 | // AddSection adds a new section to the configuration. 81 | // It returns true if the new section was inserted, and false if the section already existed. 82 | func (c *ConfigFile) AddSection(section string) bool { 83 | section = strings.ToLower(section) 84 | 85 | if _, ok := c.data[section]; ok { 86 | return false 87 | } 88 | c.data[section] = make(map[string]string) 89 | 90 | return true 91 | } 92 | 93 | // RemoveSection removes a section from the configuration. 94 | // It returns true if the section was removed, and false if section did not exist. 
95 | func (c *ConfigFile) RemoveSection(section string) bool { 96 | section = strings.ToLower(section) 97 | 98 | switch _, ok := c.data[section]; { 99 | case !ok: 100 | return false 101 | case section == DefaultSection: 102 | return false // default section cannot be removed 103 | default: 104 | for o, _ := range c.data[section] { 105 | delete(c.data[section], o) 106 | } 107 | delete(c.data, section) 108 | } 109 | 110 | return true 111 | } 112 | 113 | // AddOption adds a new option and value to the configuration. 114 | // It returns true if the option and value were inserted, and false if the value was overwritten. 115 | // If the section does not exist in advance, it is created. 116 | func (c *ConfigFile) AddOption(section string, option string, value string) bool { 117 | c.AddSection(section) // make sure section exists 118 | 119 | section = strings.ToLower(section) 120 | option = strings.ToLower(option) 121 | 122 | _, ok := c.data[section][option] 123 | c.data[section][option] = value 124 | 125 | return !ok 126 | } 127 | 128 | // RemoveOption removes a option and value from the configuration. 129 | // It returns true if the option and value were removed, and false otherwise, 130 | // including if the section did not exist. 131 | func (c *ConfigFile) RemoveOption(section string, option string) bool { 132 | section = strings.ToLower(section) 133 | option = strings.ToLower(option) 134 | 135 | if _, ok := c.data[section]; !ok { 136 | return false 137 | } 138 | 139 | _, ok := c.data[section][option] 140 | delete(c.data[section], option) 141 | 142 | return ok 143 | } 144 | 145 | // NewConfigFile creates an empty configuration representation. 146 | // This representation can be filled with AddSection and AddOption and then 147 | // saved to a file using WriteConfigFile. 
148 | func NewConfigFile() *ConfigFile { 149 | c := new(ConfigFile) 150 | c.data = make(map[string]map[string]string) 151 | 152 | c.AddSection(DefaultSection) // default section always exists 153 | 154 | return c 155 | } 156 | 157 | type GetError struct { 158 | Reason int 159 | ValueType string 160 | Value string 161 | Section string 162 | Option string 163 | } 164 | 165 | func (err GetError) Error() string { 166 | switch err.Reason { 167 | case SectionNotFound: 168 | return fmt.Sprintf("section '%s' not found", string(err.Section)) 169 | case OptionNotFound: 170 | return fmt.Sprintf("option '%s' not found in section '%s'", string(err.Option), string(err.Section)) 171 | case CouldNotParse: 172 | return fmt.Sprintf("could not parse %s value '%s'", string(err.ValueType), string(err.Value)) 173 | case MaxDepthReached: 174 | return fmt.Sprintf("possible cycle while unfolding variables: max depth of %d reached", int(DepthValues)) 175 | } 176 | 177 | return "invalid get error" 178 | } 179 | 180 | type ReadError struct { 181 | Reason int 182 | Line string 183 | } 184 | 185 | func (err ReadError) Error() string { 186 | switch err.Reason { 187 | case BlankSection: 188 | return "empty section name not allowed" 189 | case CouldNotParse: 190 | return fmt.Sprintf("could not parse line: %s", string(err.Line)) 191 | } 192 | 193 | return "invalid read error" 194 | } 195 | -------------------------------------------------------------------------------- /src/conf/conf_test.go: -------------------------------------------------------------------------------- 1 | package conf 2 | 3 | import ( 4 | "strconv" 5 | "testing" 6 | ) 7 | 8 | const confFile = ` 9 | [default] 10 | host = example.com 11 | port = 43 12 | compression = on 13 | active = false 14 | 15 | [service-1] 16 | port = 443 17 | ` 18 | 19 | //url = http://%(host)s/something 20 | 21 | type stringtest struct { 22 | section string 23 | option string 24 | answer string 25 | } 26 | 27 | type inttest struct { 28 | section string 29 
| option string 30 | answer int 31 | } 32 | 33 | type booltest struct { 34 | section string 35 | option string 36 | answer bool 37 | } 38 | 39 | var testSet = []interface{}{ 40 | stringtest{"", "host", "example.com"}, 41 | inttest{"default", "port", 43}, 42 | booltest{"default", "compression", true}, 43 | booltest{"default", "active", false}, 44 | inttest{"service-1", "port", 443}, 45 | //stringtest{"service-1", "url", "http://example.com/something"}, 46 | } 47 | 48 | func TestBuild(t *testing.T) { 49 | c, err := ReadConfigBytes([]byte(confFile)) 50 | if err != nil { 51 | t.Error(err) 52 | } 53 | 54 | for _, element := range testSet { 55 | switch element.(type) { 56 | case stringtest: 57 | e := element.(stringtest) 58 | ans, err := c.GetString(e.section, e.option) 59 | if err != nil { 60 | t.Error("c.GetString(\"" + e.section + "\",\"" + e.option + "\") returned error: " + err.Error()) 61 | } else if ans != e.answer { 62 | t.Error("c.GetString(\"" + e.section + "\",\"" + e.option + "\") returned incorrect answer: " + ans) 63 | } 64 | case inttest: 65 | e := element.(inttest) 66 | ans, err := c.GetInt(e.section, e.option) 67 | if err != nil { 68 | t.Error("c.GetInt(\"" + e.section + "\",\"" + e.option + "\") returned error: " + err.Error()) 69 | } else if ans != e.answer { 70 | t.Error("c.GetInt(\"" + e.section + "\",\"" + e.option + "\") returned incorrect answer: " + strconv.Itoa(ans)) 71 | } 72 | case booltest: 73 | e := element.(booltest) 74 | ans, err := c.GetBool(e.section, e.option) 75 | if err != nil { 76 | t.Error("c.GetBool(\"" + e.section + "\",\"" + e.option + "\") returned error: " + err.Error()) 77 | } else if ans != e.answer { 78 | t.Error("c.GetBool(\"" + e.section + "\",\"" + e.option + "\") returned incorrect answer") 79 | } 80 | } 81 | } 82 | } 83 | -------------------------------------------------------------------------------- /src/conf/get.go: -------------------------------------------------------------------------------- 1 | package conf 2 | 
3 | import ( 4 | "strconv" 5 | "strings" 6 | ) 7 | 8 | // GetSections returns the list of sections in the configuration. 9 | // (The default section always exists.) 10 | func (c *ConfigFile) GetSections() (sections []string) { 11 | sections = make([]string, len(c.data)) 12 | 13 | i := 0 14 | for s, _ := range c.data { 15 | sections[i] = s 16 | i++ 17 | } 18 | 19 | return sections 20 | } 21 | 22 | // HasSection checks if the configuration has the given section. 23 | // (The default section always exists.) 24 | func (c *ConfigFile) HasSection(section string) bool { 25 | if section == "" { 26 | section = DefaultSection // consistency: use the package-level default section name instead of a literal 27 | } 28 | _, ok := c.data[strings.ToLower(section)] 29 | 30 | return ok 31 | } 32 | 33 | // GetOptions returns the list of options available in the given section. 34 | // It returns an error if the section does not exist and an empty list if the section is empty. 35 | // Options within the default section are also included; each option is listed once. 36 | func (c *ConfigFile) GetOptions(section string) (options []string, err error) { 37 | if section == "" { 38 | section = DefaultSection 39 | } 40 | section = strings.ToLower(section) 41 | 42 | if _, ok := c.data[section]; !ok { 43 | return nil, GetError{SectionNotFound, "", "", section, ""} 44 | } 45 | 46 | // BUG FIX: deduplicate — the old code sized the slice as default+section and copied both maps in full, so an option defined in both sections appeared twice, and querying the default section itself duplicated every option. 47 | seen := make(map[string]bool, len(c.data[DefaultSection])+len(c.data[section])) 48 | options = make([]string, 0, len(c.data[DefaultSection])+len(c.data[section])) 49 | for s := range c.data[DefaultSection] { 50 | if !seen[s] { 51 | seen[s] = true 52 | options = append(options, s) 53 | } 54 | } 55 | for s := range c.data[section] { 56 | if !seen[s] { 57 | seen[s] = true 58 | options = append(options, s) 59 | } 60 | } 61 | 62 | return options, nil 63 | } 64 | 65 | // HasOption checks if the configuration has the given option in the section. 66 | // It returns false if either the option or section do not exist.
62 | func (c *ConfigFile) HasOption(section string, option string) bool { 63 | if section == "" { 64 | section = "default" 65 | } 66 | section = strings.ToLower(section) 67 | option = strings.ToLower(option) 68 | 69 | if _, ok := c.data[section]; !ok { 70 | return false 71 | } 72 | 73 | _, okd := c.data[DefaultSection][option] 74 | _, oknd := c.data[section][option] 75 | 76 | return okd || oknd 77 | } 78 | 79 | // GetRawString gets the (raw) string value for the given option in the section. 80 | // The raw string value is not subjected to unfolding, which was illustrated in the beginning of this documentation. 81 | // It returns an error if either the section or the option do not exist. 82 | func (c *ConfigFile) GetRawString(section string, option string) (value string, err error) { 83 | if section == "" { 84 | section = "default" 85 | } 86 | 87 | section = strings.ToLower(section) 88 | option = strings.ToLower(option) 89 | 90 | if _, ok := c.data[section]; ok { 91 | if value, ok = c.data[section][option]; ok { 92 | return value, nil 93 | } 94 | return "", GetError{OptionNotFound, "", "", section, option} 95 | } 96 | return "", GetError{SectionNotFound, "", "", section, option} 97 | } 98 | 99 | // GetString gets the string value for the given option in the section. 100 | // If the value needs to be unfolded (see e.g. %(host)s example in the beginning of this documentation), 101 | // then GetString does this unfolding automatically, up to DepthValues number of iterations. 102 | // It returns an error if either the section or the option do not exist, or the unfolding cycled. 103 | func (c *ConfigFile) GetString(section string, option string) (value string, err error) { 104 | value, err = c.GetRawString(section, option) 105 | if err != nil { 106 | return "", err 107 | } 108 | 109 | return value, nil 110 | } 111 | 112 | // GetInt has the same behaviour as GetString but converts the response to int. 
113 | func (c *ConfigFile) GetInt(section string, option string) (value int, err error) { 114 | sv, err := c.GetString(section, option) 115 | if err == nil { 116 | value, err = strconv.Atoi(sv) 117 | if err != nil { 118 | err = GetError{CouldNotParse, "int", sv, section, option} 119 | } 120 | } 121 | 122 | return value, err 123 | } 124 | 125 | // GetFloat has the same behaviour as GetString but converts the response to float. 126 | func (c *ConfigFile) GetFloat64(section string, option string) (value float64, err error) { 127 | sv, err := c.GetString(section, option) 128 | if err == nil { 129 | value, err = strconv.ParseFloat(sv, 64) 130 | if err != nil { 131 | err = GetError{CouldNotParse, "float64", sv, section, option} 132 | } 133 | } 134 | 135 | return value, err 136 | } 137 | 138 | // GetBool has the same behaviour as GetString but converts the response to bool. 139 | // See constant BoolStrings for string values converted to bool. 140 | func (c *ConfigFile) GetBool(section string, option string) (value bool, err error) { 141 | sv, err := c.GetString(section, option) 142 | if err != nil { 143 | return false, err 144 | } 145 | 146 | value, ok := BoolStrings[strings.ToLower(sv)] 147 | if !ok { 148 | return false, GetError{CouldNotParse, "bool", sv, section, option} 149 | } 150 | 151 | return value, nil 152 | } 153 | -------------------------------------------------------------------------------- /src/conf/read.go: -------------------------------------------------------------------------------- 1 | package conf 2 | 3 | import ( 4 | "bufio" 5 | "bytes" 6 | "io" 7 | "os" 8 | "strings" 9 | ) 10 | 11 | // ReadConfigFile reads a file and returns a new configuration representation. 12 | // This representation can be queried with GetString, etc. 
13 | func ReadConfigFile(fname string) (c *ConfigFile, err error) { 14 | var file *os.File 15 | 16 | if file, err = os.Open(fname); err != nil { 17 | return nil, err 18 | } 19 | 20 | c = NewConfigFile() 21 | if err = c.Read(file); err != nil { 22 | return nil, err 23 | } 24 | 25 | if err = file.Close(); err != nil { 26 | return nil, err 27 | } 28 | 29 | return c, nil 30 | } 31 | 32 | func ReadConfigBytes(conf []byte) (c *ConfigFile, err error) { 33 | buf := bytes.NewBuffer(conf) 34 | 35 | c = NewConfigFile() 36 | if err = c.Read(buf); err != nil { 37 | return nil, err 38 | } 39 | 40 | return c, err 41 | } 42 | 43 | // Read reads an io.Reader and returns a configuration representation. This 44 | // representation can be queried with GetString, etc. 45 | func (c *ConfigFile) Read(reader io.Reader) (err error) { 46 | buf := bufio.NewReader(reader) 47 | 48 | var section, option string 49 | section = "default" 50 | for { 51 | l, buferr := buf.ReadString('\n') // parse line-by-line 52 | l = strings.TrimSpace(l) 53 | 54 | if buferr != nil { 55 | if buferr != io.EOF { 56 | return buferr // BUG FIX: was `return err`, but the named return err is always nil here, so real read errors were silently swallowed 57 | } 58 | 59 | if len(l) == 0 { 60 | break 61 | } 62 | } 63 | 64 | // switch written for readability (not performance) 65 | switch { 66 | case len(l) == 0: // empty line 67 | continue 68 | 69 | case l[0] == '#': // comment 70 | continue 71 | 72 | case l[0] == ';': // comment 73 | continue 74 | 75 | case len(l) >= 3 && strings.ToLower(l[0:3]) == "rem": // comment (for windows users) 76 | continue 77 | 78 | case l[0] == '[' && l[len(l)-1] == ']': // new section 79 | option = "" // reset multi-line value 80 | section = strings.TrimSpace(l[1 : len(l)-1]) 81 | c.AddSection(section) 82 | 83 | case section == "": // not new section and no section defined so far 84 | return ReadError{BlankSection, l} 85 | 86 | default: // other alternatives 87 | i := strings.IndexAny(l, "=:") 88 | switch { 89 | case i > 0: // option and value (redundant shadowing recomputation of i removed) 91 | option = strings.TrimSpace(l[0:i]) 92 | value := strings.TrimSpace(stripComments(l[i+1:])) 93 | c.AddOption(section, option, value) 94 | 95 | case section != "" && option != "": // continuation of multi-line value 96 | prev, _ := c.GetRawString(section, option) 97 | value := strings.TrimSpace(stripComments(l)) 98 | c.AddOption(section, option, prev+"\n"+value) 99 | 100 | default: 101 | return ReadError{CouldNotParse, l} 102 | } 103 | } 104 | 105 | // Reached end of file 106 | if buferr == io.EOF { 107 | break 108 | } 109 | } 110 | return nil 111 | } 112 | 113 | func stripComments(l string) string { 114 | // comments are preceded by space or TAB 115 | for _, c := range []string{" ;", "\t;", " #", "\t#"} { 116 | if i := strings.Index(l, c); i != -1 { 117 | l = l[0:i] 118 | } 119 | } 120 | return l 121 | } 122 | -------------------------------------------------------------------------------- /src/conf/write.go: -------------------------------------------------------------------------------- 1 | package conf 2 | 3 | import ( 4 | "bytes" 5 | "io" 6 | "os" 7 | ) 8 | 9 | // WriteConfigFile saves the configuration representation to a file. 10 | // The desired file permissions must be passed as in os.Open. 11 | // The header is a string that is saved as a comment in the first line of the file. 12 | func (c *ConfigFile) WriteConfigFile(fname string, perm uint32, header string) (err error) { 13 | var file *os.File 14 | 15 | if file, err = os.OpenFile(fname, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, os.FileMode(perm)); err != nil { // BUG FIX: os.Create ignored the documented perm argument 16 | return err 17 | } 18 | if err = c.Write(file, header); err != nil { 19 | return err 20 | } 21 | 22 | return file.Close() 23 | } 24 | 25 | // WriteConfigBytes returns the configuration file. 26 | func (c *ConfigFile) WriteConfigBytes(header string) (config []byte) { 27 | buf := bytes.NewBuffer(nil) 28 | 29 | c.Write(buf, header) 30 | 31 | return buf.Bytes() 32 | } 33 | 34 | // Writes the configuration file to the io.Writer.
35 | func (c *ConfigFile) Write(writer io.Writer, header string) (err error) { 36 | buf := bytes.NewBuffer(nil) 37 | 38 | if header != "" { 39 | if _, err = buf.WriteString("# " + header + "\n"); err != nil { 40 | return err 41 | } 42 | } 43 | 44 | for section, sectionmap := range c.data { 45 | if section == DefaultSection && len(sectionmap) == 0 { 46 | continue // skip default section if empty 47 | } 48 | if _, err = buf.WriteString("[" + section + "]\n"); err != nil { 49 | return err 50 | } 51 | for option, value := range sectionmap { 52 | if _, err = buf.WriteString(option + "=" + value + "\n"); err != nil { 53 | return err 54 | } 55 | } 56 | if _, err = buf.WriteString("\n"); err != nil { 57 | return err 58 | } 59 | } 60 | 61 | _, err = buf.WriteTo(writer) // BUG FIX: was `buf.WriteTo(writer)` — the flush error was discarded 62 | 63 | return err // propagate writer failure instead of unconditionally returning nil 64 | } 65 | -------------------------------------------------------------------------------- /src/kmonitor/kmonitor.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "log" 5 | "fmt" 6 | "os" 7 | "os/signal" 8 | "flag" 9 | 10 | "conf" 11 | "redis" 12 | ) 13 | 14 | var config *conf.ConfigFile 15 | var quit bool = false 16 | 17 | func run() int { 18 | host,_ := config.GetString("redis", "host") 19 | password,_ := config.GetString("redis", "password") 20 | db,_ := config.GetInt("redis", "db") 21 | 22 | events,_ := config.GetString("redis", "events") 23 | channel,_ := config.GetString("redis", "channel") 24 | 25 | cli := redis.NewRedis(host, password, db) 26 | err := cli.Connect() 27 | if err != nil { 28 | log.Print("connect to redis failed") 29 | return 1 30 | } 31 | 32 | _, err = cli.Exec("config", "set", "notify-keyspace-events", events) 33 | if err != nil { 34 | log.Printf("config redis failed:%v", err) 35 | return 1 36 | } 37 | 38 | _, err = cli.Exec("subscribe", channel) 39 | if err != nil { 40 | log.Printf("subscribe failed:%s", err) 41 | return 1 42 | } 43 | 44 | for { 45 | if quit { 46 | break 47 | } 48 | resp, err := 
cli.ReadResponse() 49 | if err != nil { 50 | log.Printf("read publish message failed:%v", err) 51 | return 2 52 | } 53 | if data, ok := resp.([]string); ok { 54 | if len(data) != 3 || data[0] != "message" { 55 | log.Printf("receive unexpected message, %v", data) 56 | } else { 57 | event := data[1] 58 | key := data[2] 59 | log.Printf("receive [%s], value[%s]", event, key) 60 | } 61 | } else { 62 | log.Printf("receive unexpected message, %v", resp) 63 | } 64 | } 65 | return 0 66 | } 67 | 68 | func usage() { 69 | fmt.Fprintf(os.Stderr, "usage: %s [config]\n", os.Args[0]) 70 | flag.PrintDefaults() 71 | os.Exit(2) 72 | } 73 | 74 | func main() { 75 | flag.Usage = usage 76 | flag.Parse() 77 | 78 | args := flag.Args() 79 | if len(args) < 1 { 80 | fmt.Println("config file is missing.") 81 | os.Exit(1) 82 | } 83 | 84 | var err error 85 | config, err = conf.ReadConfigFile(args[0]) 86 | if err != nil { 87 | fmt.Fprintf(os.Stderr, "read config file failed:%s", err) 88 | os.Exit(1) 89 | } 90 | 91 | c := make(chan os.Signal, 1) 92 | signal.Notify(c, os.Interrupt) // FIX: dropped os.Kill — SIGKILL cannot be caught, so notifying on it was a no-op; NOTE(review): `quit` is read/written from two goroutines without synchronization — consider sync/atomic 93 | go func() { 94 | s := <- c 95 | log.Printf("catch signal %v, program will exit",s) 96 | quit = true 97 | }() 98 | 99 | code := run() 100 | os.Exit(code) 101 | } 102 | 103 | -------------------------------------------------------------------------------- /src/levigo/.gitignore: -------------------------------------------------------------------------------- 1 | *.o 2 | *.a 3 | *.6 4 | *.out 5 | _testmain.go 6 | _obj 7 | -------------------------------------------------------------------------------- /src/levigo/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2012 Jeffrey M Hodges 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, 
publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 4 | 5 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 6 | 7 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 8 | -------------------------------------------------------------------------------- /src/levigo/README.md: -------------------------------------------------------------------------------- 1 | # levigo 2 | 3 | levigo is a Go wrapper for LevelDB. 4 | 5 | The API has been godoc'ed and [is available on the 6 | web](http://godoc.org/github.com/jmhodges/levigo). 7 | 8 | Questions answered at `golang-nuts@googlegroups.com`. 9 | 10 | ## Building 11 | 12 | You'll need the shared library build of 13 | [LevelDB](http://code.google.com/p/leveldb/) installed on your machine. The 14 | current LevelDB will build it by default. 15 | 16 | The minimum version of LevelDB required is currently 1.7. If you require the 17 | use of an older version of LevelDB, see the [fork of levigo for LevelDB 18 | 1.4](https://github.com/jmhodges/levigo_leveldb_1.4). Prefer putting in the 19 | work to be up to date as LevelDB moves very quickly. 
20 | 21 | Now, if you build LevelDB and put the shared library and headers in one of the 22 | standard places for your OS, you'll be able to simply run: 23 | 24 | go get github.com/jmhodges/levigo 25 | 26 | But, suppose you put the shared LevelDB library somewhere weird like 27 | /path/to/lib and the headers were installed in /path/to/include. To install 28 | levigo remotely, you'll run: 29 | 30 | CGO_CFLAGS="-I/path/to/leveldb/include" CGO_LDFLAGS="-L/path/to/leveldb/lib" go get github.com/jmhodges/levigo 31 | 32 | and there you go. 33 | 34 | In order to build with snappy, you'll have to explicitly add "-lsnappy" to the 35 | `CGO_LDFLAGS`. Supposing that both snappy and leveldb are in weird places, 36 | you'll run something like: 37 | 38 | CGO_CFLAGS="-I/path/to/leveldb/include -I/path/to/snappy/include" 39 | CGO_LDFLAGS="-L/path/to/leveldb/lib -L/path/to/snappy/lib -lsnappy" go get github.com/jmhodges/levigo 40 | 41 | (and make sure the -lsnappy is after the snappy library path!). 42 | 43 | Of course, these same rules apply when doing `go build`, as well. 44 | 45 | ## Caveats 46 | 47 | Comparators and WriteBatch iterators must be written in C in your own 48 | library. This seems like a pain in the ass, but remember that you'll have the 49 | LevelDB C API available to your in your client package when you import levigo. 50 | 51 | An example of writing your own Comparator can be found in 52 | . 53 | -------------------------------------------------------------------------------- /src/levigo/batch.go: -------------------------------------------------------------------------------- 1 | package levigo 2 | 3 | // #include "leveldb/c.h" 4 | import "C" 5 | 6 | import ( 7 | "unsafe" 8 | ) 9 | 10 | // WriteBatch is a batching of Puts, and Deletes to be written atomically to a 11 | // database. A WriteBatch is written when passed to DB.Write. 12 | // 13 | // To prevent memory leaks, call Close when the program no longer needs the 14 | // WriteBatch object. 
15 | type WriteBatch struct { 16 | wbatch *C.leveldb_writebatch_t 17 | } 18 | 19 | // NewWriteBatch creates a fully allocated WriteBatch. 20 | func NewWriteBatch() *WriteBatch { 21 | wb := C.leveldb_writebatch_create() 22 | return &WriteBatch{wb} 23 | } 24 | 25 | // Close releases the underlying memory of a WriteBatch. 26 | func (w *WriteBatch) Close() { 27 | C.leveldb_writebatch_destroy(w.wbatch) 28 | } 29 | 30 | // Put places a key-value pair into the WriteBatch for writing later. 31 | // 32 | // Both the key and value byte slices may be reused as WriteBatch takes a copy 33 | // of them before returning. 34 | // 35 | func (w *WriteBatch) Put(key, value []byte) { 36 | // leveldb_writebatch_put, and _delete call memcpy() (by way of 37 | // Memtable::Add) when called, so we do not need to worry about these 38 | // []byte being reclaimed by GC. 39 | var k, v *C.char 40 | if len(key) != 0 { 41 | k = (*C.char)(unsafe.Pointer(&key[0])) 42 | } 43 | if len(value) != 0 { 44 | v = (*C.char)(unsafe.Pointer(&value[0])) 45 | } 46 | 47 | lenk := len(key) 48 | lenv := len(value) 49 | 50 | C.leveldb_writebatch_put(w.wbatch, k, C.size_t(lenk), v, C.size_t(lenv)) 51 | } 52 | 53 | // Delete queues a deletion of the data at key to be deleted later. 54 | // 55 | // The key byte slice may be reused safely. Delete takes a copy of 56 | // them before returning. 57 | func (w *WriteBatch) Delete(key []byte) { 58 | C.leveldb_writebatch_delete(w.wbatch, 59 | (*C.char)(unsafe.Pointer(&key[0])), C.size_t(len(key))) 60 | } 61 | 62 | // Clear removes all the enqueued Put and Deletes in the WriteBatch. 
63 | func (w *WriteBatch) Clear() { 64 | C.leveldb_writebatch_clear(w.wbatch) 65 | } 66 | -------------------------------------------------------------------------------- /src/levigo/cache.go: -------------------------------------------------------------------------------- 1 | package levigo 2 | 3 | // #include 4 | // #include "leveldb/c.h" 5 | import "C" 6 | 7 | // Cache is a cache used to store data read from data in memory. 8 | // 9 | // Typically, NewLRUCache is all you will need, but advanced users may 10 | // implement their own *C.leveldb_cache_t and create a Cache. 11 | // 12 | // To prevent memory leaks, a Cache must have Close called on it when it is 13 | // no longer needed by the program. Note: if the process is shutting down, 14 | // this may not be necessary and could be avoided to shorten shutdown time. 15 | type Cache struct { 16 | Cache *C.leveldb_cache_t 17 | } 18 | 19 | // NewLRUCache creates a new Cache object with the capacity given. 20 | // 21 | // To prevent memory leaks, Close should be called on the Cache when the 22 | // program no longer needs it. Note: if the process is shutting down, this may 23 | // not be necessary and could be avoided to shorten shutdown time. 24 | func NewLRUCache(capacity int) *Cache { 25 | return &Cache{C.leveldb_cache_create_lru(C.size_t(capacity))} 26 | } 27 | 28 | // Close deallocates the underlying memory of the Cache object. 29 | func (c *Cache) Close() { 30 | C.leveldb_cache_destroy(c.Cache) 31 | } 32 | -------------------------------------------------------------------------------- /src/levigo/comparator.go: -------------------------------------------------------------------------------- 1 | package levigo 2 | 3 | // #include "leveldb/c.h" 4 | import "C" 5 | 6 | // DestroyComparator deallocates a *C.leveldb_comparator_t. 7 | // 8 | // This is provided as a convienience to advanced users that have implemented 9 | // their own comparators in C in their own code. 
10 | func DestroyComparator(cmp *C.leveldb_comparator_t) { 11 | C.leveldb_comparator_destroy(cmp) 12 | } 13 | -------------------------------------------------------------------------------- /src/levigo/conv.go: -------------------------------------------------------------------------------- 1 | package levigo 2 | 3 | // #include "leveldb/c.h" 4 | import "C" 5 | 6 | func boolToUchar(b bool) C.uchar { 7 | uc := C.uchar(0) 8 | if b { 9 | uc = C.uchar(1) 10 | } 11 | return uc 12 | } 13 | 14 | func ucharToBool(uc C.uchar) bool { 15 | if uc == C.uchar(0) { 16 | return false 17 | } 18 | return true 19 | } 20 | -------------------------------------------------------------------------------- /src/levigo/db.go: -------------------------------------------------------------------------------- 1 | package levigo 2 | 3 | /* 4 | #include 5 | #include "leveldb/c.h" 6 | 7 | // This function exists only to clean up lack-of-const warnings when 8 | // leveldb_approximate_sizes is called from Go-land. 9 | void levigo_leveldb_approximate_sizes( 10 | leveldb_t* db, 11 | int num_ranges, 12 | char** range_start_key, const size_t* range_start_key_len, 13 | char** range_limit_key, const size_t* range_limit_key_len, 14 | uint64_t* sizes) { 15 | leveldb_approximate_sizes(db, 16 | num_ranges, 17 | (const char* const*)range_start_key, 18 | range_start_key_len, 19 | (const char* const*)range_limit_key, 20 | range_limit_key_len, 21 | sizes); 22 | } 23 | */ 24 | import "C" 25 | 26 | import ( 27 | "unsafe" 28 | ) 29 | 30 | type DatabaseError string 31 | 32 | func (e DatabaseError) Error() string { 33 | return string(e) 34 | } 35 | 36 | // DB is a reusable handle to a LevelDB database on disk, created by Open. 37 | // 38 | // To avoid memory and file descriptor leaks, call Close when the process no 39 | // longer needs the handle. Calls to any DB method made after Close will 40 | // panic. 41 | // 42 | // The DB instance may be shared between goroutines. 
The usual data race 43 | // conditions will occur if the same key is written to from more than one, of 44 | // course. 45 | type DB struct { 46 | Ldb *C.leveldb_t 47 | } 48 | 49 | // Range is a range of keys in the database. GetApproximateSizes calls with it 50 | // begin at the key Start and end right before the key Limit. 51 | type Range struct { 52 | Start []byte 53 | Limit []byte 54 | } 55 | 56 | // Snapshot provides a consistent view of read operations in a DB. 57 | // 58 | // Snapshot is used in read operations by setting it on a 59 | // ReadOptions. Snapshots are created by calling DB.NewSnapshot. 60 | // 61 | // To prevent memory leaks and resource strain in the database, the snapshot 62 | // returned must be released with DB.ReleaseSnapshot method on the DB that 63 | // created it. 64 | type Snapshot struct { 65 | snap *C.leveldb_snapshot_t 66 | } 67 | 68 | // Open opens a database. 69 | // 70 | // Creating a new database is done by calling SetCreateIfMissing(true) on the 71 | // Options passed to Open. 72 | // 73 | // It is usually wise to set a Cache object on the Options with SetCache to 74 | // keep recently used data from that database in memory. 75 | func Open(dbname string, o *Options) (*DB, error) { 76 | var errStr *C.char 77 | ldbname := C.CString(dbname) 78 | defer C.free(unsafe.Pointer(ldbname)) 79 | 80 | leveldb := C.leveldb_open(o.Opt, ldbname, &errStr) 81 | if errStr != nil { 82 | gs := C.GoString(errStr) 83 | C.leveldb_free(unsafe.Pointer(errStr)) 84 | return nil, DatabaseError(gs) 85 | } 86 | return &DB{leveldb}, nil 87 | } 88 | 89 | // DestroyDatabase removes a database entirely, removing everything from the 90 | // filesystem. 
91 | func DestroyDatabase(dbname string, o *Options) error { 92 | var errStr *C.char 93 | ldbname := C.CString(dbname) 94 | defer C.free(unsafe.Pointer(ldbname)) 95 | 96 | C.leveldb_destroy_db(o.Opt, ldbname, &errStr) 97 | if errStr != nil { 98 | gs := C.GoString(errStr) 99 | C.leveldb_free(unsafe.Pointer(errStr)) 100 | return DatabaseError(gs) 101 | } 102 | return nil 103 | } 104 | 105 | // RepairDatabase attempts to repair a database. 106 | // 107 | // If the database is unrepairable, an error is returned. 108 | func RepairDatabase(dbname string, o *Options) error { 109 | var errStr *C.char 110 | ldbname := C.CString(dbname) 111 | defer C.free(unsafe.Pointer(ldbname)) 112 | 113 | C.leveldb_repair_db(o.Opt, ldbname, &errStr) 114 | if errStr != nil { 115 | gs := C.GoString(errStr) 116 | C.leveldb_free(unsafe.Pointer(errStr)) 117 | return DatabaseError(gs) 118 | } 119 | return nil 120 | } 121 | 122 | // Put writes data associated with a key to the database. 123 | // 124 | // If a nil []byte is passed in as value, it will be returned by Get 125 | // as an zero-length slice. The WriteOptions passed in can be reused 126 | // by multiple calls to this and if the WriteOptions is left unchanged. 127 | // 128 | // The key and value byte slices may be reused safely. Put takes a copy of 129 | // them before returning. 130 | func (db *DB) Put(wo *WriteOptions, key, value []byte) error { 131 | var errStr *C.char 132 | // leveldb_put, _get, and _delete call memcpy() (by way of Memtable::Add) 133 | // when called, so we do not need to worry about these []byte being 134 | // reclaimed by GC. 
135 | var k, v *C.char 136 | if len(key) != 0 { 137 | k = (*C.char)(unsafe.Pointer(&key[0])) 138 | } 139 | if len(value) != 0 { 140 | v = (*C.char)(unsafe.Pointer(&value[0])) 141 | } 142 | 143 | lenk := len(key) 144 | lenv := len(value) 145 | C.leveldb_put( 146 | db.Ldb, wo.Opt, k, C.size_t(lenk), v, C.size_t(lenv), &errStr) 147 | 148 | if errStr != nil { 149 | gs := C.GoString(errStr) 150 | C.leveldb_free(unsafe.Pointer(errStr)) 151 | return DatabaseError(gs) 152 | } 153 | return nil 154 | } 155 | 156 | // Get returns the data associated with the key from the database. 157 | // 158 | // If the key does not exist in the database, a nil []byte is returned. If the 159 | // key does exist, but the data is zero-length in the database, a zero-length 160 | // []byte will be returned. 161 | // 162 | // The key byte slice may be reused safely. Get takes a copy of 163 | // them before returning. 164 | func (db *DB) Get(ro *ReadOptions, key []byte) ([]byte, error) { 165 | var errStr *C.char 166 | var vallen C.size_t 167 | var k *C.char 168 | if len(key) != 0 { 169 | k = (*C.char)(unsafe.Pointer(&key[0])) 170 | } 171 | 172 | value := C.leveldb_get( 173 | db.Ldb, ro.Opt, k, C.size_t(len(key)), &vallen, &errStr) 174 | 175 | if errStr != nil { 176 | gs := C.GoString(errStr) 177 | C.leveldb_free(unsafe.Pointer(errStr)) 178 | return nil, DatabaseError(gs) 179 | } 180 | 181 | if value == nil { 182 | return nil, nil 183 | } 184 | 185 | defer C.leveldb_free(unsafe.Pointer(value)) 186 | return C.GoBytes(unsafe.Pointer(value), C.int(vallen)), nil 187 | } 188 | 189 | // Delete removes the data associated with the key from the database. 190 | // 191 | // The key byte slice may be reused safely. Delete takes a copy of 192 | // them before returning. The WriteOptions passed in can be reused by 193 | // multiple calls to this and if the WriteOptions is left unchanged. 
194 | func (db *DB) Delete(wo *WriteOptions, key []byte) error { 195 | var errStr *C.char 196 | var k *C.char 197 | if len(key) != 0 { 198 | k = (*C.char)(unsafe.Pointer(&key[0])) 199 | } 200 | 201 | C.leveldb_delete( 202 | db.Ldb, wo.Opt, k, C.size_t(len(key)), &errStr) 203 | 204 | if errStr != nil { 205 | gs := C.GoString(errStr) 206 | C.leveldb_free(unsafe.Pointer(errStr)) 207 | return DatabaseError(gs) 208 | } 209 | return nil 210 | } 211 | 212 | // Write atomically writes a WriteBatch to disk. The WriteOptions 213 | // passed in can be reused by multiple calls to this and other methods. 214 | func (db *DB) Write(wo *WriteOptions, w *WriteBatch) error { 215 | var errStr *C.char 216 | C.leveldb_write(db.Ldb, wo.Opt, w.wbatch, &errStr) 217 | if errStr != nil { 218 | gs := C.GoString(errStr) 219 | C.leveldb_free(unsafe.Pointer(errStr)) 220 | return DatabaseError(gs) 221 | } 222 | return nil 223 | } 224 | 225 | // NewIterator returns an Iterator over the the database that uses the 226 | // ReadOptions given. 227 | // 228 | // Often, this is used for large, offline bulk reads while serving live 229 | // traffic. In that case, it may be wise to disable caching so that the data 230 | // processed by the returned Iterator does not displace the already cached 231 | // data. This can be done by calling SetFillCache(false) on the ReadOptions 232 | // before passing it here. 233 | // 234 | // Similiarly, ReadOptions.SetSnapshot is also useful. 235 | // 236 | // The ReadOptions passed in can be reused by multiple calls to this 237 | // and other methods if the ReadOptions is left unchanged. 238 | func (db *DB) NewIterator(ro *ReadOptions) *Iterator { 239 | it := C.leveldb_create_iterator(db.Ldb, ro.Opt) 240 | return &Iterator{Iter: it} 241 | } 242 | 243 | // GetApproximateSizes returns the approximate number of bytes of file system 244 | // space used by one or more key ranges. 
245 | // 246 | // The keys counted will begin at Range.Start and end on the key before 247 | // Range.Limit. 248 | func (db *DB) GetApproximateSizes(ranges []Range) []uint64 { 249 | starts := make([]*C.char, len(ranges)) 250 | limits := make([]*C.char, len(ranges)) 251 | startLens := make([]C.size_t, len(ranges)) 252 | limitLens := make([]C.size_t, len(ranges)) 253 | for i, r := range ranges { 254 | starts[i] = C.CString(string(r.Start)) 255 | startLens[i] = C.size_t(len(r.Start)) 256 | limits[i] = C.CString(string(r.Limit)) 257 | limitLens[i] = C.size_t(len(r.Limit)) 258 | } 259 | sizes := make([]uint64, len(ranges)) 260 | numranges := C.int(len(ranges)) 261 | startsPtr := &starts[0] 262 | limitsPtr := &limits[0] 263 | startLensPtr := &startLens[0] 264 | limitLensPtr := &limitLens[0] 265 | sizesPtr := (*C.uint64_t)(&sizes[0]) 266 | C.levigo_leveldb_approximate_sizes( 267 | db.Ldb, numranges, startsPtr, startLensPtr, 268 | limitsPtr, limitLensPtr, sizesPtr) 269 | for i := range ranges { 270 | C.free(unsafe.Pointer(starts[i])) 271 | C.free(unsafe.Pointer(limits[i])) 272 | } 273 | return sizes 274 | } 275 | 276 | // PropertyValue returns the value of a database property. 277 | // 278 | // Examples of properties include "leveldb.stats", "leveldb.sstables", 279 | // and "leveldb.num-files-at-level0". 280 | func (db *DB) PropertyValue(propName string) string { 281 | cname := C.CString(propName) 282 | value := C.GoString(C.leveldb_property_value(db.Ldb, cname)) 283 | C.free(unsafe.Pointer(cname)) 284 | return value 285 | } 286 | 287 | // NewSnapshot creates a new snapshot of the database. 288 | // 289 | // The Snapshot, when used in a ReadOptions, provides a consistent 290 | // view of state of the database at the the snapshot was created. 291 | // 292 | // To prevent memory leaks and resource strain in the database, the snapshot 293 | // returned must be released with DB.ReleaseSnapshot method on the DB that 294 | // created it. 
295 | // 296 | // See the LevelDB documentation for details. 297 | func (db *DB) NewSnapshot() *Snapshot { 298 | return &Snapshot{C.leveldb_create_snapshot(db.Ldb)} 299 | } 300 | 301 | // ReleaseSnapshot removes the snapshot from the database's list of snapshots, 302 | // and deallocates it. 303 | func (db *DB) ReleaseSnapshot(snap *Snapshot) { 304 | C.leveldb_release_snapshot(db.Ldb, snap.snap) 305 | } 306 | 307 | // CompactRange runs a manual compaction on the Range of keys given. This is 308 | // not likely to be needed for typical usage. 309 | func (db *DB) CompactRange(r Range) { 310 | var start, limit *C.char 311 | if len(r.Start) != 0 { 312 | start = (*C.char)(unsafe.Pointer(&r.Start[0])) 313 | } 314 | if len(r.Limit) != 0 { 315 | limit = (*C.char)(unsafe.Pointer(&r.Limit[0])) 316 | } 317 | C.leveldb_compact_range( 318 | db.Ldb, start, C.size_t(len(r.Start)), limit, C.size_t(len(r.Limit))) 319 | } 320 | 321 | // Close closes the database, rendering it unusable for I/O, by deallocating 322 | // the underlying handle. 323 | // 324 | // Any attempts to use the DB after Close is called will panic. 325 | func (db *DB) Close() { 326 | C.leveldb_close(db.Ldb) 327 | } 328 | -------------------------------------------------------------------------------- /src/levigo/doc.go: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | Package levigo provides the ability to create and access LevelDB databases. 4 | 5 | levigo.Open opens and creates databases. 6 | 7 | opts := levigo.NewOptions() 8 | opts.SetCache(levigo.NewLRUCache(3<<30)) 9 | opts.SetCreateIfMissing(true) 10 | db, err := levigo.Open("/path/to/db", opts) 11 | 12 | The DB struct returned by Open provides DB.Get, DB.Put and DB.Delete to modify 13 | and query the database. 14 | 15 | ro := levigo.NewReadOptions() 16 | wo := levigo.NewWriteOptions() 17 | // if ro and wo are not used again, be sure to Close them. 18 | data, err := db.Get(ro, []byte("key")) 19 | ... 
20 | err = db.Put(wo, []byte("anotherkey"), data) 21 | ... 22 | err = db.Delete(wo, []byte("key")) 23 | 24 | For bulk reads, use an Iterator. If you want to avoid disturbing your live 25 | traffic while doing the bulk read, be sure to call SetFillCache(false) on the 26 | ReadOptions you use when creating the Iterator. 27 | 28 | ro := levigo.NewReadOptions() 29 | ro.SetFillCache(false) 30 | it := db.NewIterator(ro) 31 | defer it.Close() 32 | it.Seek(mykey) 33 | for it = it; it.Valid(); it.Next() { 34 | munge(it.Key(), it.Value()) 35 | } 36 | if err := it.GetError(); err != nil { 37 | ... 38 | } 39 | 40 | Batched, atomic writes can be performed with a WriteBatch and 41 | DB.Write. 42 | 43 | wb := levigo.NewWriteBatch() 44 | // defer wb.Close or use wb.Clear and reuse. 45 | wb.Delete([]byte("removed")) 46 | wb.Put([]byte("added"), []byte("data")) 47 | wb.Put([]byte("anotheradded"), []byte("more")) 48 | err := db.Write(wo, wb) 49 | 50 | If your working dataset does not fit in memory, you'll want to add a bloom 51 | filter to your database. NewBloomFilter and Options.SetFilterPolicy is what 52 | you want. NewBloomFilter is amount of bits in the filter to use per key in 53 | your database. 54 | 55 | filter := levigo.NewBloomFilter(10) 56 | opts.SetFilterPolicy(filter) 57 | db, err := levigo.Open("/path/to/db", opts) 58 | 59 | If you're using a custom comparator in your code, be aware you may have to 60 | make your own filter policy object. 61 | 62 | This documentation is not a complete discussion of LevelDB. Please read the 63 | LevelDB documentation for information on 64 | its operation. You'll find lots of goodies there. 
65 | */ 66 | package levigo 67 | -------------------------------------------------------------------------------- /src/levigo/env.go: -------------------------------------------------------------------------------- 1 | package levigo 2 | 3 | // #cgo LDFLAGS: -lleveldb -lsnappy -lstdc++ 4 | // #include "leveldb/c.h" 5 | import "C" 6 | 7 | // Env is a system call environment used by a database. 8 | // 9 | // Typically, NewDefaultEnv is all you need. Advanced users may create their 10 | // own Env with a *C.leveldb_env_t of their own creation. 11 | // 12 | // To prevent memory leaks, an Env must have Close called on it when it is 13 | // no longer needed by the program. 14 | type Env struct { 15 | Env *C.leveldb_env_t 16 | } 17 | 18 | // NewDefaultEnv creates a default environment for use in an Options. 19 | // 20 | // To prevent memory leaks, the Env returned should be deallocated with 21 | // Close. 22 | func NewDefaultEnv() *Env { 23 | return &Env{C.leveldb_create_default_env()} 24 | } 25 | 26 | // Close deallocates the Env, freeing the underlying struct. 27 | func (env *Env) Close() { 28 | C.leveldb_env_destroy(env.Env) 29 | } 30 | -------------------------------------------------------------------------------- /src/levigo/examples/comparator_example.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | /* 4 | #include 5 | #include 6 | 7 | static void CmpDestroy(void* arg) { } 8 | 9 | static int CmpCompare(void* arg, const char* a, size_t alen, 10 | const char* b, size_t blen) { 11 | int n = (alen < blen) ? 
alen : blen; 12 | int r = memcmp(a, b, n); 13 | if (r == 0) { 14 | if (alen < blen) r = -1; 15 | else if (alen > blen) r = +1; 16 | } 17 | return r; 18 | } 19 | 20 | static const char* CmpName(void* arg) { 21 | return "foo"; 22 | } 23 | 24 | static leveldb_comparator_t* CmpFooNew() { 25 | return leveldb_comparator_create(NULL, CmpDestroy, CmpCompare, CmpName); 26 | } 27 | 28 | */ 29 | import "C" 30 | 31 | type Comparator struct { 32 | Comparator *C.leveldb_comparator_t 33 | } 34 | 35 | func NewFooComparator() *Comparator { 36 | return &Comparator{C.CmpFooNew()} 37 | } 38 | 39 | func (cmp *Comparator) Close() { 40 | C.leveldb_comparator_destroy(cmp.Comparator) 41 | } 42 | 43 | func main() { 44 | NewFooComparator().Close() 45 | } 46 | -------------------------------------------------------------------------------- /src/levigo/filterpolicy.go: -------------------------------------------------------------------------------- 1 | package levigo 2 | 3 | // #include 4 | // #include "leveldb/c.h" 5 | import "C" 6 | 7 | // FilterPolicy is a factory type that allows the LevelDB database to create a 8 | // filter, such as a bloom filter, that is stored in the sstables and used by 9 | // DB.Get to reduce reads. 10 | // 11 | // An instance of this struct may be supplied to Options when opening a 12 | // DB. Typical usage is to call NewBloomFilter to get an instance. 13 | // 14 | // To prevent memory leaks, a FilterPolicy must have Close called on it when 15 | // it is no longer needed by the program. 16 | type FilterPolicy struct { 17 | Policy *C.leveldb_filterpolicy_t 18 | } 19 | 20 | // NewBloomFilter creates a filter policy that will create a bloom filter when 21 | // necessary with the given number of bits per key. 22 | // 23 | // See the FilterPolicy documentation for more. 
24 | func NewBloomFilter(bitsPerKey int) *FilterPolicy { 25 | policy := C.leveldb_filterpolicy_create_bloom(C.int(bitsPerKey)) 26 | return &FilterPolicy{policy} 27 | } 28 | 29 | func (fp *FilterPolicy) Close() { 30 | C.leveldb_filterpolicy_destroy(fp.Policy) 31 | } 32 | -------------------------------------------------------------------------------- /src/levigo/iterator.go: -------------------------------------------------------------------------------- 1 | package levigo 2 | 3 | // #include 4 | // #include "leveldb/c.h" 5 | import "C" 6 | 7 | import ( 8 | "unsafe" 9 | ) 10 | 11 | type IteratorError string 12 | 13 | func (e IteratorError) Error() string { 14 | return string(e) 15 | } 16 | 17 | // Iterator is a read-only iterator through a LevelDB database. It provides a 18 | // way to seek to specific keys and iterate through the keyspace from that 19 | // point, as well as access the values of those keys. 20 | // 21 | // Care must be taken when using an Iterator. If the method Valid returns 22 | // false, calls to Key, Value, Next, and Prev will result in panics. However, 23 | // Seek, SeekToFirst, SeekToLast, GetError, Valid, and Close will still be 24 | // safe to call. 25 | // 26 | // GetError will only return an error in the event of a LevelDB error. It will 27 | // return a nil on iterators that are simply invalid. Given that behavior, 28 | // GetError is not a replacement for a Valid. 29 | // 30 | // A typical use looks like: 31 | // 32 | // db := levigo.Open(...) 33 | // 34 | // it := db.NewIterator(readOpts) 35 | // defer it.Close() 36 | // it.Seek(mykey) 37 | // for it = it; it.Valid(); it.Next() { 38 | // useKeyAndValue(it.Key(), it.Value()) 39 | // } 40 | // if err := it.GetError() { 41 | // ... 42 | // } 43 | // 44 | // To prevent memory leaks, an Iterator must have Close called on it when it 45 | // is no longer needed by the program. 
46 | type Iterator struct { 47 | Iter *C.leveldb_iterator_t 48 | } 49 | 50 | // Valid returns false only when an Iterator has iterated past either the 51 | // first or the last key in the database. 52 | func (it *Iterator) Valid() bool { 53 | return ucharToBool(C.leveldb_iter_valid(it.Iter)) 54 | } 55 | 56 | // Key returns a copy the key in the database the iterator currently holds. 57 | // 58 | // If Valid returns false, this method will panic. 59 | func (it *Iterator) Key() []byte { 60 | var klen C.size_t 61 | kdata := C.leveldb_iter_key(it.Iter, &klen) 62 | if kdata == nil { 63 | return nil 64 | } 65 | // Unlike DB.Get, the key, kdata, returned is not meant to be freed by the 66 | // client. It's a direct reference to data managed by the iterator_t 67 | // instead of a copy. So, we must not free it here but simply copy it 68 | // with GoBytes. 69 | return C.GoBytes(unsafe.Pointer(kdata), C.int(klen)) 70 | } 71 | 72 | // Value returns a copy of the value in the database the iterator currently 73 | // holds. 74 | // 75 | // If Valid returns false, this method will panic. 76 | func (it *Iterator) Value() []byte { 77 | var vlen C.size_t 78 | vdata := C.leveldb_iter_value(it.Iter, &vlen) 79 | if vdata == nil { 80 | return nil 81 | } 82 | // Unlike DB.Get, the value, vdata, returned is not meant to be freed by 83 | // the client. It's a direct reference to data managed by the iterator_t 84 | // instead of a copy. So, we must not free it here but simply copy it with 85 | // GoBytes. 86 | return C.GoBytes(unsafe.Pointer(vdata), C.int(vlen)) 87 | } 88 | 89 | // Next moves the iterator to the next sequential key in the database, as 90 | // defined by the Comparator in the ReadOptions used to create this Iterator. 91 | // 92 | // If Valid returns false, this method will panic. 
93 | func (it *Iterator) Next() { 94 | C.leveldb_iter_next(it.Iter) 95 | } 96 | 97 | // Prev moves the iterator to the previous sequential key in the database, as 98 | // defined by the Comparator in the ReadOptions used to create this Iterator. 99 | // 100 | // If Valid returns false, this method will panic. 101 | func (it *Iterator) Prev() { 102 | C.leveldb_iter_prev(it.Iter) 103 | } 104 | 105 | // SeekToFirst moves the iterator to the first key in the database, as defined 106 | // by the Comparator in the ReadOptions used to create this Iterator. 107 | // 108 | // This method is safe to call when Valid returns false. 109 | func (it *Iterator) SeekToFirst() { 110 | C.leveldb_iter_seek_to_first(it.Iter) 111 | } 112 | 113 | // SeekToLast moves the iterator to the last key in the database, as defined 114 | // by the Comparator in the ReadOptions used to create this Iterator. 115 | // 116 | // This method is safe to call when Valid returns false. 117 | func (it *Iterator) SeekToLast() { 118 | C.leveldb_iter_seek_to_last(it.Iter) 119 | } 120 | 121 | // Seek moves the iterator the position of the key given or, if the key 122 | // doesn't exist, the next key that does exist in the database. If the key 123 | // doesn't exist, and there is no next key, the Iterator becomes invalid. 124 | // 125 | // This method is safe to call when Valid returns false. 126 | func (it *Iterator) Seek(key []byte) { 127 | C.leveldb_iter_seek(it.Iter, (*C.char)(unsafe.Pointer(&key[0])), C.size_t(len(key))) 128 | } 129 | 130 | // GetError returns an IteratorError from LevelDB if it had one during 131 | // iteration. 132 | // 133 | // This method is safe to call when Valid returns false. 
134 | func (it *Iterator) GetError() error { 135 | var errStr *C.char 136 | C.leveldb_iter_get_error(it.Iter, &errStr) 137 | if errStr != nil { 138 | gs := C.GoString(errStr) 139 | C.leveldb_free(unsafe.Pointer(errStr)) 140 | return IteratorError(gs) 141 | } 142 | return nil 143 | } 144 | 145 | // Close deallocates the given Iterator, freeing the underlying C struct. 146 | func (it *Iterator) Close() { 147 | C.leveldb_iter_destroy(it.Iter) 148 | it.Iter = nil 149 | } 150 | -------------------------------------------------------------------------------- /src/levigo/leveldb_test.go: -------------------------------------------------------------------------------- 1 | package levigo 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "math/rand" 7 | "os" 8 | "path/filepath" 9 | "testing" 10 | "time" 11 | ) 12 | 13 | func init() { 14 | rand.Seed(int64(time.Now().Nanosecond())) 15 | } 16 | 17 | // This testcase is a port of leveldb's c_test.c. 18 | func TestC(t *testing.T) { 19 | if GetLevelDBMajorVersion() <= 0 { 20 | t.Errorf("Major version cannot be less than zero") 21 | } 22 | 23 | dbname := tempDir(t) 24 | defer deleteDBDirectory(t, dbname) 25 | env := NewDefaultEnv() 26 | cache := NewLRUCache(1 << 20) 27 | 28 | options := NewOptions() 29 | // options.SetComparator(cmp) 30 | options.SetErrorIfExists(true) 31 | options.SetCache(cache) 32 | options.SetEnv(env) 33 | options.SetInfoLog(nil) 34 | options.SetWriteBufferSize(1 << 20) 35 | options.SetParanoidChecks(true) 36 | options.SetMaxOpenFiles(10) 37 | options.SetBlockSize(1024) 38 | options.SetBlockRestartInterval(8) 39 | options.SetCompression(NoCompression) 40 | 41 | roptions := NewReadOptions() 42 | roptions.SetVerifyChecksums(true) 43 | roptions.SetFillCache(false) 44 | 45 | woptions := NewWriteOptions() 46 | woptions.SetSync(true) 47 | 48 | _ = DestroyDatabase(dbname, options) 49 | 50 | db, err := Open(dbname, options) 51 | if err == nil { 52 | t.Errorf("Open on missing db should have failed") 53 | } 54 | 55 | 
options.SetCreateIfMissing(true) 56 | db, err = Open(dbname, options) 57 | if err != nil { 58 | t.Fatalf("Open failed: %v", err) 59 | } 60 | 61 | putKey := []byte("foo") 62 | putValue := []byte("hello") 63 | err = db.Put(woptions, putKey, putValue) 64 | if err != nil { 65 | t.Errorf("Put failed: %v", err) 66 | } 67 | 68 | CheckGet(t, "after Put", db, roptions, putKey, putValue) 69 | 70 | wb := NewWriteBatch() 71 | wb.Put([]byte("foo"), []byte("a")) 72 | wb.Clear() 73 | wb.Put([]byte("bar"), []byte("b")) 74 | wb.Put([]byte("box"), []byte("c")) 75 | wb.Delete([]byte("bar")) 76 | err = db.Write(woptions, wb) 77 | if err != nil { 78 | t.Errorf("Write batch failed: %v", err) 79 | } 80 | CheckGet(t, "after WriteBatch", db, roptions, []byte("foo"), []byte("hello")) 81 | CheckGet(t, "after WriteBatch", db, roptions, []byte("bar"), nil) 82 | CheckGet(t, "after WriteBatch", db, roptions, []byte("box"), []byte("c")) 83 | // TODO: WriteBatch iteration isn't easy. Suffers same problems as 84 | // Comparator. 
85 | // wbiter := &TestWBIter{t: t} 86 | // wb.Iterate(wbiter) 87 | // if wbiter.pos != 3 { 88 | // t.Errorf("After Iterate, on the wrong pos: %d", wbiter.pos) 89 | // } 90 | wb.Close() 91 | 92 | iter := db.NewIterator(roptions) 93 | if iter.Valid() { 94 | t.Errorf("Read iterator should not be valid, yet") 95 | } 96 | iter.SeekToFirst() 97 | if !iter.Valid() { 98 | t.Errorf("Read iterator should be valid after seeking to first record") 99 | } 100 | CheckIter(t, iter, []byte("box"), []byte("c")) 101 | iter.Next() 102 | CheckIter(t, iter, []byte("foo"), []byte("hello")) 103 | iter.Prev() 104 | CheckIter(t, iter, []byte("box"), []byte("c")) 105 | iter.Prev() 106 | if iter.Valid() { 107 | t.Errorf("Read iterator should not be valid after go back past the first record") 108 | } 109 | iter.SeekToLast() 110 | CheckIter(t, iter, []byte("foo"), []byte("hello")) 111 | iter.Seek([]byte("b")) 112 | CheckIter(t, iter, []byte("box"), []byte("c")) 113 | if iter.GetError() != nil { 114 | t.Errorf("Read iterator has an error we didn't expect: %v", iter.GetError()) 115 | } 116 | iter.Close() 117 | 118 | // approximate sizes 119 | n := 20000 120 | woptions.SetSync(false) 121 | for i := 0; i < n; i++ { 122 | keybuf := []byte(fmt.Sprintf("k%020d", i)) 123 | valbuf := []byte(fmt.Sprintf("v%020d", i)) 124 | err := db.Put(woptions, keybuf, valbuf) 125 | if err != nil { 126 | t.Errorf("Put error in approximate size test: %v", err) 127 | } 128 | } 129 | 130 | ranges := []Range{ 131 | {[]byte("a"), []byte("k00000000000000010000")}, 132 | {[]byte("k00000000000000010000"), []byte("z")}, 133 | } 134 | sizes := db.GetApproximateSizes(ranges) 135 | if len(sizes) == 2 { 136 | if sizes[0] <= 0 { 137 | t.Errorf("First size range was %d", sizes[0]) 138 | } 139 | if sizes[1] <= 0 { 140 | t.Errorf("Second size range was %d", sizes[1]) 141 | } 142 | } else { 143 | t.Errorf("Expected 2 approx. 
sizes back, got %d", len(sizes)) 144 | } 145 | 146 | // property 147 | prop := db.PropertyValue("nosuchprop") 148 | if prop != "" { 149 | t.Errorf("property nosuchprop should not have a value") 150 | } 151 | prop = db.PropertyValue("leveldb.stats") 152 | if prop == "" { 153 | t.Errorf("property leveldb.stats should have a value") 154 | } 155 | 156 | // snapshot 157 | snap := db.NewSnapshot() 158 | err = db.Delete(woptions, []byte("foo")) 159 | if err != nil { 160 | t.Errorf("Delete during snapshot test errored: %v", err) 161 | } 162 | roptions.SetSnapshot(snap) 163 | CheckGet(t, "from snapshot", db, roptions, []byte("foo"), []byte("hello")) 164 | roptions.SetSnapshot(nil) 165 | CheckGet(t, "from snapshot", db, roptions, []byte("foo"), nil) 166 | db.ReleaseSnapshot(snap) 167 | 168 | // repair 169 | db.Close() 170 | options.SetCreateIfMissing(false) 171 | options.SetErrorIfExists(false) 172 | err = RepairDatabase(dbname, options) 173 | if err != nil { 174 | t.Errorf("Repairing db failed: %v", err) 175 | } 176 | db, err = Open(dbname, options) 177 | if err != nil { 178 | t.Errorf("Unable to open repaired db: %v", err) 179 | } 180 | CheckGet(t, "repair", db, roptions, []byte("foo"), nil) 181 | CheckGet(t, "repair", db, roptions, []byte("bar"), nil) 182 | CheckGet(t, "repair", db, roptions, []byte("box"), []byte("c")) 183 | options.SetCreateIfMissing(true) 184 | options.SetErrorIfExists(true) 185 | 186 | // filter 187 | policy := NewBloomFilter(10) 188 | db.Close() 189 | DestroyDatabase(dbname, options) 190 | options.SetFilterPolicy(policy) 191 | db, err = Open(dbname, options) 192 | if err != nil { 193 | t.Fatalf("Unable to recreate db for filter tests: %v", err) 194 | } 195 | err = db.Put(woptions, []byte("foo"), []byte("foovalue")) 196 | if err != nil { 197 | t.Errorf("Unable to put 'foo' with filter: %v", err) 198 | } 199 | err = db.Put(woptions, []byte("bar"), []byte("barvalue")) 200 | if err != nil { 201 | t.Errorf("Unable to put 'bar' with filter: %v", err) 202 | 
} 203 | db.CompactRange(Range{nil, nil}) 204 | CheckGet(t, "filter", db, roptions, []byte("foo"), []byte("foovalue")) 205 | CheckGet(t, "filter", db, roptions, []byte("bar"), []byte("barvalue")) 206 | options.SetFilterPolicy(nil) 207 | policy.Close() 208 | 209 | // cleanup 210 | db.Close() 211 | options.Close() 212 | roptions.Close() 213 | woptions.Close() 214 | cache.Close() 215 | // DestroyComparator(cmp) 216 | env.Close() 217 | } 218 | 219 | func TestNilSlicesInDb(t *testing.T) { 220 | dbname := tempDir(t) 221 | defer deleteDBDirectory(t, dbname) 222 | options := NewOptions() 223 | options.SetErrorIfExists(true) 224 | options.SetCreateIfMissing(true) 225 | ro := NewReadOptions() 226 | _ = DestroyDatabase(dbname, options) 227 | db, err := Open(dbname, options) 228 | if err != nil { 229 | t.Fatalf("Database could not be opened: %v", err) 230 | } 231 | defer db.Close() 232 | val, err := db.Get(ro, []byte("missing")) 233 | if err != nil { 234 | t.Errorf("Get failed: %v", err) 235 | } 236 | if val != nil { 237 | t.Errorf("A key not in the db should return nil, not %v", val) 238 | } 239 | wo := NewWriteOptions() 240 | db.Put(wo, nil, []byte("love")) 241 | val, err = db.Get(ro, nil) 242 | if !bytes.Equal([]byte("love"), val) { 243 | t.Errorf("Get should see the nil key: %v", val) 244 | } 245 | val, err = db.Get(ro, []byte{}) 246 | if !bytes.Equal([]byte("love"), val) { 247 | t.Errorf("Get shouldn't distinguish between nil key and empty slice key: %v", val) 248 | } 249 | 250 | err = db.Put(wo, []byte("nilvalue"), nil) 251 | if err != nil { 252 | t.Errorf("nil value Put errored: %v", err) 253 | } 254 | // Compare with the []byte("missing") case. We expect Get to return a 255 | // []byte{} here, but expect a nil returned there. 
256 | CheckGet(t, "nil value Put", db, ro, []byte("nilvalue"), []byte{}) 257 | 258 | err = db.Put(wo, []byte("emptyvalue"), []byte{}) 259 | if err != nil { 260 | t.Errorf("empty value Put errored: %v", err) 261 | } 262 | CheckGet(t, "empty value Put", db, ro, []byte("emptyvalue"), []byte{}) 263 | 264 | err = db.Delete(wo, nil) 265 | if err != nil { 266 | t.Errorf("nil key Delete errored: %v", err) 267 | } 268 | err = db.Delete(wo, []byte{}) 269 | if err != nil { 270 | t.Errorf("empty slice key Delete errored: %v", err) 271 | } 272 | 273 | } 274 | 275 | func TestIterationValidityLimits(t *testing.T) { 276 | dbname := tempDir(t) 277 | defer deleteDBDirectory(t, dbname) 278 | options := NewOptions() 279 | options.SetErrorIfExists(true) 280 | options.SetCreateIfMissing(true) 281 | ro := NewReadOptions() 282 | wo := NewWriteOptions() 283 | _ = DestroyDatabase(dbname, options) 284 | db, err := Open(dbname, options) 285 | if err != nil { 286 | t.Fatalf("Database could not be opened: %v", err) 287 | } 288 | defer db.Close() 289 | db.Put(wo, []byte("bat"), []byte("somedata")) 290 | db.Put(wo, []byte("done"), []byte("somedata")) 291 | it := db.NewIterator(ro) 292 | defer it.Close() 293 | if it.Valid() { 294 | t.Errorf("new Iterator was valid") 295 | } 296 | it.Seek([]byte("bat")) 297 | if !it.Valid() { 298 | t.Errorf("Seek to %#v failed.", []byte("bat")) 299 | } 300 | if !bytes.Equal([]byte("bat"), it.Key()) { 301 | t.Errorf("did not seek to []byte(\"bat\")") 302 | } 303 | key := it.Key() 304 | it.Next() 305 | if bytes.Equal(key, it.Key()) { 306 | t.Errorf("key should be a copy of last key") 307 | } 308 | it.Next() 309 | if it.Valid() { 310 | t.Errorf("iterating off the db should result in an invalid iterator") 311 | } 312 | err = it.GetError() 313 | if err != nil { 314 | t.Errorf("should not have seen an error on an invalid iterator") 315 | } 316 | it.Seek([]byte("bat")) 317 | if !it.Valid() { 318 | t.Errorf("Iterator should be valid again") 319 | } 320 | } 321 | 322 | func 
CheckGet(t *testing.T, where string, db *DB, roptions *ReadOptions, key, expected []byte) { 323 | getValue, err := db.Get(roptions, key) 324 | 325 | if err != nil { 326 | t.Errorf("%s, Get failed: %v", where, err) 327 | } 328 | if !bytes.Equal(getValue, expected) { 329 | t.Errorf("%s, expected Get value %v, got %v", where, expected, getValue) 330 | } 331 | } 332 | 333 | func WBIterCheckEqual(t *testing.T, where string, which string, pos int, expected, given []byte) { 334 | if !bytes.Equal(expected, given) { 335 | t.Errorf("%s at pos %d, %s expected: %v, got: %v", where, pos, which, expected, given) 336 | } 337 | } 338 | 339 | func CheckIter(t *testing.T, it *Iterator, key, value []byte) { 340 | if !bytes.Equal(key, it.Key()) { 341 | t.Errorf("Iterator: expected key %v, got %v", key, it.Key()) 342 | } 343 | if !bytes.Equal(value, it.Value()) { 344 | t.Errorf("Iterator: expected value %v, got %v", value, it.Value()) 345 | } 346 | } 347 | 348 | func deleteDBDirectory(t *testing.T, dirPath string) { 349 | err := os.RemoveAll(dirPath) 350 | if err != nil { 351 | t.Errorf("Unable to remove database directory: %s", dirPath) 352 | } 353 | } 354 | 355 | func tempDir(t *testing.T) string { 356 | bottom := fmt.Sprintf("levigo-test-%d", rand.Int()) 357 | path := filepath.Join(os.TempDir(), bottom) 358 | deleteDBDirectory(t, path) 359 | return path 360 | } 361 | -------------------------------------------------------------------------------- /src/levigo/options.go: -------------------------------------------------------------------------------- 1 | package levigo 2 | 3 | // #include "leveldb/c.h" 4 | import "C" 5 | 6 | // CompressionOpt is a value for Options.SetCompression. 7 | type CompressionOpt int 8 | 9 | // Known compression arguments for Options.SetCompression. 10 | const ( 11 | NoCompression = CompressionOpt(0) 12 | SnappyCompression = CompressionOpt(1) 13 | ) 14 | 15 | // Options represent all of the available options when opening a database with 16 | // Open. 
Options should be created with NewOptions. 17 | // 18 | // It is usually wise to call SetCache with a cache object. Otherwise, all 19 | // data will be read off disk. 20 | // 21 | // To prevent memory leaks, Close must be called on an Options when the 22 | // program no longer needs it. 23 | type Options struct { 24 | Opt *C.leveldb_options_t 25 | } 26 | 27 | // ReadOptions represent all of the available options when reading from a 28 | // database. 29 | // 30 | // To prevent memory leaks, Close must be called on a ReadOptions when the 31 | // program no longer needs it. 32 | type ReadOptions struct { 33 | Opt *C.leveldb_readoptions_t 34 | } 35 | 36 | // WriteOptions represent all of the available options when writing to a 37 | // database. 38 | // 39 | // To prevent memory leaks, Close must be called on a WriteOptions when the 40 | // program no longer needs it. 41 | type WriteOptions struct { 42 | Opt *C.leveldb_writeoptions_t 43 | } 44 | 45 | // NewOptions allocates a new Options object. 46 | func NewOptions() *Options { 47 | opt := C.leveldb_options_create() 48 | return &Options{opt} 49 | } 50 | 51 | // NewReadOptions allocates a new ReadOptions object. 52 | func NewReadOptions() *ReadOptions { 53 | opt := C.leveldb_readoptions_create() 54 | return &ReadOptions{opt} 55 | } 56 | 57 | // NewWriteOptions allocates a new WriteOptions object. 58 | func NewWriteOptions() *WriteOptions { 59 | opt := C.leveldb_writeoptions_create() 60 | return &WriteOptions{opt} 61 | } 62 | 63 | // Close deallocates the Options, freeing its underlying C struct. 64 | func (o *Options) Close() { 65 | C.leveldb_options_destroy(o.Opt) 66 | } 67 | 68 | // SetComparator sets the comparator to be used for all read and write 69 | // operations. 70 | // 71 | // The comparator that created a database must be the same one (technically, 72 | // one with the same name string) that is used to perform read and write 73 | // operations. 74 | // 75 | // The default comparator is usually sufficient.
76 | func (o *Options) SetComparator(cmp *C.leveldb_comparator_t) { 77 | C.leveldb_options_set_comparator(o.Opt, cmp) 78 | } 79 | 80 | // SetErrorIfExists, if passed true, will cause the opening of a database that 81 | // already exists to throw an error. 82 | func (o *Options) SetErrorIfExists(error_if_exists bool) { 83 | eie := boolToUchar(error_if_exists) 84 | C.leveldb_options_set_error_if_exists(o.Opt, eie) 85 | } 86 | 87 | // SetCache places a cache object in the database when a database is opened. 88 | // 89 | // This is usually wise to use. See also ReadOptions.SetFillCache. 90 | func (o *Options) SetCache(cache *Cache) { 91 | C.leveldb_options_set_cache(o.Opt, cache.Cache) 92 | } 93 | 94 | // SetEnv sets the Env object for the new database handle. 95 | func (o *Options) SetEnv(env *Env) { 96 | C.leveldb_options_set_env(o.Opt, env.Env) 97 | } 98 | 99 | // SetInfoLog sets a *C.leveldb_logger_t object as the informational logger 100 | // for the database. 101 | func (o *Options) SetInfoLog(log *C.leveldb_logger_t) { 102 | C.leveldb_options_set_info_log(o.Opt, log) 103 | } 104 | 105 | // SetWriteBufferSize sets the number of bytes the database will build up in 106 | // memory (backed by an unsorted log on disk) before converting to a sorted 107 | // on-disk file. 108 | func (o *Options) SetWriteBufferSize(s int) { 109 | C.leveldb_options_set_write_buffer_size(o.Opt, C.size_t(s)) 110 | } 111 | 112 | // SetParanoidChecks, when called with true, will cause the database to do 113 | // aggressive checking of the data it is processing and will stop early if it 114 | // detects errors. 115 | // 116 | // See the LevelDB documentation docs for details. 117 | func (o *Options) SetParanoidChecks(pc bool) { 118 | C.leveldb_options_set_paranoid_checks(o.Opt, boolToUchar(pc)) 119 | } 120 | 121 | // SetMaxOpenFiles sets the number of files than can be used at once by the 122 | // database. 123 | // 124 | // See the LevelDB documentation for details. 
125 | func (o *Options) SetMaxOpenFiles(n int) { 126 | C.leveldb_options_set_max_open_files(o.Opt, C.int(n)) 127 | } 128 | 129 | // SetBlockSize sets the approximate size of user data packed per block. 130 | // 131 | // The default is roughly 4096 uncompressed bytes. A better setting depends on 132 | // your use case. See the LevelDB documentation for details. 133 | func (o *Options) SetBlockSize(s int) { 134 | C.leveldb_options_set_block_size(o.Opt, C.size_t(s)) 135 | } 136 | 137 | // SetBlockRestartInterval is the number of keys between restarts points for 138 | // delta encoding keys. 139 | // 140 | // Most clients should leave this parameter alone. See the LevelDB 141 | // documentation for details. 142 | func (o *Options) SetBlockRestartInterval(n int) { 143 | C.leveldb_options_set_block_restart_interval(o.Opt, C.int(n)) 144 | } 145 | 146 | // SetCompression sets whether to compress blocks using the specified 147 | // compresssion algorithm. 148 | // 149 | // The default value is SnappyCompression and it is fast enough that it is 150 | // unlikely you want to turn it off. The other option is NoCompression. 151 | // 152 | // If the LevelDB library was built without Snappy compression enabled, the 153 | // SnappyCompression setting will be ignored. 154 | func (o *Options) SetCompression(t CompressionOpt) { 155 | C.leveldb_options_set_compression(o.Opt, C.int(t)) 156 | } 157 | 158 | // SetCreateIfMissing causes Open to create a new database on disk if it does 159 | // not already exist. 160 | func (o *Options) SetCreateIfMissing(b bool) { 161 | C.leveldb_options_set_create_if_missing(o.Opt, boolToUchar(b)) 162 | } 163 | 164 | // SetFilterPolicy causes Open to create a new database that will uses filter 165 | // created from the filter policy passed in. 
166 | func (o *Options) SetFilterPolicy(fp *FilterPolicy) { 167 | var policy *C.leveldb_filterpolicy_t 168 | if fp != nil { 169 | policy = fp.Policy 170 | } 171 | C.leveldb_options_set_filter_policy(o.Opt, policy) 172 | } 173 | 174 | // Close deallocates the ReadOptions, freeing its underlying C struct. 175 | func (ro *ReadOptions) Close() { 176 | C.leveldb_readoptions_destroy(ro.Opt) 177 | } 178 | 179 | // SetVerifyChecksums controls whether all data read with this ReadOptions 180 | // will be verified against corresponding checksums. 181 | // 182 | // It defaults to false. See the LevelDB documentation for details. 183 | func (ro *ReadOptions) SetVerifyChecksums(b bool) { 184 | C.leveldb_readoptions_set_verify_checksums(ro.Opt, boolToUchar(b)) 185 | } 186 | 187 | // SetFillCache controls whether reads performed with this ReadOptions will 188 | // fill the Cache of the server. It defaults to true. 189 | // 190 | // It is useful to turn this off on ReadOptions for DB.Iterator (and DB.Get) 191 | // calls used in offline threads to prevent bulk scans from flushing out live 192 | // user data in the cache. 193 | // 194 | // See also Options.SetCache 195 | func (ro *ReadOptions) SetFillCache(b bool) { 196 | C.leveldb_readoptions_set_fill_cache(ro.Opt, boolToUchar(b)) 197 | } 198 | 199 | // SetSnapshot causes reads to be provided as they were when the passed in 200 | // Snapshot was created by DB.NewSnapshot. This is useful for getting 201 | // consistent reads during a bulk operation. 202 | // 203 | // See the LevelDB documentation for details. 204 | func (ro *ReadOptions) SetSnapshot(snap *Snapshot) { 205 | var s *C.leveldb_snapshot_t 206 | if snap != nil { 207 | s = snap.snap 208 | } 209 | C.leveldb_readoptions_set_snapshot(ro.Opt, s) 210 | } 211 | 212 | // Close deallocates the WriteOptions, freeing its underlying C struct.
213 | func (wo *WriteOptions) Close() { 214 | C.leveldb_writeoptions_destroy(wo.Opt) 215 | } 216 | 217 | // SetSync controls whether each write performed with this WriteOptions will 218 | // be flushed from the operating system buffer cache before the write is 219 | // considered complete. 220 | // 221 | // If called with true, this will signficantly slow down writes. If called 222 | // with false, and the host machine crashes, some recent writes may be 223 | // lost. The default is false. 224 | // 225 | // See the LevelDB documentation for details. 226 | func (wo *WriteOptions) SetSync(b bool) { 227 | C.leveldb_writeoptions_set_sync(wo.Opt, boolToUchar(b)) 228 | } 229 | -------------------------------------------------------------------------------- /src/levigo/version.go: -------------------------------------------------------------------------------- 1 | package levigo 2 | 3 | /* 4 | #include "leveldb/c.h" 5 | */ 6 | import "C" 7 | 8 | func GetLevelDBMajorVersion() int { 9 | return int(C.leveldb_major_version()) 10 | } 11 | 12 | func GetLevelDBMinorVersion() int { 13 | return int(C.leveldb_minor_version()) 14 | } 15 | -------------------------------------------------------------------------------- /src/redis/redis.go: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import ( 4 | "bufio" 5 | "bytes" 6 | "errors" 7 | "fmt" 8 | "log" 9 | "net" 10 | "strconv" 11 | ) 12 | 13 | type Redis struct { 14 | addr string 15 | password string 16 | db int 17 | conn net.Conn 18 | } 19 | 20 | var UnsupportedArgType = errors.New("unsupported arg type") 21 | var MalformedResponse = errors.New("malformed response") 22 | var NoConnection = errors.New("no connection") 23 | 24 | func composeMessage(cmd string, args []interface{}) ([]byte, error) { 25 | var buf bytes.Buffer 26 | fmt.Fprintf(&buf, "*%d\r\n", len(args)+1) 27 | fmt.Fprintf(&buf, "$%d\r\n%s\r\n", len(cmd), cmd) 28 | for _, arg := range args { 29 | var v string 30 | 
if str, ok := arg.(string); ok { 31 | v = str 32 | } else if str, ok := arg.(int); ok { 33 | v = strconv.Itoa(str) 34 | } else { 35 | return nil, UnsupportedArgType 36 | } 37 | 38 | fmt.Fprintf(&buf, "$%d\r\n%s\r\n", len(v), v) 39 | } 40 | return buf.Bytes(), nil 41 | } 42 | 43 | func readBulkString(reader *bufio.Reader, sz int) (str string, err error) { 44 | if sz < 0 { 45 | return 46 | } 47 | 48 | var buf = make([]byte, sz+2) 49 | var p = buf 50 | for { 51 | var n int 52 | n, err = reader.Read(p) 53 | if err != nil { 54 | return 55 | } 56 | if n < len(p) { 57 | p = p[n:] 58 | } else { 59 | break 60 | } 61 | } 62 | str = string(buf[:sz]) 63 | // log.Printf("string:%s", str) 64 | return 65 | } 66 | 67 | func readResponse(reader *bufio.Reader) (interface{}, error) { 68 | line, err := reader.ReadString('\n') 69 | if err != nil { 70 | return nil, err 71 | } 72 | 73 | content := line[1 : len(line)-2] 74 | switch line[0] { 75 | case '-': 76 | return nil, errors.New(content) 77 | case '+': 78 | return content, nil 79 | case ':': 80 | return strconv.Atoi(content) 81 | case '$': 82 | sz, _ := strconv.Atoi(content) 83 | return readBulkString(reader, sz) 84 | case '*': 85 | sz, _ := strconv.Atoi(content) 86 | if sz < 0 { 87 | return nil, nil 88 | } 89 | var ret = make([]string, sz) 90 | for i := 0; i < sz; i++ { 91 | nextline, err := reader.ReadString('\n') 92 | // log.Printf("header:%s", nextline) 93 | if err != nil { 94 | return nil, err 95 | } 96 | nextcontent := nextline[1 : len(nextline)-2] 97 | if nextline[0] == ':' { 98 | ret[i] = nextcontent 99 | } else if nextline[0] == '$' { 100 | sz, _ := strconv.Atoi(nextcontent) 101 | s, err := readBulkString(reader, sz) 102 | if err != nil { 103 | return nil, err 104 | } 105 | ret[i] = s 106 | } else { 107 | log.Printf("unexpected response(*): %s", nextline) 108 | return nil, MalformedResponse 109 | } 110 | } 111 | return ret, nil 112 | } 113 | log.Printf("unexpected response():%s", line) 114 | return nil, MalformedResponse 115 
| } 116 | 117 | // for pub/sub, don't call it directly 118 | func (r *Redis) ReadResponse() (interface{}, error) { 119 | reader := bufio.NewReader(r.conn) 120 | return readResponse(reader) 121 | } 122 | 123 | func (r *Redis) exec(cmd string, args []interface{}) (interface{}, error) { 124 | if r.conn == nil { 125 | return nil, NoConnection 126 | } 127 | 128 | data, err := composeMessage(cmd, args) 129 | if err != nil { 130 | return nil, err 131 | } 132 | 133 | _, err = r.conn.Write(data) 134 | if err != nil { 135 | return nil, err 136 | } 137 | reader := bufio.NewReader(r.conn) 138 | return readResponse(reader) 139 | } 140 | 141 | func (r *Redis) Hget(key string, subkey string) (resp string, err error) { 142 | result, err := r.Exec("hget", key, subkey) 143 | if err != nil { 144 | return 145 | } 146 | resp = result.(string) 147 | return 148 | } 149 | 150 | func (r *Redis) Exec(cmd string, args ...interface{}) (interface{}, error) { 151 | return r.exec(cmd, args) 152 | } 153 | 154 | func (r *Redis) Hgetall(key string, obj map[string]string) (err error) { 155 | resp, err := r.Exec("hgetall", key) 156 | if err != nil { 157 | return 158 | } 159 | 160 | if values, ok := resp.([]string); ok { 161 | sz := len(values) 162 | // if sz 不为偶数,丢弃最后一项 163 | for i := 0; i < sz-1; i = i + 2 { 164 | obj[values[i]] = values[i+1] 165 | } 166 | } 167 | return 168 | } 169 | 170 | func (r *Redis) Hgetall_arr(key string) (resp []string, err error) { 171 | t, err := r.Exec("hgetall", key) 172 | if err != nil { 173 | return 174 | } 175 | 176 | resp = t.([]string) 177 | return 178 | } 179 | 180 | func (r *Redis) Type(key string) (name string, err error) { 181 | resp, err := r.Exec("type", key) 182 | if err != nil { 183 | return 184 | } 185 | name = resp.(string) 186 | return 187 | } 188 | 189 | func (r *Redis) Connect() (err error) { 190 | log.Printf("connect to redis:%s", r.addr) 191 | 192 | if r.conn != nil { 193 | return 194 | } 195 | 196 | r.conn, err = net.Dial("tcp", r.addr) 197 | if err 
!= nil { 198 | return 199 | } 200 | 201 | if r.password != "" { 202 | _, err = r.Exec("auth", r.password) 203 | if err != nil { 204 | return 205 | } 206 | } 207 | 208 | _, err = r.Exec("select", r.db) 209 | return 210 | } 211 | 212 | func (r *Redis) Close() { 213 | if r.conn != nil { 214 | r.conn.Close() 215 | r.conn = nil 216 | } 217 | } 218 | 219 | func (r *Redis) ReConnect() error { 220 | r.Close() 221 | return r.Connect() 222 | } 223 | 224 | // new function 225 | func NewRedis(addr string, password string, db int) *Redis { 226 | return &Redis{addr: addr, password: password, db: db} 227 | } 228 | -------------------------------------------------------------------------------- /src/redis/redis_test.go: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import "testing" 4 | 5 | func TestRedis(t *testing.T) { 6 | cli := NewRedis("127.0.0.1:6300", "foobared", 2) 7 | err := cli.Connect() 8 | if err != nil { 9 | t.Errorf("connect to redis failed:%v", err) 10 | } 11 | 12 | _, err = cli.Exec("keys", "*") 13 | if err != nil { 14 | t.Errorf("hgetall failed:%v", err) 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /src/unqlitego/.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled Object files, Static and Dynamic libs (Shared Objects) 2 | *.o 3 | *.a 4 | *.so 5 | 6 | # Folders 7 | _obj 8 | _test 9 | 10 | # Architecture specific extensions/prefixes 11 | *.[568vq] 12 | [568vq].out 13 | 14 | *.cgo1.go 15 | *.cgo2.c 16 | _cgo_defun.c 17 | _cgo_gotypes.go 18 | _cgo_export.* 19 | 20 | _testmain.go 21 | 22 | *.exe 23 | .DS_Store 24 | -------------------------------------------------------------------------------- /src/unqlitego/.travis.yml: -------------------------------------------------------------------------------- 1 | language: go 2 | 3 | go: 4 | - 1.1 5 | - 1.2 6 | - tip 7 | 8 | install: 9 | - go get 
github.com/r7kamura/gospel 10 | - go install . 11 | 12 | script: 13 | - go test . 14 | -------------------------------------------------------------------------------- /src/unqlitego/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2013, irieda 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without modification, 5 | are permitted provided that the following conditions are met: 6 | 7 | * Redistributions of source code must retain the above copyright notice, this 8 | list of conditions and the following disclaimer. 9 | 10 | * Redistributions in binary form must reproduce the above copyright notice, this 11 | list of conditions and the following disclaimer in the documentation and/or 12 | other materials provided with the distribution. 13 | 14 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 15 | ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 16 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 17 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR 18 | ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 19 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 20 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 21 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 22 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 23 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- /src/unqlitego/README.md: -------------------------------------------------------------------------------- 1 | unqlitego 2 | ========= 3 | 4 | UnQLite Binding for golang. 
5 | 6 | Install 7 | --------- 8 | 9 | ```sh 10 | $ go get github.com/nobonobo/unqlitego 11 | ``` 12 | 13 | Test 14 | --------- 15 | ```sh 16 | $ go get github.com/r7kamura/gospel 17 | $ cd ${GOPATH/:*/}/src/github.com/nobonobo/unqlitego 18 | $ go test . 19 | ``` 20 | 21 | Benchmark 22 | ---------- 23 | 24 | ```sh 25 | $ go get github.com/r7kamura/gospel 26 | $ cd ${GOPATH/:*/}/src/github.com/nobonobo/unqlitego 27 | $ go test -bench Bench* 28 | ``` 29 | 30 | Output:(Macbook Air 2011 mid) 31 | 32 | ``` 33 | BenchmarkFileStore 200000 9667 ns/op 34 | BenchmarkFileFetch 500000 7928 ns/op 35 | BenchmarkMemStore 500000 3824 ns/op 36 | BenchmarkMemFetch 1000000 3448 ns/op 37 | ``` 38 | -------------------------------------------------------------------------------- /src/unqlitego/unqlite.c: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/xjdrew/redis-persist/99b5d62f82022446cd17e59dbeb5fa31101d6300/src/unqlitego/unqlite.c -------------------------------------------------------------------------------- /src/unqlitego/unqlite.go: -------------------------------------------------------------------------------- 1 | package unqlitego 2 | 3 | /* 4 | #cgo linux CFLAGS: -DUNQLITE_ENABLE_THREADS=1 -Wno-unused-but-set-variable 5 | #cgo darwin CFLAGS: -DUNQLITE_ENABLE_THREADS=1 6 | #cgo windows CFLAGS: -DUNQLITE_ENABLE_THREADS=1 7 | #include "./unqlite.h" 8 | #include 9 | */ 10 | import "C" 11 | 12 | import ( 13 | "fmt" 14 | "runtime" 15 | "unsafe" 16 | ) 17 | 18 | // UnQLiteError ... standard error for this module 19 | type UnQLiteError int 20 | 21 | func (e UnQLiteError) Error() string { 22 | s := errString[e] 23 | if s == "" { 24 | return fmt.Sprintf("errno %d", int(e)) 25 | } 26 | return s 27 | } 28 | 29 | const ( 30 | // UnQLiteNoMemErr ... 
31 | UnQLiteNoMemErr UnQLiteError = UnQLiteError(C.UNQLITE_NOMEM) 32 | ) 33 | 34 | var errString = map[UnQLiteError]string{ 35 | C.UNQLITE_NOMEM: "Out of memory", 36 | } 37 | 38 | // Database ... 39 | type Database struct { 40 | handle *C.unqlite 41 | } 42 | 43 | // Cursor ... 44 | type Cursor struct { 45 | parent *Database 46 | handle *C.unqlite_kv_cursor 47 | } 48 | 49 | func init() { 50 | C.unqlite_lib_init() 51 | if !IsThreadSafe() { 52 | panic("unqlite library was not compiled for thread-safe option UNQLITE_ENABLE_THREADS=1") 53 | } 54 | } 55 | 56 | // NewDatabase ... 57 | func NewDatabase(filename string) (db *Database, err error) { 58 | db = &Database{} 59 | name := C.CString(filename) 60 | defer C.free(unsafe.Pointer(name)) 61 | res := C.unqlite_open(&db.handle, name, C.UNQLITE_OPEN_CREATE) 62 | if res != C.UNQLITE_OK { 63 | err = UnQLiteError(res) 64 | } 65 | if db.handle != nil { 66 | runtime.SetFinalizer(db, (*Database).Close) 67 | } 68 | return 69 | } 70 | 71 | // Close ... 72 | func (db *Database) Close() (err error) { 73 | if db.handle != nil { 74 | res := C.unqlite_close(db.handle) 75 | if res != C.UNQLITE_OK { 76 | err = UnQLiteError(res) 77 | } 78 | db.handle = nil 79 | } 80 | return 81 | } 82 | 83 | // Store ... 84 | func (db *Database) Store(key, value []byte) (err error) { 85 | res := C.unqlite_kv_store(db.handle, 86 | unsafe.Pointer(&key[0]), C.int(len(key)), 87 | unsafe.Pointer(&value[0]), C.unqlite_int64(len(value))) 88 | if res == C.UNQLITE_OK { 89 | return nil 90 | } 91 | return UnQLiteError(res) 92 | } 93 | 94 | // Append ... 95 | func (db *Database) Append(key, value []byte) (err error) { 96 | res := C.unqlite_kv_append(db.handle, 97 | unsafe.Pointer(&key[0]), C.int(len(key)), 98 | unsafe.Pointer(&value[0]), C.unqlite_int64(len(value))) 99 | if res != C.UNQLITE_OK { 100 | err = UnQLiteError(res) 101 | } 102 | return 103 | } 104 | 105 | // Fetch ... 
106 | func (db *Database) Fetch(key []byte) (value []byte, err error) { 107 | var n C.unqlite_int64 108 | res := C.unqlite_kv_fetch(db.handle, unsafe.Pointer(&key[0]), C.int(len(key)), nil, &n) 109 | if res != C.UNQLITE_OK { 110 | err = UnQLiteError(res) 111 | return 112 | } 113 | value = make([]byte, int(n)) 114 | res = C.unqlite_kv_fetch(db.handle, unsafe.Pointer(&key[0]), C.int(len(key)), unsafe.Pointer(&value[0]), &n) 115 | if res != C.UNQLITE_OK { 116 | err = UnQLiteError(res) 117 | } 118 | return 119 | } 120 | 121 | // Delete ... 122 | func (db *Database) Delete(key []byte) (err error) { 123 | res := C.unqlite_kv_delete(db.handle, unsafe.Pointer(&key[0]), C.int(len(key))) 124 | if res != C.UNQLITE_OK { 125 | err = UnQLiteError(res) 126 | } 127 | return 128 | } 129 | 130 | // Begin ... 131 | func (db *Database) Begin() (err error) { 132 | res := C.unqlite_begin(db.handle) 133 | if res != C.UNQLITE_OK { 134 | err = UnQLiteError(res) 135 | } 136 | return 137 | } 138 | 139 | // Commit ... 140 | func (db *Database) Commit() (err error) { 141 | res := C.unqlite_commit(db.handle) 142 | if res != C.UNQLITE_OK { 143 | err = UnQLiteError(res) 144 | } 145 | return 146 | } 147 | 148 | // Rollback ... 149 | func (db *Database) Rollback() (err error) { 150 | res := C.unqlite_rollback(db.handle) 151 | if res != C.UNQLITE_OK { 152 | err = UnQLiteError(res) 153 | } 154 | return 155 | } 156 | 157 | // NewCursor ... 158 | func (db *Database) NewCursor() (cursor *Cursor, err error) { 159 | cursor = &Cursor{parent: db} 160 | res := C.unqlite_kv_cursor_init(db.handle, &cursor.handle) 161 | if res != C.UNQLITE_OK { 162 | err = UnQLiteError(res) 163 | } 164 | runtime.SetFinalizer(cursor, (*Cursor).Close) 165 | return 166 | } 167 | 168 | // Close ... 
169 | func (curs *Cursor) Close() (err error) { 170 | if curs.parent.handle != nil && curs.handle != nil { 171 | res := C.unqlite_kv_cursor_release(curs.parent.handle, curs.handle) 172 | if res != C.UNQLITE_OK { 173 | err = UnQLiteError(res) 174 | } 175 | curs.handle = nil 176 | } 177 | return 178 | } 179 | 180 | // Seek ... 181 | func (curs *Cursor) Seek(key []byte) (err error) { 182 | res := C.unqlite_kv_cursor_seek(curs.handle, unsafe.Pointer(&key[0]), C.int(len(key)), C.UNQLITE_CURSOR_MATCH_EXACT) 183 | if res != C.UNQLITE_OK { 184 | err = UnQLiteError(res) 185 | } 186 | return 187 | } 188 | 189 | // SeekLE ... 190 | func (curs *Cursor) SeekLE(key []byte) (err error) { 191 | res := C.unqlite_kv_cursor_seek(curs.handle, unsafe.Pointer(&key[0]), C.int(len(key)), C.UNQLITE_CURSOR_MATCH_LE) 192 | if res != C.UNQLITE_OK { 193 | err = UnQLiteError(res) 194 | } 195 | return 196 | } 197 | 198 | // SeekGE ... 199 | func (curs *Cursor) SeekGE(key []byte) (err error) { 200 | res := C.unqlite_kv_cursor_seek(curs.handle, unsafe.Pointer(&key[0]), C.int(len(key)), C.UNQLITE_CURSOR_MATCH_GE) 201 | if res != C.UNQLITE_OK { 202 | err = UnQLiteError(res) 203 | } 204 | return 205 | } 206 | 207 | // First ... 208 | func (curs *Cursor) First() (err error) { 209 | res := C.unqlite_kv_cursor_first_entry(curs.handle) 210 | if res != C.UNQLITE_OK { 211 | err = UnQLiteError(res) 212 | } 213 | return 214 | } 215 | 216 | // Last ... 217 | func (curs *Cursor) Last() (err error) { 218 | res := C.unqlite_kv_cursor_last_entry(curs.handle) 219 | if res != C.UNQLITE_OK { 220 | err = UnQLiteError(res) 221 | } 222 | return 223 | } 224 | 225 | // IsValid ... 226 | func (curs *Cursor) IsValid() (ok bool) { 227 | return C.unqlite_kv_cursor_valid_entry(curs.handle) == 1 228 | } 229 | 230 | // Next ... 
231 | func (curs *Cursor) Next() (err error) { 232 | res := C.unqlite_kv_cursor_next_entry(curs.handle) 233 | if res != C.UNQLITE_OK { 234 | err = UnQLiteError(res) 235 | } 236 | return 237 | } 238 | 239 | // Prev ... 240 | func (curs *Cursor) Prev() (err error) { 241 | res := C.unqlite_kv_cursor_prev_entry(curs.handle) 242 | if res != C.UNQLITE_OK { 243 | err = UnQLiteError(res) 244 | } 245 | return 246 | } 247 | 248 | // Delete ... 249 | func (curs *Cursor) Delete() (err error) { 250 | res := C.unqlite_kv_cursor_delete_entry(curs.handle) 251 | if res != C.UNQLITE_OK { 252 | err = UnQLiteError(res) 253 | } 254 | return 255 | } 256 | 257 | // Reset ... 258 | func (curs *Cursor) Reset() (err error) { 259 | res := C.unqlite_kv_cursor_reset(curs.handle) 260 | if res != C.UNQLITE_OK { 261 | err = UnQLiteError(res) 262 | } 263 | return 264 | } 265 | 266 | // Key ... 267 | func (curs *Cursor) Key() (key []byte, err error) { 268 | var n C.int 269 | res := C.unqlite_kv_cursor_key(curs.handle, nil, &n) 270 | if res != C.UNQLITE_OK { 271 | err = UnQLiteError(res) 272 | return 273 | } 274 | key = make([]byte, int(n)) 275 | res = C.unqlite_kv_cursor_key(curs.handle, unsafe.Pointer(&key[0]), &n) 276 | if res != C.UNQLITE_OK { 277 | err = UnQLiteError(res) 278 | } 279 | return 280 | } 281 | 282 | // Value ... 283 | func (curs *Cursor) Value() (value []byte, err error) { 284 | var n C.unqlite_int64 285 | res := C.unqlite_kv_cursor_data(curs.handle, nil, &n) 286 | if res != C.UNQLITE_OK { 287 | err = UnQLiteError(res) 288 | return 289 | } 290 | value = make([]byte, int(n)) 291 | res = C.unqlite_kv_cursor_data(curs.handle, unsafe.Pointer(&value[0]), &n) 292 | if res != C.UNQLITE_OK { 293 | err = UnQLiteError(res) 294 | } 295 | return 296 | } 297 | 298 | // Shutdown ... 299 | func Shutdown() (err error) { 300 | res := C.unqlite_lib_shutdown() 301 | if res != C.UNQLITE_OK { 302 | err = UnQLiteError(res) 303 | } 304 | return 305 | } 306 | 307 | // IsThreadSafe ... 
308 | func IsThreadSafe() bool { 309 | return C.unqlite_lib_is_threadsafe() == 1 310 | } 311 | 312 | // Version ... 313 | func Version() string { 314 | return C.GoString(C.unqlite_lib_version()) 315 | } 316 | 317 | // Signature ... 318 | func Signature() string { 319 | return C.GoString(C.unqlite_lib_signature()) 320 | } 321 | 322 | // Ident ... 323 | func Ident() string { 324 | return C.GoString(C.unqlite_lib_ident()) 325 | } 326 | 327 | // Copyright ... 328 | func Copyright() string { 329 | return C.GoString(C.unqlite_lib_copyright()) 330 | } 331 | 332 | /* TODO: implement 333 | 334 | // Database Engine Handle 335 | int unqlite_config(unqlite *pDb,int nOp,...); 336 | 337 | // Key/Value (KV) Store Interfaces 338 | int unqlite_kv_fetch_callback(unqlite *pDb,const void *pKey, 339 | int nKeyLen,int (*xConsumer)(const void *,unsigned int,void *),void *pUserData); 340 | int unqlite_kv_config(unqlite *pDb,int iOp,...); 341 | 342 | // Cursor Iterator Interfaces 343 | int unqlite_kv_cursor_key_callback(unqlite_kv_cursor *pCursor,int (*xConsumer)(const void *,unsigned int,void *),void *pUserData); 344 | int unqlite_kv_cursor_data_callback(unqlite_kv_cursor *pCursor,int (*xConsumer)(const void *,unsigned int,void *),void *pUserData); 345 | 346 | // Utility interfaces 347 | int unqlite_util_load_mmaped_file(const char *zFile,void **ppMap,unqlite_int64 *pFileSize); 348 | int unqlite_util_release_mmaped_file(void *pMap,unqlite_int64 iFileSize); 349 | 350 | // Global Library Management Interfaces 351 | int unqlite_lib_config(int nConfigOp,...); 352 | */ 353 | -------------------------------------------------------------------------------- /src/unqlitego/unqlite_test.go: -------------------------------------------------------------------------------- 1 | package unqlitego 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | . 
"github.com/r7kamura/gospel" 7 | "io/ioutil" 8 | "testing" 9 | ) 10 | 11 | func Testライブラリ(t *testing.T) { 12 | Describe(t, "正常系", func() { 13 | Context("基本テスト", func() { 14 | It("IsThreadSafe", func() { 15 | Expect(IsThreadSafe()).To(Equal, true) 16 | }) 17 | It("Version", func() { 18 | Expect(Version()).To(Equal, "1.1.6") 19 | }) 20 | It("Signature", func() { 21 | Expect(Signature()).To(Equal, "unqlite/1.1.6") 22 | }) 23 | It("Ident", func() { 24 | Expect(Ident()).To(Equal, "unqlite:b172a1e2c3f62fb35c8e1fb2795121f82356cad6") 25 | }) 26 | It("Copyright", func() { 27 | Expect(Copyright()).To(Equal, "Copyright (C) Symisc Systems, S.U.A.R.L [Mrad Chems Eddine ] 2012-2013, http://unqlite.org/") 28 | }) 29 | }) 30 | }) 31 | } 32 | 33 | func Testモジュール(t *testing.T) { 34 | var db *Database 35 | var src []byte 36 | src = []byte("value") 37 | 38 | Describe(t, "正常系", func() { 39 | Context("基本テスト", func() { 40 | It("NewDatabase", func() { 41 | f, err := ioutil.TempFile("", "sample.db") 42 | if err != nil { 43 | panic(err) 44 | } 45 | db, err = NewDatabase(f.Name()) 46 | Expect(err).To(NotExist) 47 | Expect(db).To(Exist) 48 | }) 49 | It("Database.Begin", func() { 50 | err := db.Begin() 51 | Expect(err).To(NotExist) 52 | }) 53 | It("Database.Store", func() { 54 | err := db.Store([]byte("sample"), src) 55 | Expect(err).To(NotExist) 56 | }) 57 | It("Database.Fetch", func() { 58 | dst, err := db.Fetch([]byte("sample")) 59 | Expect(err).To(NotExist) 60 | Expect(bytes.Compare(src, dst)).To(Equal, 0) 61 | }) 62 | It("Database.Append", func() { 63 | err1 := db.Append([]byte("sample"), []byte(" append")) 64 | Expect(err1).To(NotExist) 65 | dst, err2 := db.Fetch([]byte("sample")) 66 | Expect(err2).To(NotExist) 67 | Expect(bytes.Compare(append(src, []byte(" append")...), dst)).To(Equal, 0) 68 | }) 69 | It("Database.Commit", func() { 70 | err := db.Commit() 71 | Expect(err).To(NotExist) 72 | }) 73 | It("Database.Begin", func() { 74 | err := db.Begin() 75 | Expect(err).To(NotExist) 76 | }) 
77 | It("Database.Delete", func() { 78 | err1 := db.Delete([]byte("sample")) 79 | Expect(err1).To(NotExist) 80 | _, err2 := db.Fetch([]byte("sample")) 81 | Expect(err2).To(Exist) 82 | }) 83 | It("Database.Rollback", func() { 84 | err := db.Rollback() 85 | Expect(err).To(NotExist) 86 | value, err2 := db.Fetch([]byte("sample")) 87 | Expect(err2).To(NotExist) 88 | Expect(value).To(Exist) 89 | }) 90 | It("Database.NewCursor", func() { 91 | cursor, err := db.NewCursor() 92 | Expect(err).To(NotExist) 93 | Expect(cursor).To(Exist) 94 | err = cursor.Seek([]byte("sample")) 95 | Expect(err).To(NotExist) 96 | }) 97 | It("Database.Close", func() { 98 | err := db.Close() 99 | Expect(err).To(NotExist) 100 | }) 101 | }) 102 | }) 103 | } 104 | 105 | func BenchmarkFileStore(b *testing.B) { 106 | b.StopTimer() 107 | f, err := ioutil.TempFile("", "sample.db") 108 | if err != nil { 109 | panic(err) 110 | } 111 | db, _ := NewDatabase(f.Name()) 112 | b.StartTimer() 113 | for i := 0; i < b.N; i++ { 114 | db.Store([]byte(fmt.Sprintf("%d", i)), []byte("abcdefghijklmnopabcdefghijklmnopabcdefghijklmnopabcdefghijklmnop")) 115 | } 116 | } 117 | 118 | func BenchmarkFileFetch(b *testing.B) { 119 | b.StopTimer() 120 | f, err := ioutil.TempFile("", "sample.db") 121 | if err != nil { 122 | panic(err) 123 | } 124 | db, _ := NewDatabase(f.Name()) 125 | for i := 0; i < b.N; i++ { 126 | db.Store([]byte(fmt.Sprintf("%d", i)), []byte("abcdefghijklmnopabcdefghijklmnopabcdefghijklmnopabcdefghijklmnop")) 127 | } 128 | b.StartTimer() 129 | for i := 0; i < b.N; i++ { 130 | _, _ = db.Fetch([]byte(fmt.Sprintf("%d", i))) 131 | } 132 | } 133 | 134 | func BenchmarkMemStore(b *testing.B) { 135 | b.StopTimer() 136 | db, _ := NewDatabase("") 137 | b.StartTimer() 138 | for i := 0; i < b.N; i++ { 139 | db.Store([]byte(fmt.Sprintf("%d", i)), []byte("abcdefghijklmnopabcdefghijklmnopabcdefghijklmnopabcdefghijklmnop")) 140 | } 141 | } 142 | 143 | func BenchmarkMemFetch(b *testing.B) { 144 | b.StopTimer() 145 | db, _ := 
NewDatabase("") 146 | for i := 0; i < b.N; i++ { 147 | db.Store([]byte(fmt.Sprintf("%d", i)), []byte("abcdefghijklmnopabcdefghijklmnopabcdefghijklmnopabcdefghijklmnop")) 148 | } 149 | b.StartTimer() 150 | for i := 0; i < b.N; i++ { 151 | _, _ = db.Fetch([]byte(fmt.Sprintf("%d", i))) 152 | } 153 | } 154 | -------------------------------------------------------------------------------- /tests/test_read_leveldb_in_json_rpc.py: -------------------------------------------------------------------------------- 1 | #encoding:utf8 2 | import json, socket, itertools, struct 3 | import redis 4 | import gevent 5 | import time 6 | from multiprocessing import Pool 7 | 8 | class JSONClient(object): 9 | 10 | def __init__(self, addr): 11 | self.socket = socket.create_connection(addr) 12 | self.id_counter = itertools.count() 13 | 14 | def __del__(self): 15 | self.socket.close() 16 | 17 | def call(self, name, params): 18 | request = dict(id=next(self.id_counter), 19 | params=params, 20 | method=name) 21 | request_strs = json.dumps(request).encode() 22 | raw = struct.pack(">I", len(request_strs)) 23 | self.socket.sendall(raw + request_strs) 24 | 25 | # This must loop if resp is bigger than 4K 26 | raw_len = self.socket.recv(4) 27 | package_len = struct.unpack(">I", raw_len)[0] 28 | raw_message = self.socket.recv(package_len) 29 | while len(raw_message) < package_len: 30 | raw_message = raw_message + self.socket.recv(package_len - len(raw_message)) 31 | response = json.loads(raw_message) 32 | if response.get('id') != request.get('id'): 33 | raise Exception("expected id=%s, received id=%s: %s" 34 | %(request.get('id'), response.get('id'), 35 | response.get('error'))) 36 | 37 | if response.get('error') is not None: 38 | raise Exception(response.get('error')) 39 | 40 | return response.get('result') 41 | 42 | count = 0 43 | last_ts = time.time() 44 | def Progress(): 45 | global count, last_ts 46 | count = count + 1 47 | if count % 100 == 0: 48 | cur_ts = time.time() 49 | print("processes 
%d, speed:%d" % (count, 100/(cur_ts-last_ts))) 50 | last_ts = cur_ts 51 | 52 | def Map(keys): 53 | rpc = JSONClient(("127.0.0.1", 5200)) 54 | for key in keys: 55 | rpc.call("Get", key) 56 | Progress() 57 | 58 | config = {} 59 | with open("conf/bench.json") as fh: 60 | content = fh.read() 61 | config = json.loads(content) 62 | HOST, PORT = config["redis"]["host"].split(":") 63 | redis_db = redis.StrictRedis(host=HOST, port=int(PORT),db=config["redis"]["db"],password=config["redis"]["password"]) 64 | all_keys = redis_db.keys() 65 | begin_timestamp = time.time() 66 | max_procs = 3 67 | parallels_keys = [] 68 | step = len(all_keys)/max_procs 69 | for i in range(0, len(all_keys), step): 70 | parallels_keys.append(all_keys[i:i + step]) 71 | print "parallels_keys", len(parallels_keys) 72 | pool = Pool(processes = max_procs) 73 | pool.map(Map, parallels_keys) 74 | end_timestamp = time.time() 75 | print ("end speed:", len(all_keys)/(end_timestamp - begin_timestamp)) 76 | 77 | --------------------------------------------------------------------------------