├── .gitignore ├── store ├── goleveldb │ ├── const.go │ ├── batch.go │ ├── snapshot.go │ ├── iterator.go │ └── db.go ├── writebatch.go ├── snapshot.go ├── driver │ ├── batch.go │ ├── store.go │ └── driver.go ├── store.go ├── tx.go ├── db.go ├── tx_test.go ├── iterator.go └── store_test.go ├── tools ├── check_lua.go ├── build_config.sh ├── redis_import │ ├── README.md │ ├── test.py │ └── redis_import.py ├── build_leveldb.sh └── generate_commands.py ├── info.go ├── binlog_test.go ├── config ├── config_test.go ├── config.toml └── config.go ├── multi_test.go ├── LICENSE ├── dump_test.go ├── multi.go ├── README_CN.md ├── doc.go ├── batch.go ├── const.go ├── nodb_test.go ├── tx.go ├── README.md ├── util.go ├── t_kv_test.go ├── nodb.go ├── t_list_test.go ├── t_hash_test.go ├── replication_test.go ├── scan.go ├── tx_test.go ├── nodb_db.go ├── dump.go ├── t_ttl.go ├── binlog_util.go ├── replication.go ├── scan_test.go ├── t_kv.go ├── binlog.go ├── t_set_test.go ├── t_zset_test.go ├── t_ttl_test.go ├── t_list.go ├── t_hash.go └── t_set.go /.gitignore: -------------------------------------------------------------------------------- 1 | build 2 | *.pyc 3 | .DS_Store 4 | nohup.out 5 | build_config.mk 6 | var 7 | .vscode 8 | -------------------------------------------------------------------------------- /store/goleveldb/const.go: -------------------------------------------------------------------------------- 1 | package goleveldb 2 | 3 | const DBName = "goleveldb" 4 | const MemDBName = "memory" 5 | -------------------------------------------------------------------------------- /store/writebatch.go: -------------------------------------------------------------------------------- 1 | package store 2 | 3 | import ( 4 | "github.com/lunny/nodb/store/driver" 5 | ) 6 | 7 | type WriteBatch interface { 8 | driver.IWriteBatch 9 | } 10 | -------------------------------------------------------------------------------- /tools/check_lua.go: 
-------------------------------------------------------------------------------- 1 | // +build ignore 2 | 3 | package main 4 | 5 | import "github.com/siddontang/golua/lua" 6 | 7 | func main() { 8 | L := lua.NewState() 9 | L.Close() 10 | } 11 | -------------------------------------------------------------------------------- /store/snapshot.go: -------------------------------------------------------------------------------- 1 | package store 2 | 3 | import ( 4 | "github.com/lunny/nodb/store/driver" 5 | ) 6 | 7 | type Snapshot struct { 8 | driver.ISnapshot 9 | } 10 | 11 | func (s *Snapshot) NewIterator() *Iterator { 12 | it := new(Iterator) 13 | it.it = s.ISnapshot.NewIterator() 14 | 15 | return it 16 | } 17 | -------------------------------------------------------------------------------- /store/goleveldb/batch.go: -------------------------------------------------------------------------------- 1 | package goleveldb 2 | 3 | import ( 4 | "github.com/syndtr/goleveldb/leveldb" 5 | ) 6 | 7 | type WriteBatch struct { 8 | db *DB 9 | wbatch *leveldb.Batch 10 | } 11 | 12 | func (w *WriteBatch) Put(key, value []byte) { 13 | w.wbatch.Put(key, value) 14 | } 15 | 16 | func (w *WriteBatch) Delete(key []byte) { 17 | w.wbatch.Delete(key) 18 | } 19 | 20 | func (w *WriteBatch) Commit() error { 21 | return w.db.db.Write(w.wbatch, nil) 22 | } 23 | 24 | func (w *WriteBatch) Rollback() error { 25 | w.wbatch.Reset() 26 | return nil 27 | } 28 | -------------------------------------------------------------------------------- /store/goleveldb/snapshot.go: -------------------------------------------------------------------------------- 1 | package goleveldb 2 | 3 | import ( 4 | "github.com/lunny/nodb/store/driver" 5 | "github.com/syndtr/goleveldb/leveldb" 6 | ) 7 | 8 | type Snapshot struct { 9 | db *DB 10 | snp *leveldb.Snapshot 11 | } 12 | 13 | func (s *Snapshot) Get(key []byte) ([]byte, error) { 14 | return s.snp.Get(key, s.db.iteratorOpts) 15 | } 16 | 17 | func (s *Snapshot) NewIterator() 
driver.IIterator { 18 | it := &Iterator{ 19 | s.snp.NewIterator(nil, s.db.iteratorOpts), 20 | } 21 | return it 22 | } 23 | 24 | func (s *Snapshot) Close() { 25 | s.snp.Release() 26 | } 27 | -------------------------------------------------------------------------------- /tools/build_config.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | OUTPUT=$1 4 | PREFIX=$2 5 | if test -z "$OUTPUT" || test -z "$PREFIX"; then 6 | echo "usage: $0 " >&2 7 | exit 1 8 | fi 9 | 10 | # Delete existing output, if it exists 11 | rm -f $OUTPUT 12 | touch $OUTPUT 13 | 14 | source ./dev.sh 15 | 16 | echo "CGO_CFLAGS=$CGO_CFLAGS" >> $OUTPUT 17 | echo "CGO_CXXFLAGS=$CGO_CXXFLAGS" >> $OUTPUT 18 | echo "CGO_LDFLAGS=$CGO_LDFLAGS" >> $OUTPUT 19 | echo "LD_LIBRARY_PATH=$LD_LIBRARY_PATH" >> $OUTPUT 20 | echo "DYLD_LIBRARY_PATH=$DYLD_LIBRARY_PATH" >> $OUTPUT 21 | echo "GO_BUILD_TAGS=$GO_BUILD_TAGS" >> $OUTPUT -------------------------------------------------------------------------------- /info.go: -------------------------------------------------------------------------------- 1 | package nodb 2 | 3 | // todo, add info 4 | 5 | // type Keyspace struct { 6 | // Kvs int `json:"kvs"` 7 | // KvExpires int `json:"kv_expires"` 8 | 9 | // Lists int `json:"lists"` 10 | // ListExpires int `json:"list_expires"` 11 | 12 | // Bitmaps int `json:"bitmaps"` 13 | // BitmapExpires int `json:"bitmap_expires"` 14 | 15 | // ZSets int `json:"zsets"` 16 | // ZSetExpires int `json:"zset_expires"` 17 | 18 | // Hashes int `json:"hashes"` 19 | // HashExpires int `json:"hahsh_expires"` 20 | // } 21 | 22 | // type Info struct { 23 | // KeySpaces [MaxDBNumber]Keyspace 24 | // } 25 | -------------------------------------------------------------------------------- /tools/redis_import/README.md: -------------------------------------------------------------------------------- 1 | ## Notice 2 | 3 | 1. The tool doesn't support `set` data type. 4 | 2. 
The tool doesn't support `bitmap` data type. 5 | 2. Our `zset` use integer instead of double, so the zset float score in Redis 6 | will be **converted to integer**. 7 | 3. Only Support Redis version greater than `2.8.0`, because we use `scan` command to scan data. 8 | Also, you need `redis-py` greater than `2.9.0`. 9 | 10 | 11 | 12 | ## Usage 13 | 14 | 15 | $ python redis_import.py redis_host redis_port redis_db ledis_host ledis_port 16 | 17 | 18 | We will use the same db index as redis. That's to say, data in redis[0] will be transfer to ledisdb[0]. But if redis db `index >= 16`, we will refuse to transfer, because ledisdb only support db `index < 16`. -------------------------------------------------------------------------------- /binlog_test.go: -------------------------------------------------------------------------------- 1 | package nodb 2 | 3 | import ( 4 | "io/ioutil" 5 | "os" 6 | "testing" 7 | 8 | "github.com/lunny/nodb/config" 9 | ) 10 | 11 | func TestBinLog(t *testing.T) { 12 | cfg := new(config.Config) 13 | 14 | cfg.BinLog.MaxFileNum = 1 15 | cfg.BinLog.MaxFileSize = 1024 16 | cfg.DataDir = "/tmp/ledis_binlog" 17 | 18 | os.RemoveAll(cfg.DataDir) 19 | 20 | b, err := NewBinLog(cfg) 21 | if err != nil { 22 | t.Fatal(err) 23 | } 24 | 25 | if err := b.Log(make([]byte, 1024)); err != nil { 26 | t.Fatal(err) 27 | } 28 | 29 | if err := b.Log(make([]byte, 1024)); err != nil { 30 | t.Fatal(err) 31 | } 32 | 33 | if fs, err := ioutil.ReadDir(b.LogPath()); err != nil { 34 | t.Fatal(err) 35 | } else if len(fs) != 2 { 36 | t.Fatal(len(fs)) 37 | } 38 | } 39 | -------------------------------------------------------------------------------- /config/config_test.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | ) 7 | 8 | func TestConfig(t *testing.T) { 9 | dstCfg := new(Config) 10 | dstCfg.Addr = "127.0.0.1:6380" 11 | dstCfg.HttpAddr = "127.0.0.1:11181" 12 | 
dstCfg.DataDir = "/tmp/ledis_server" 13 | dstCfg.DBName = "leveldb" 14 | 15 | dstCfg.LevelDB.Compression = false 16 | dstCfg.LevelDB.BlockSize = 32768 17 | dstCfg.LevelDB.WriteBufferSize = 67108864 18 | dstCfg.LevelDB.CacheSize = 524288000 19 | dstCfg.LevelDB.MaxOpenFiles = 1024 20 | dstCfg.LMDB.MapSize = 524288000 21 | dstCfg.LMDB.NoSync = true 22 | 23 | cfg, err := NewConfigWithFile("./config.toml") 24 | if err != nil { 25 | t.Fatal(err) 26 | } 27 | 28 | if !reflect.DeepEqual(dstCfg, cfg) { 29 | t.Fatal("parse toml error") 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /store/driver/batch.go: -------------------------------------------------------------------------------- 1 | package driver 2 | 3 | type BatchPuter interface { 4 | BatchPut([]Write) error 5 | } 6 | 7 | type Write struct { 8 | Key []byte 9 | Value []byte 10 | } 11 | 12 | type WriteBatch struct { 13 | batch BatchPuter 14 | wb []Write 15 | } 16 | 17 | func (w *WriteBatch) Put(key, value []byte) { 18 | if value == nil { 19 | value = []byte{} 20 | } 21 | w.wb = append(w.wb, Write{key, value}) 22 | } 23 | 24 | func (w *WriteBatch) Delete(key []byte) { 25 | w.wb = append(w.wb, Write{key, nil}) 26 | } 27 | 28 | func (w *WriteBatch) Commit() error { 29 | return w.batch.BatchPut(w.wb) 30 | } 31 | 32 | func (w *WriteBatch) Rollback() error { 33 | w.wb = w.wb[0:0] 34 | return nil 35 | } 36 | 37 | func NewWriteBatch(puter BatchPuter) IWriteBatch { 38 | return &WriteBatch{puter, []Write{}} 39 | } 40 | -------------------------------------------------------------------------------- /store/goleveldb/iterator.go: -------------------------------------------------------------------------------- 1 | package goleveldb 2 | 3 | import ( 4 | "github.com/syndtr/goleveldb/leveldb/iterator" 5 | ) 6 | 7 | type Iterator struct { 8 | it iterator.Iterator 9 | } 10 | 11 | func (it *Iterator) Key() []byte { 12 | return it.it.Key() 13 | } 14 | 15 | func (it *Iterator) Value() 
[]byte { 16 | return it.it.Value() 17 | } 18 | 19 | func (it *Iterator) Close() error { 20 | if it.it != nil { 21 | it.it.Release() 22 | it.it = nil 23 | } 24 | return nil 25 | } 26 | 27 | func (it *Iterator) Valid() bool { 28 | return it.it.Valid() 29 | } 30 | 31 | func (it *Iterator) Next() { 32 | it.it.Next() 33 | } 34 | 35 | func (it *Iterator) Prev() { 36 | it.it.Prev() 37 | } 38 | 39 | func (it *Iterator) First() { 40 | it.it.First() 41 | } 42 | 43 | func (it *Iterator) Last() { 44 | it.it.Last() 45 | } 46 | 47 | func (it *Iterator) Seek(key []byte) { 48 | it.it.Seek(key) 49 | } 50 | -------------------------------------------------------------------------------- /multi_test.go: -------------------------------------------------------------------------------- 1 | package nodb 2 | 3 | import ( 4 | "sync" 5 | "testing" 6 | ) 7 | 8 | func TestMulti(t *testing.T) { 9 | db := getTestDB() 10 | 11 | key := []byte("test_multi_1") 12 | v1 := []byte("v1") 13 | v2 := []byte("v2") 14 | 15 | m, err := db.Multi() 16 | if err != nil { 17 | t.Fatal(err) 18 | } 19 | 20 | wg := sync.WaitGroup{} 21 | 22 | wg.Add(1) 23 | 24 | go func() { 25 | if err := db.Set(key, v2); err != nil { 26 | t.Fatal(err) 27 | } 28 | wg.Done() 29 | }() 30 | 31 | if err := m.Set(key, v1); err != nil { 32 | t.Fatal(err) 33 | } 34 | 35 | if v, err := m.Get(key); err != nil { 36 | t.Fatal(err) 37 | } else if string(v) != string(v1) { 38 | t.Fatal(string(v)) 39 | } 40 | 41 | m.Close() 42 | 43 | wg.Wait() 44 | 45 | if v, err := db.Get(key); err != nil { 46 | t.Fatal(err) 47 | } else if string(v) != string(v2) { 48 | t.Fatal(string(v)) 49 | } 50 | 51 | } 52 | -------------------------------------------------------------------------------- /store/driver/store.go: -------------------------------------------------------------------------------- 1 | package driver 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/lunny/nodb/config" 7 | ) 8 | 9 | type Store interface { 10 | String() string 11 | Open(path string, cfg 
*config.Config) (IDB, error) 12 | Repair(path string, cfg *config.Config) error 13 | } 14 | 15 | var dbs = map[string]Store{} 16 | 17 | func Register(s Store) { 18 | name := s.String() 19 | if _, ok := dbs[name]; ok { 20 | panic(fmt.Errorf("store %s is registered", s)) 21 | } 22 | 23 | dbs[name] = s 24 | } 25 | 26 | func ListStores() []string { 27 | s := []string{} 28 | for k, _ := range dbs { 29 | s = append(s, k) 30 | } 31 | 32 | return s 33 | } 34 | 35 | func GetStore(cfg *config.Config) (Store, error) { 36 | if len(cfg.DBName) == 0 { 37 | cfg.DBName = config.DefaultDBName 38 | } 39 | 40 | s, ok := dbs[cfg.DBName] 41 | if !ok { 42 | return nil, fmt.Errorf("store %s is not registered", cfg.DBName) 43 | } 44 | 45 | return s, nil 46 | } 47 | -------------------------------------------------------------------------------- /config/config.toml: -------------------------------------------------------------------------------- 1 | # LedisDB configuration 2 | 3 | # Server listen address 4 | addr = "127.0.0.1:6380" 5 | 6 | # Server http listen address, set empty to disable 7 | http_addr = "127.0.0.1:11181" 8 | 9 | # Data store path, all ledisdb's data will be saved here 10 | data_dir = "/tmp/ledis_server" 11 | 12 | # Log server command, set empty to disable 13 | access_log = "" 14 | 15 | # Set slaveof to enable replication from master, empty, no replication 16 | slaveof = "" 17 | 18 | # Choose which backend storage to use, now support: 19 | # 20 | # leveldb 21 | # rocksdb 22 | # goleveldb 23 | # lmdb 24 | # boltdb 25 | # hyperleveldb 26 | # memory 27 | # 28 | db_name = "leveldb" 29 | 30 | [leveldb] 31 | compression = false 32 | block_size = 32768 33 | write_buffer_size = 67108864 34 | cache_size = 524288000 35 | max_open_files = 1024 36 | 37 | [lmdb] 38 | map_size = 524288000 39 | nosync = true 40 | 41 | [binlog] 42 | max_file_size = 0 43 | max_file_num = 0 44 | 45 | 46 | -------------------------------------------------------------------------------- /store/store.go: 
-------------------------------------------------------------------------------- 1 | package store 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "path" 7 | "github.com/lunny/nodb/config" 8 | "github.com/lunny/nodb/store/driver" 9 | 10 | _ "github.com/lunny/nodb/store/goleveldb" 11 | ) 12 | 13 | func getStorePath(cfg *config.Config) string { 14 | return path.Join(cfg.DataDir, fmt.Sprintf("%s_data", cfg.DBName)) 15 | } 16 | 17 | func Open(cfg *config.Config) (*DB, error) { 18 | s, err := driver.GetStore(cfg) 19 | if err != nil { 20 | return nil, err 21 | } 22 | 23 | path := getStorePath(cfg) 24 | 25 | if err := os.MkdirAll(path, os.ModePerm); err != nil { 26 | return nil, err 27 | } 28 | 29 | idb, err := s.Open(path, cfg) 30 | if err != nil { 31 | return nil, err 32 | } 33 | 34 | db := &DB{idb} 35 | 36 | return db, nil 37 | } 38 | 39 | func Repair(cfg *config.Config) error { 40 | s, err := driver.GetStore(cfg) 41 | if err != nil { 42 | return err 43 | } 44 | 45 | path := getStorePath(cfg) 46 | 47 | return s.Repair(path, cfg) 48 | } 49 | 50 | func init() { 51 | } 52 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2014 siddontang 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /store/driver/driver.go: -------------------------------------------------------------------------------- 1 | package driver 2 | 3 | import ( 4 | "errors" 5 | ) 6 | 7 | var ( 8 | ErrTxSupport = errors.New("transaction is not supported") 9 | ) 10 | 11 | type IDB interface { 12 | Close() error 13 | 14 | Get(key []byte) ([]byte, error) 15 | 16 | Put(key []byte, value []byte) error 17 | Delete(key []byte) error 18 | 19 | NewIterator() IIterator 20 | 21 | NewWriteBatch() IWriteBatch 22 | 23 | NewSnapshot() (ISnapshot, error) 24 | 25 | Begin() (Tx, error) 26 | } 27 | 28 | type ISnapshot interface { 29 | Get(key []byte) ([]byte, error) 30 | NewIterator() IIterator 31 | Close() 32 | } 33 | 34 | type IIterator interface { 35 | Close() error 36 | 37 | First() 38 | Last() 39 | Seek(key []byte) 40 | 41 | Next() 42 | Prev() 43 | 44 | Valid() bool 45 | 46 | Key() []byte 47 | Value() []byte 48 | } 49 | 50 | type IWriteBatch interface { 51 | Put(key []byte, value []byte) 52 | Delete(key []byte) 53 | Commit() error 54 | Rollback() error 55 | } 56 | 57 | type Tx interface { 58 | Get(key []byte) ([]byte, error) 59 | Put(key []byte, value []byte) error 60 | Delete(key []byte) error 61 | 62 | NewIterator() IIterator 63 | NewWriteBatch() IWriteBatch 64 | 65 | Commit() error 66 | Rollback() error 67 | } 68 | -------------------------------------------------------------------------------- /dump_test.go: 
-------------------------------------------------------------------------------- 1 | package nodb 2 | 3 | import ( 4 | "bytes" 5 | "os" 6 | "testing" 7 | 8 | "github.com/lunny/nodb/config" 9 | "github.com/lunny/nodb/store" 10 | ) 11 | 12 | func TestDump(t *testing.T) { 13 | cfgM := new(config.Config) 14 | cfgM.DataDir = "/tmp/test_ledis_master" 15 | 16 | os.RemoveAll(cfgM.DataDir) 17 | 18 | master, err := Open(cfgM) 19 | if err != nil { 20 | t.Fatal(err) 21 | } 22 | 23 | cfgS := new(config.Config) 24 | cfgS.DataDir = "/tmp/test_ledis_slave" 25 | os.RemoveAll(cfgM.DataDir) 26 | 27 | var slave *Nodb 28 | if slave, err = Open(cfgS); err != nil { 29 | t.Fatal(err) 30 | } 31 | 32 | db, _ := master.Select(0) 33 | 34 | db.Set([]byte("a"), []byte("1")) 35 | db.Set([]byte("b"), []byte("2")) 36 | db.Set([]byte("c"), []byte("3")) 37 | 38 | if err := master.DumpFile("/tmp/testdb.dump"); err != nil { 39 | t.Fatal(err) 40 | } 41 | 42 | if _, err := slave.LoadDumpFile("/tmp/testdb.dump"); err != nil { 43 | t.Fatal(err) 44 | } 45 | 46 | it := master.ldb.RangeLimitIterator(nil, nil, store.RangeClose, 0, -1) 47 | for ; it.Valid(); it.Next() { 48 | key := it.Key() 49 | value := it.Value() 50 | 51 | if v, err := slave.ldb.Get(key); err != nil { 52 | t.Fatal(err) 53 | } else if !bytes.Equal(v, value) { 54 | t.Fatal("load dump error") 55 | } 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /store/tx.go: -------------------------------------------------------------------------------- 1 | package store 2 | 3 | import ( 4 | "github.com/lunny/nodb/store/driver" 5 | ) 6 | 7 | type Tx struct { 8 | driver.Tx 9 | } 10 | 11 | func (tx *Tx) NewIterator() *Iterator { 12 | it := new(Iterator) 13 | it.it = tx.Tx.NewIterator() 14 | 15 | return it 16 | } 17 | 18 | func (tx *Tx) NewWriteBatch() WriteBatch { 19 | return tx.Tx.NewWriteBatch() 20 | } 21 | 22 | func (tx *Tx) RangeIterator(min []byte, max []byte, rangeType uint8) *RangeLimitIterator { 23 | 
return NewRangeLimitIterator(tx.NewIterator(), &Range{min, max, rangeType}, &Limit{0, -1}) 24 | } 25 | 26 | func (tx *Tx) RevRangeIterator(min []byte, max []byte, rangeType uint8) *RangeLimitIterator { 27 | return NewRevRangeLimitIterator(tx.NewIterator(), &Range{min, max, rangeType}, &Limit{0, -1}) 28 | } 29 | 30 | //count < 0, unlimit. 31 | // 32 | //offset must >= 0, if < 0, will get nothing. 33 | func (tx *Tx) RangeLimitIterator(min []byte, max []byte, rangeType uint8, offset int, count int) *RangeLimitIterator { 34 | return NewRangeLimitIterator(tx.NewIterator(), &Range{min, max, rangeType}, &Limit{offset, count}) 35 | } 36 | 37 | //count < 0, unlimit. 38 | // 39 | //offset must >= 0, if < 0, will get nothing. 40 | func (tx *Tx) RevRangeLimitIterator(min []byte, max []byte, rangeType uint8, offset int, count int) *RangeLimitIterator { 41 | return NewRevRangeLimitIterator(tx.NewIterator(), &Range{min, max, rangeType}, &Limit{offset, count}) 42 | } 43 | -------------------------------------------------------------------------------- /tools/build_leveldb.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SNAPPY_DIR=/usr/local/snappy 4 | LEVELDB_DIR=/usr/local/leveldb 5 | 6 | ROOT_DIR=$(pwd) 7 | 8 | BUILD_DIR=/tmp/build_leveldb 9 | 10 | mkdir -p $BUILD_DIR 11 | 12 | cd $BUILD_DIR 13 | 14 | if [ ! -f $SNAPPY_DIR/lib/libsnappy.a ]; then 15 | (git clone https://github.com/siddontang/snappy.git ; \ 16 | cd ./snappy && \ 17 | autoreconf --force --install && \ 18 | ./configure --prefix=$SNAPPY_DIR && \ 19 | make && \ 20 | make install && \ 21 | cd ..) 22 | else 23 | echo "skip install snappy" 24 | fi 25 | 26 | cd $BUILD_DIR 27 | 28 | if [ ! 
-f $LEVELDB_DIR/lib/libleveldb.a ]; then 29 | (git clone https://github.com/siddontang/leveldb.git ; \ 30 | cd ./leveldb && \ 31 | echo "echo \"PLATFORM_CFLAGS+=-I$SNAPPY_DIR/include\" >> build_config.mk" >> build_detect_platform && 32 | echo "echo \"PLATFORM_CXXFLAGS+=-I$SNAPPY_DIR/include\" >> build_config.mk" >> build_detect_platform && 33 | echo "echo \"PLATFORM_LDFLAGS+=-L $SNAPPY_DIR/lib -lsnappy\" >> build_config.mk" >> build_detect_platform && 34 | make SNAPPY=1 && \ 35 | make && \ 36 | mkdir -p $LEVELDB_DIR/include/leveldb && \ 37 | install include/leveldb/*.h $LEVELDB_DIR/include/leveldb && \ 38 | mkdir -p $LEVELDB_DIR/lib && \ 39 | cp -f libleveldb.* $LEVELDB_DIR/lib &&\ 40 | cd ..) 41 | else 42 | echo "skip install leveldb" 43 | fi 44 | 45 | cd $ROOT_DIR 46 | -------------------------------------------------------------------------------- /multi.go: -------------------------------------------------------------------------------- 1 | package nodb 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | ) 7 | 8 | var ( 9 | ErrNestMulti = errors.New("nest multi not supported") 10 | ErrMultiDone = errors.New("multi has been closed") 11 | ) 12 | 13 | type Multi struct { 14 | *DB 15 | } 16 | 17 | func (db *DB) IsInMulti() bool { 18 | return db.status == DBInMulti 19 | } 20 | 21 | // begin a mutli to execute commands, 22 | // it will block any other write operations before you close the multi, unlike transaction, mutli can not rollback 23 | func (db *DB) Multi() (*Multi, error) { 24 | if db.IsInMulti() { 25 | return nil, ErrNestMulti 26 | } 27 | 28 | m := new(Multi) 29 | 30 | m.DB = new(DB) 31 | m.DB.status = DBInMulti 32 | 33 | m.DB.l = db.l 34 | 35 | m.l.wLock.Lock() 36 | 37 | m.DB.sdb = db.sdb 38 | 39 | m.DB.bucket = db.sdb 40 | 41 | m.DB.index = db.index 42 | 43 | m.DB.kvBatch = m.newBatch() 44 | m.DB.listBatch = m.newBatch() 45 | m.DB.hashBatch = m.newBatch() 46 | m.DB.zsetBatch = m.newBatch() 47 | m.DB.binBatch = m.newBatch() 48 | m.DB.setBatch = m.newBatch() 49 | 50 
| return m, nil 51 | } 52 | 53 | func (m *Multi) newBatch() *batch { 54 | return m.l.newBatch(m.bucket.NewWriteBatch(), &multiBatchLocker{}, nil) 55 | } 56 | 57 | func (m *Multi) Close() error { 58 | if m.bucket == nil { 59 | return ErrMultiDone 60 | } 61 | m.l.wLock.Unlock() 62 | m.bucket = nil 63 | return nil 64 | } 65 | 66 | func (m *Multi) Select(index int) error { 67 | if index < 0 || index >= int(MaxDBNumber) { 68 | return fmt.Errorf("invalid db index %d", index) 69 | } 70 | 71 | m.DB.index = uint8(index) 72 | return nil 73 | } 74 | -------------------------------------------------------------------------------- /store/db.go: -------------------------------------------------------------------------------- 1 | package store 2 | 3 | import ( 4 | "github.com/lunny/nodb/store/driver" 5 | ) 6 | 7 | type DB struct { 8 | driver.IDB 9 | } 10 | 11 | func (db *DB) NewIterator() *Iterator { 12 | it := new(Iterator) 13 | it.it = db.IDB.NewIterator() 14 | 15 | return it 16 | } 17 | 18 | func (db *DB) NewWriteBatch() WriteBatch { 19 | return db.IDB.NewWriteBatch() 20 | } 21 | 22 | func (db *DB) NewSnapshot() (*Snapshot, error) { 23 | var err error 24 | s := &Snapshot{} 25 | if s.ISnapshot, err = db.IDB.NewSnapshot(); err != nil { 26 | return nil, err 27 | } 28 | 29 | return s, nil 30 | } 31 | 32 | func (db *DB) RangeIterator(min []byte, max []byte, rangeType uint8) *RangeLimitIterator { 33 | return NewRangeLimitIterator(db.NewIterator(), &Range{min, max, rangeType}, &Limit{0, -1}) 34 | } 35 | 36 | func (db *DB) RevRangeIterator(min []byte, max []byte, rangeType uint8) *RangeLimitIterator { 37 | return NewRevRangeLimitIterator(db.NewIterator(), &Range{min, max, rangeType}, &Limit{0, -1}) 38 | } 39 | 40 | //count < 0, unlimit. 41 | // 42 | //offset must >= 0, if < 0, will get nothing. 
43 | func (db *DB) RangeLimitIterator(min []byte, max []byte, rangeType uint8, offset int, count int) *RangeLimitIterator { 44 | return NewRangeLimitIterator(db.NewIterator(), &Range{min, max, rangeType}, &Limit{offset, count}) 45 | } 46 | 47 | //count < 0, unlimit. 48 | // 49 | //offset must >= 0, if < 0, will get nothing. 50 | func (db *DB) RevRangeLimitIterator(min []byte, max []byte, rangeType uint8, offset int, count int) *RangeLimitIterator { 51 | return NewRevRangeLimitIterator(db.NewIterator(), &Range{min, max, rangeType}, &Limit{offset, count}) 52 | } 53 | 54 | func (db *DB) Begin() (*Tx, error) { 55 | tx, err := db.IDB.Begin() 56 | if err != nil { 57 | return nil, err 58 | } 59 | 60 | return &Tx{tx}, nil 61 | } 62 | -------------------------------------------------------------------------------- /README_CN.md: -------------------------------------------------------------------------------- 1 | # NoDB 2 | 3 | [English](https://github.com/lunny/nodb/blob/master/README.md) 4 | 5 | Nodb 是 [ledisdb](https://github.com/siddontang/ledisdb) 的克隆和缩减版本。该版本去掉了所有C和其它语言的依赖,只保留Go语言的。目标是提供一个Nosql数据库的开发库而不是提供一个像Redis那样的服务器。因此如果你想要的是一个独立服务器,你可以直接选择ledisdb。 6 | 7 | Nodb 是一个纯Go的高性能 NoSQL 数据库。他支持 kv, list, hash, zset, bitmap, set 等数据结构。 8 | 9 | Nodb 当前底层使用 (goleveldb)[https://github.com/syndtr/goleveldb] 来存储数据。 10 | 11 | ## 特性 12 | 13 | + 丰富的数据结构支持: KV, List, Hash, ZSet, Bitmap, Set。 14 | + 永久存储并且不受内存的限制。 15 | + 高性能那个。 16 | + 可以方便的嵌入到你的应用程序中。 17 | 18 | ## 安装 19 | 20 | go get github.com/lunny/nodb 21 | 22 | ## 例子 23 | 24 | ### 打开和选择数据库 25 | ```go 26 | import( 27 | "github.com/lunny/nodb" 28 | "github.com/lunny/nodb/config" 29 | ) 30 | 31 | cfg := new(config.Config) 32 | cfg.DataDir = "./" 33 | dbs, err := nodb.Open(cfg) 34 | if err != nil { 35 | fmt.Printf("nodb: error opening db: %v", err) 36 | } 37 | db, _ := dbs.Select(0) 38 | ``` 39 | ### KV 40 | 41 | KV 是最基础的功能,和其它Nosql一样。 42 | ```go 43 | err := db.Set(key, value) 44 | value, err := db.Get(key) 45 | ``` 46 | ### List 47 | 
48 | List 是一些值的简单列表,按照插入的顺序排列。你可以从左或右push和pop值。 49 | ```go 50 | err := db.LPush(key, value1) 51 | err := db.RPush(key, value2) 52 | value1, err := db.LPop(key) 53 | value2, err := db.RPop(key) 54 | ``` 55 | ### Hash 56 | 57 | Hash 是一个field和value对应的map。 58 | ```go 59 | n, err := db.HSet(key, field1, value1) 60 | n, err := db.HSet(key, field2, value2) 61 | value1, err := db.HGet(key, field1) 62 | value2, err := db.HGet(key, field2) 63 | ``` 64 | ### ZSet 65 | 66 | ZSet 是一个排序的值集合。zset的每个成员对应一个score,这是一个int64的值用于从小到大排序。成员不可重复,但是score可以相同。 67 | ```go 68 | n, err := db.ZAdd(key, ScorePair{score1, member1}, ScorePair{score2, member2}) 69 | ay, err := db.ZRangeByScore(key, minScore, maxScore, 0, -1) 70 | ``` 71 | 72 | ## 链接 73 | 74 | + [Ledisdb Official Website](http://ledisdb.com) 75 | + [GoDoc](https://godoc.org/github.com/lunny/nodb) 76 | + [GoWalker](https://gowalker.org/github.com/lunny/nodb) 77 | 78 | 79 | ## 感谢 80 | 81 | Gmail: siddontang@gmail.com 82 | -------------------------------------------------------------------------------- /doc.go: -------------------------------------------------------------------------------- 1 | // package nodb is a high performance embedded NoSQL. 2 | // 3 | // nodb supports various data structure like kv, list, hash and zset like redis. 4 | // 5 | // Other features include binlog replication, data with a limited time-to-live. 6 | // 7 | // Usage 8 | // 9 | // First create a nodb instance before use: 10 | // 11 | // l := nodb.Open(cfg) 12 | // 13 | // cfg is a Config instance which contains configuration for nodb use, 14 | // like DataDir (root directory for nodb working to store data). 15 | // 16 | // After you create a nodb instance, you can select a DB to store you data: 17 | // 18 | // db, _ := l.Select(0) 19 | // 20 | // DB must be selected by a index, nodb supports only 16 databases, so the index range is [0-15]. 21 | // 22 | // KV 23 | // 24 | // KV is the most basic nodb type like any other key-value database. 
25 | // 26 | // err := db.Set(key, value) 27 | // value, err := db.Get(key) 28 | // 29 | // List 30 | // 31 | // List is simply lists of values, sorted by insertion order. 32 | // You can push or pop value on the list head (left) or tail (right). 33 | // 34 | // err := db.LPush(key, value1) 35 | // err := db.RPush(key, value2) 36 | // value1, err := db.LPop(key) 37 | // value2, err := db.RPop(key) 38 | // 39 | // Hash 40 | // 41 | // Hash is a map between fields and values. 42 | // 43 | // n, err := db.HSet(key, field1, value1) 44 | // n, err := db.HSet(key, field2, value2) 45 | // value1, err := db.HGet(key, field1) 46 | // value2, err := db.HGet(key, field2) 47 | // 48 | // ZSet 49 | // 50 | // ZSet is a sorted collections of values. 51 | // Every member of zset is associated with score, a int64 value which used to sort, from smallest to greatest score. 52 | // Members are unique, but score may be same. 53 | // 54 | // n, err := db.ZAdd(key, ScorePair{score1, member1}, ScorePair{score2, member2}) 55 | // ay, err := db.ZRangeByScore(key, minScore, maxScore, 0, -1) 56 | // 57 | // Binlog 58 | // 59 | // nodb supports binlog, so you can sync binlog to another server for replication. If you want to open binlog support, set UseBinLog to true in config. 60 | // 61 | package nodb 62 | -------------------------------------------------------------------------------- /batch.go: -------------------------------------------------------------------------------- 1 | package nodb 2 | 3 | import ( 4 | "sync" 5 | 6 | "github.com/lunny/nodb/store" 7 | ) 8 | 9 | type batch struct { 10 | l *Nodb 11 | 12 | store.WriteBatch 13 | 14 | sync.Locker 15 | 16 | logs [][]byte 17 | 18 | tx *Tx 19 | } 20 | 21 | func (b *batch) Commit() error { 22 | b.l.commitLock.Lock() 23 | defer b.l.commitLock.Unlock() 24 | 25 | err := b.WriteBatch.Commit() 26 | 27 | if b.l.binlog != nil { 28 | if err == nil { 29 | if b.tx == nil { 30 | b.l.binlog.Log(b.logs...) 
31 | } else { 32 | b.tx.logs = append(b.tx.logs, b.logs...) 33 | } 34 | } 35 | b.logs = [][]byte{} 36 | } 37 | 38 | return err 39 | } 40 | 41 | func (b *batch) Lock() { 42 | b.Locker.Lock() 43 | } 44 | 45 | func (b *batch) Unlock() { 46 | if b.l.binlog != nil { 47 | b.logs = [][]byte{} 48 | } 49 | b.WriteBatch.Rollback() 50 | b.Locker.Unlock() 51 | } 52 | 53 | func (b *batch) Put(key []byte, value []byte) { 54 | if b.l.binlog != nil { 55 | buf := encodeBinLogPut(key, value) 56 | b.logs = append(b.logs, buf) 57 | } 58 | b.WriteBatch.Put(key, value) 59 | } 60 | 61 | func (b *batch) Delete(key []byte) { 62 | if b.l.binlog != nil { 63 | buf := encodeBinLogDelete(key) 64 | b.logs = append(b.logs, buf) 65 | } 66 | b.WriteBatch.Delete(key) 67 | } 68 | 69 | type dbBatchLocker struct { 70 | l *sync.Mutex 71 | wrLock *sync.RWMutex 72 | } 73 | 74 | func (l *dbBatchLocker) Lock() { 75 | l.wrLock.RLock() 76 | l.l.Lock() 77 | } 78 | 79 | func (l *dbBatchLocker) Unlock() { 80 | l.l.Unlock() 81 | l.wrLock.RUnlock() 82 | } 83 | 84 | type txBatchLocker struct { 85 | } 86 | 87 | func (l *txBatchLocker) Lock() {} 88 | func (l *txBatchLocker) Unlock() {} 89 | 90 | type multiBatchLocker struct { 91 | } 92 | 93 | func (l *multiBatchLocker) Lock() {} 94 | func (l *multiBatchLocker) Unlock() {} 95 | 96 | func (l *Nodb) newBatch(wb store.WriteBatch, locker sync.Locker, tx *Tx) *batch { 97 | b := new(batch) 98 | b.l = l 99 | b.WriteBatch = wb 100 | 101 | b.tx = tx 102 | b.Locker = locker 103 | 104 | b.logs = [][]byte{} 105 | return b 106 | } 107 | -------------------------------------------------------------------------------- /tools/generate_commands.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import json 4 | import time 5 | import sys 6 | import os 7 | from collections import OrderedDict as dict 8 | 9 | 10 | def json_to_js(json_path, js_path): 11 | """Convert `commands.json` to `commands.js`""" 12 | keys = [] 13 | 
with open(json_path) as fp: 14 | _json = json.load(fp) 15 | for k in _json.keys(): 16 | keys.append(k.encode('utf-8')) 17 | with open(js_path, "w") as fp: 18 | generate_time(fp) 19 | fp.write("module.exports = [\n") 20 | for k in sorted(keys): 21 | fp.write('\t"%s",\n' % k.lower()) 22 | fp.write("]") 23 | 24 | 25 | def json_to_go_array(json_path, go_path): 26 | g_fp = open(go_path, "w") 27 | with open(json_path) as fp: 28 | _json = json.load(fp) 29 | generate_time(g_fp) 30 | g_fp.write("package main\n\nvar helpCommands = [][]string{\n") 31 | _json_sorted = dict(sorted(_json.items(), key=lambda x: x[0])) 32 | for k, v in _json_sorted.iteritems(): 33 | g_fp.write('\t{"%s", "%s", "%s"},\n' % (k, v["arguments"], v["group"])) 34 | g_fp.write("}\n") 35 | g_fp.close() 36 | 37 | 38 | def generate_time(fp): 39 | fp.write("//This file was generated by .tools/generate_commands.py on %s \n" % 40 | time.strftime('%a %b %d %Y %H:%M:%S %z')) 41 | 42 | 43 | if __name__ == "__main__": 44 | usage = """ 45 | Usage: python %s src_path dst_path" 46 | 47 | 1. for Node.js client: 48 | 49 | python generate.py /path/to/commands.json /path/to/commands.js 50 | 51 | 2. 
for cmd/ledis_cli/const.go 52 | 53 | python generate.py /path/to/commands.json /path/to/const.go 54 | 55 | """ 56 | 57 | if len(sys.argv) != 3: 58 | sys.exit(usage % os.path.basename(sys.argv[0])) 59 | 60 | src_path, dst_path = sys.argv[1:] 61 | dst_path_base = os.path.basename(dst_path) 62 | 63 | if dst_path_base.endswith(".js"): 64 | json_to_js(src_path, dst_path) 65 | 66 | elif dst_path_base.startswith("const.go"): 67 | json_to_go_array(src_path, dst_path) 68 | 69 | else: 70 | print "Not support arguments" 71 | -------------------------------------------------------------------------------- /const.go: -------------------------------------------------------------------------------- 1 | package nodb 2 | 3 | import ( 4 | "errors" 5 | ) 6 | 7 | const ( 8 | NoneType byte = 0 9 | KVType byte = 1 10 | HashType byte = 2 11 | HSizeType byte = 3 12 | ListType byte = 4 13 | LMetaType byte = 5 14 | ZSetType byte = 6 15 | ZSizeType byte = 7 16 | ZScoreType byte = 8 17 | BitType byte = 9 18 | BitMetaType byte = 10 19 | SetType byte = 11 20 | SSizeType byte = 12 21 | 22 | maxDataType byte = 100 23 | 24 | ExpTimeType byte = 101 25 | ExpMetaType byte = 102 26 | ) 27 | 28 | var ( 29 | TypeName = map[byte]string{ 30 | KVType: "kv", 31 | HashType: "hash", 32 | HSizeType: "hsize", 33 | ListType: "list", 34 | LMetaType: "lmeta", 35 | ZSetType: "zset", 36 | ZSizeType: "zsize", 37 | ZScoreType: "zscore", 38 | BitType: "bit", 39 | BitMetaType: "bitmeta", 40 | SetType: "set", 41 | SSizeType: "ssize", 42 | ExpTimeType: "exptime", 43 | ExpMetaType: "expmeta", 44 | } 45 | ) 46 | 47 | const ( 48 | defaultScanCount int = 10 49 | ) 50 | 51 | var ( 52 | errKeySize = errors.New("invalid key size") 53 | errValueSize = errors.New("invalid value size") 54 | errHashFieldSize = errors.New("invalid hash field size") 55 | errSetMemberSize = errors.New("invalid set member size") 56 | errZSetMemberSize = errors.New("invalid zset member size") 57 | errExpireValue = errors.New("invalid expire value") 58 | 
) 59 | 60 | const ( 61 | //we don't support too many databases 62 | MaxDBNumber uint8 = 16 63 | 64 | //max key size 65 | MaxKeySize int = 1024 66 | 67 | //max hash field size 68 | MaxHashFieldSize int = 1024 69 | 70 | //max zset member size 71 | MaxZSetMemberSize int = 1024 72 | 73 | //max set member size 74 | MaxSetMemberSize int = 1024 75 | 76 | //max value size 77 | MaxValueSize int = 10 * 1024 * 1024 78 | ) 79 | 80 | var ( 81 | ErrScoreMiss = errors.New("zset score miss") 82 | ) 83 | 84 | const ( 85 | BinLogTypeDeletion uint8 = 0x0 86 | BinLogTypePut uint8 = 0x1 87 | BinLogTypeCommand uint8 = 0x2 88 | ) 89 | 90 | const ( 91 | DBAutoCommit uint8 = 0x0 92 | DBInTransaction uint8 = 0x1 93 | DBInMulti uint8 = 0x2 94 | ) 95 | 96 | var ( 97 | Version = "0.1" 98 | ) 99 | -------------------------------------------------------------------------------- /nodb_test.go: -------------------------------------------------------------------------------- 1 | package nodb 2 | 3 | import ( 4 | "os" 5 | "sync" 6 | "testing" 7 | 8 | "github.com/lunny/nodb/config" 9 | ) 10 | 11 | var testLedis *Nodb 12 | var testLedisOnce sync.Once 13 | 14 | func getTestDB() *DB { 15 | f := func() { 16 | cfg := new(config.Config) 17 | cfg.DataDir = "/tmp/test_ledis" 18 | // cfg.BinLog.MaxFileSize = 1073741824 19 | // cfg.BinLog.MaxFileNum = 3 20 | 21 | os.RemoveAll(cfg.DataDir) 22 | 23 | var err error 24 | testLedis, err = Open(cfg) 25 | if err != nil { 26 | println(err.Error()) 27 | panic(err) 28 | } 29 | } 30 | 31 | testLedisOnce.Do(f) 32 | db, _ := testLedis.Select(0) 33 | return db 34 | } 35 | 36 | func TestDB(t *testing.T) { 37 | getTestDB() 38 | } 39 | 40 | func TestSelect(t *testing.T) { 41 | db0, _ := testLedis.Select(0) 42 | db1, _ := testLedis.Select(1) 43 | 44 | key0 := []byte("db0_test_key") 45 | key1 := []byte("db1_test_key") 46 | 47 | db0.Set(key0, []byte("0")) 48 | db1.Set(key1, []byte("1")) 49 | 50 | if v, err := db0.Get(key0); err != nil { 51 | t.Fatal(err) 52 | } else if string(v) 
!= "0" { 53 | t.Fatal(string(v)) 54 | } 55 | 56 | if v, err := db1.Get(key1); err != nil { 57 | t.Fatal(err) 58 | } else if string(v) != "1" { 59 | t.Fatal(string(v)) 60 | } 61 | } 62 | 63 | func TestFlush(t *testing.T) { 64 | db0, _ := testLedis.Select(0) 65 | db1, _ := testLedis.Select(1) 66 | 67 | db0.Set([]byte("a"), []byte("1")) 68 | db0.ZAdd([]byte("zset_0"), ScorePair{int64(1), []byte("ma")}) 69 | db0.ZAdd([]byte("zset_0"), ScorePair{int64(2), []byte("mb")}) 70 | 71 | db1.Set([]byte("b"), []byte("2")) 72 | db1.LPush([]byte("lst"), []byte("a1"), []byte("b2")) 73 | db1.ZAdd([]byte("zset_0"), ScorePair{int64(3), []byte("mc")}) 74 | 75 | db1.FlushAll() 76 | 77 | // 0 - existing 78 | if exists, _ := db0.Exists([]byte("a")); exists <= 0 { 79 | t.Fatal(false) 80 | } 81 | 82 | if zcnt, _ := db0.ZCard([]byte("zset_0")); zcnt != 2 { 83 | t.Fatal(zcnt) 84 | } 85 | 86 | // 1 - deleted 87 | if exists, _ := db1.Exists([]byte("b")); exists > 0 { 88 | t.Fatal(false) 89 | } 90 | 91 | if llen, _ := db1.LLen([]byte("lst")); llen > 0 { 92 | t.Fatal(llen) 93 | } 94 | 95 | if zcnt, _ := db1.ZCard([]byte("zset_1")); zcnt > 0 { 96 | t.Fatal(zcnt) 97 | } 98 | } 99 | -------------------------------------------------------------------------------- /tx.go: -------------------------------------------------------------------------------- 1 | package nodb 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | 7 | "github.com/lunny/nodb/store" 8 | ) 9 | 10 | var ( 11 | ErrNestTx = errors.New("nest transaction not supported") 12 | ErrTxDone = errors.New("Transaction has already been committed or rolled back") 13 | ) 14 | 15 | type Tx struct { 16 | *DB 17 | 18 | tx *store.Tx 19 | 20 | logs [][]byte 21 | } 22 | 23 | func (db *DB) IsTransaction() bool { 24 | return db.status == DBInTransaction 25 | } 26 | 27 | // Begin a transaction, it will block all other write operations before calling Commit or Rollback. 28 | // You must be very careful to prevent long-time transaction. 
29 | func (db *DB) Begin() (*Tx, error) { 30 | if db.IsTransaction() { 31 | return nil, ErrNestTx 32 | } 33 | 34 | tx := new(Tx) 35 | 36 | tx.DB = new(DB) 37 | tx.DB.l = db.l 38 | 39 | tx.l.wLock.Lock() 40 | 41 | tx.DB.sdb = db.sdb 42 | 43 | var err error 44 | tx.tx, err = db.sdb.Begin() 45 | if err != nil { 46 | tx.l.wLock.Unlock() 47 | return nil, err 48 | } 49 | 50 | tx.DB.bucket = tx.tx 51 | 52 | tx.DB.status = DBInTransaction 53 | 54 | tx.DB.index = db.index 55 | 56 | tx.DB.kvBatch = tx.newBatch() 57 | tx.DB.listBatch = tx.newBatch() 58 | tx.DB.hashBatch = tx.newBatch() 59 | tx.DB.zsetBatch = tx.newBatch() 60 | tx.DB.binBatch = tx.newBatch() 61 | tx.DB.setBatch = tx.newBatch() 62 | 63 | return tx, nil 64 | } 65 | 66 | func (tx *Tx) Commit() error { 67 | if tx.tx == nil { 68 | return ErrTxDone 69 | } 70 | 71 | tx.l.commitLock.Lock() 72 | err := tx.tx.Commit() 73 | tx.tx = nil 74 | 75 | if len(tx.logs) > 0 { 76 | tx.l.binlog.Log(tx.logs...) 77 | } 78 | 79 | tx.l.commitLock.Unlock() 80 | 81 | tx.l.wLock.Unlock() 82 | 83 | tx.DB.bucket = nil 84 | 85 | return err 86 | } 87 | 88 | func (tx *Tx) Rollback() error { 89 | if tx.tx == nil { 90 | return ErrTxDone 91 | } 92 | 93 | err := tx.tx.Rollback() 94 | tx.tx = nil 95 | 96 | tx.l.wLock.Unlock() 97 | tx.DB.bucket = nil 98 | 99 | return err 100 | } 101 | 102 | func (tx *Tx) newBatch() *batch { 103 | return tx.l.newBatch(tx.tx.NewWriteBatch(), &txBatchLocker{}, tx) 104 | } 105 | 106 | func (tx *Tx) Select(index int) error { 107 | if index < 0 || index >= int(MaxDBNumber) { 108 | return fmt.Errorf("invalid db index %d", index) 109 | } 110 | 111 | tx.DB.index = uint8(index) 112 | return nil 113 | } 114 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # NoDB 2 | 3 | [中文](https://github.com/lunny/nodb/blob/master/README_CN.md) 4 | 5 | Nodb is a fork of 
[ledisdb](https://github.com/siddontang/ledisdb) and a shrunk version. It gets rid of all C and other non-Go code and keeps only the Go parts. It aims to provide a nosql database library rather than a redis like server. So if you want a redis like server, ledisdb is the best choice. 6 | 7 | Nodb is a pure Go and high performance NoSQL database library. It supports data structures such as kv, list, hash, zset, bitmap, set. 8 | 9 | Nodb now uses [goleveldb](https://github.com/syndtr/goleveldb) as the backend to store data. 10 | 11 | ## Features 12 | 13 | + Rich data structure: KV, List, Hash, ZSet, Bitmap, Set. 14 | + Stores lots of data, over the memory limit. 15 | + Supports expiration and ttl. 16 | + Easy to embed in your own Go application. 17 | 18 | ## Install 19 | 20 | go get github.com/lunny/nodb 21 | 22 | ## Package Example 23 | 24 | ### Open And Select database 25 | ```go 26 | import( 27 | "github.com/lunny/nodb" 28 | "github.com/lunny/nodb/config" 29 | ) 30 | 31 | cfg := new(config.Config) 32 | cfg.DataDir = "./" 33 | dbs, err := nodb.Open(cfg) 34 | if err != nil { 35 | fmt.Printf("nodb: error opening db: %v", err) 36 | } 37 | 38 | db, _ := dbs.Select(0) 39 | ``` 40 | ### KV 41 | 42 | KV is the most basic nodb type like any other key-value database. 43 | ```go 44 | err := db.Set(key, value) 45 | value, err := db.Get(key) 46 | ``` 47 | ### List 48 | 49 | List is simply lists of values, sorted by insertion order. 50 | You can push or pop a value on the list head (left) or tail (right). 51 | ```go 52 | err := db.LPush(key, value1) 53 | err := db.RPush(key, value2) 54 | value1, err := db.LPop(key) 55 | value2, err := db.RPop(key) 56 | ``` 57 | ### Hash 58 | 59 | Hash is a map between fields and values. 60 | ```go 61 | n, err := db.HSet(key, field1, value1) 62 | n, err := db.HSet(key, field2, value2) 63 | value1, err := db.HGet(key, field1) 64 | value2, err := db.HGet(key, field2) 65 | ``` 66 | ### ZSet 67 | 68 | ZSet is a sorted collection of values.
69 | Every member of zset is associated with score, a int64 value which used to sort, from smallest to greatest score. 70 | Members are unique, but score may be same. 71 | ```go 72 | n, err := db.ZAdd(key, ScorePair{score1, member1}, ScorePair{score2, member2}) 73 | ay, err := db.ZRangeByScore(key, minScore, maxScore, 0, -1) 74 | ``` 75 | ## Links 76 | 77 | + [Ledisdb Official Website](http://ledisdb.com) 78 | + [GoDoc](https://godoc.org/github.com/lunny/nodb) 79 | + [GoWalker](https://gowalker.org/github.com/lunny/nodb) 80 | 81 | 82 | ## Thanks 83 | 84 | Gmail: siddontang@gmail.com 85 | -------------------------------------------------------------------------------- /util.go: -------------------------------------------------------------------------------- 1 | package nodb 2 | 3 | import ( 4 | "encoding/binary" 5 | "errors" 6 | "reflect" 7 | "strconv" 8 | "unsafe" 9 | ) 10 | 11 | var errIntNumber = errors.New("invalid integer") 12 | 13 | // no copy to change slice to string 14 | // use your own risk 15 | func String(b []byte) (s string) { 16 | pbytes := (*reflect.SliceHeader)(unsafe.Pointer(&b)) 17 | pstring := (*reflect.StringHeader)(unsafe.Pointer(&s)) 18 | pstring.Data = pbytes.Data 19 | pstring.Len = pbytes.Len 20 | return 21 | } 22 | 23 | // no copy to change string to slice 24 | // use your own risk 25 | func Slice(s string) (b []byte) { 26 | pbytes := (*reflect.SliceHeader)(unsafe.Pointer(&b)) 27 | pstring := (*reflect.StringHeader)(unsafe.Pointer(&s)) 28 | pbytes.Data = pstring.Data 29 | pbytes.Len = pstring.Len 30 | pbytes.Cap = pstring.Len 31 | return 32 | } 33 | 34 | func Int64(v []byte, err error) (int64, error) { 35 | if err != nil { 36 | return 0, err 37 | } else if v == nil || len(v) == 0 { 38 | return 0, nil 39 | } else if len(v) != 8 { 40 | return 0, errIntNumber 41 | } 42 | 43 | return int64(binary.LittleEndian.Uint64(v)), nil 44 | } 45 | 46 | func PutInt64(v int64) []byte { 47 | var b []byte 48 | pbytes := 
(*reflect.SliceHeader)(unsafe.Pointer(&b)) 49 | pbytes.Data = uintptr(unsafe.Pointer(&v)) 50 | pbytes.Len = 8 51 | pbytes.Cap = 8 52 | return b 53 | } 54 | 55 | func StrInt64(v []byte, err error) (int64, error) { 56 | if err != nil { 57 | return 0, err 58 | } else if v == nil { 59 | return 0, nil 60 | } else { 61 | return strconv.ParseInt(String(v), 10, 64) 62 | } 63 | } 64 | 65 | func StrInt32(v []byte, err error) (int32, error) { 66 | if err != nil { 67 | return 0, err 68 | } else if v == nil { 69 | return 0, nil 70 | } else { 71 | res, err := strconv.ParseInt(String(v), 10, 32) 72 | return int32(res), err 73 | } 74 | } 75 | 76 | func StrInt8(v []byte, err error) (int8, error) { 77 | if err != nil { 78 | return 0, err 79 | } else if v == nil { 80 | return 0, nil 81 | } else { 82 | res, err := strconv.ParseInt(String(v), 10, 8) 83 | return int8(res), err 84 | } 85 | } 86 | 87 | func StrPutInt64(v int64) []byte { 88 | return strconv.AppendInt(nil, v, 10) 89 | } 90 | 91 | func MinUInt32(a uint32, b uint32) uint32 { 92 | if a > b { 93 | return b 94 | } else { 95 | return a 96 | } 97 | } 98 | 99 | func MaxUInt32(a uint32, b uint32) uint32 { 100 | if a > b { 101 | return a 102 | } else { 103 | return b 104 | } 105 | } 106 | 107 | func MaxInt32(a int32, b int32) int32 { 108 | if a > b { 109 | return a 110 | } else { 111 | return b 112 | } 113 | } 114 | -------------------------------------------------------------------------------- /t_kv_test.go: -------------------------------------------------------------------------------- 1 | package nodb 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | ) 7 | 8 | func TestKVCodec(t *testing.T) { 9 | db := getTestDB() 10 | 11 | ek := db.encodeKVKey([]byte("key")) 12 | 13 | if k, err := db.decodeKVKey(ek); err != nil { 14 | t.Fatal(err) 15 | } else if string(k) != "key" { 16 | t.Fatal(string(k)) 17 | } 18 | } 19 | 20 | func TestDBKV(t *testing.T) { 21 | db := getTestDB() 22 | 23 | key1 := []byte("testdb_kv_a") 24 | 25 | if err := 
db.Set(key1, []byte("hello world 1")); err != nil { 26 | t.Fatal(err) 27 | } 28 | 29 | key2 := []byte("testdb_kv_b") 30 | 31 | if err := db.Set(key2, []byte("hello world 2")); err != nil { 32 | t.Fatal(err) 33 | } 34 | 35 | ay, _ := db.MGet(key1, key2) 36 | 37 | if v1 := ay[0]; string(v1) != "hello world 1" { 38 | t.Fatal(string(v1)) 39 | } 40 | 41 | if v2 := ay[1]; string(v2) != "hello world 2" { 42 | t.Fatal(string(v2)) 43 | } 44 | 45 | } 46 | 47 | func TestKVPersist(t *testing.T) { 48 | db := getTestDB() 49 | 50 | key := []byte("persist") 51 | db.Set(key, []byte{}) 52 | 53 | if n, err := db.Persist(key); err != nil { 54 | t.Fatal(err) 55 | } else if n != 0 { 56 | t.Fatal(n) 57 | } 58 | 59 | if _, err := db.Expire(key, 10); err != nil { 60 | t.Fatal(err) 61 | } 62 | 63 | if n, err := db.Persist(key); err != nil { 64 | t.Fatal(err) 65 | } else if n != 1 { 66 | t.Fatal(n) 67 | } 68 | } 69 | func TestKVFlush(t *testing.T) { 70 | db := getTestDB() 71 | db.FlushAll() 72 | 73 | for i := 0; i < 2000; i++ { 74 | key := fmt.Sprintf("%d", i) 75 | if err := db.Set([]byte(key), []byte("v")); err != nil { 76 | t.Fatal(err.Error()) 77 | } 78 | } 79 | 80 | if v, err := db.Scan(nil, 3000, true, ""); err != nil { 81 | t.Fatal(err.Error()) 82 | } else if len(v) != 2000 { 83 | t.Fatal("invalid value ", len(v)) 84 | } 85 | 86 | for i := 0; i < 2000; i++ { 87 | key := fmt.Sprintf("%d", i) 88 | if v, err := db.Get([]byte(key)); err != nil { 89 | t.Fatal(err.Error()) 90 | } else if string(v) != "v" { 91 | t.Fatal("invalid value ", v) 92 | } 93 | } 94 | 95 | if n, err := db.flush(); err != nil { 96 | t.Fatal(err.Error()) 97 | } else if n != 2000 { 98 | t.Fatal("invalid value ", n) 99 | } 100 | 101 | if v, err := db.Scan(nil, 3000, true, ""); err != nil { 102 | t.Fatal(err.Error()) 103 | } else if len(v) != 0 { 104 | t.Fatal("invalid value length ", len(v)) 105 | } 106 | 107 | for i := 0; i < 2000; i++ { 108 | 109 | key := []byte(fmt.Sprintf("%d", i)) 110 | 111 | if v, err := 
db.Get(key); err != nil { 112 | t.Fatal(err.Error()) 113 | } else if v != nil { 114 | 115 | t.Fatal("invalid value ", v) 116 | } 117 | } 118 | } 119 | -------------------------------------------------------------------------------- /nodb.go: -------------------------------------------------------------------------------- 1 | package nodb 2 | 3 | import ( 4 | "fmt" 5 | "sync" 6 | "time" 7 | 8 | "github.com/lunny/log" 9 | "github.com/lunny/nodb/config" 10 | "github.com/lunny/nodb/store" 11 | ) 12 | 13 | type Nodb struct { 14 | cfg *config.Config 15 | 16 | ldb *store.DB 17 | dbs [MaxDBNumber]*DB 18 | 19 | quit chan struct{} 20 | jobs *sync.WaitGroup 21 | 22 | binlog *BinLog 23 | 24 | wLock sync.RWMutex //allow one write at same time 25 | commitLock sync.Mutex //allow one write commit at same time 26 | } 27 | 28 | func Open(cfg *config.Config) (*Nodb, error) { 29 | if len(cfg.DataDir) == 0 { 30 | cfg.DataDir = config.DefaultDataDir 31 | } 32 | 33 | ldb, err := store.Open(cfg) 34 | if err != nil { 35 | return nil, err 36 | } 37 | 38 | l := new(Nodb) 39 | 40 | l.quit = make(chan struct{}) 41 | l.jobs = new(sync.WaitGroup) 42 | 43 | l.ldb = ldb 44 | 45 | if cfg.BinLog.MaxFileNum > 0 && cfg.BinLog.MaxFileSize > 0 { 46 | l.binlog, err = NewBinLog(cfg) 47 | if err != nil { 48 | return nil, err 49 | } 50 | } else { 51 | l.binlog = nil 52 | } 53 | 54 | for i := uint8(0); i < MaxDBNumber; i++ { 55 | l.dbs[i] = l.newDB(i) 56 | } 57 | 58 | l.activeExpireCycle() 59 | 60 | return l, nil 61 | } 62 | 63 | func (l *Nodb) Close() { 64 | close(l.quit) 65 | l.jobs.Wait() 66 | 67 | l.ldb.Close() 68 | 69 | if l.binlog != nil { 70 | l.binlog.Close() 71 | l.binlog = nil 72 | } 73 | } 74 | 75 | func (l *Nodb) Select(index int) (*DB, error) { 76 | if index < 0 || index >= int(MaxDBNumber) { 77 | return nil, fmt.Errorf("invalid db index %d", index) 78 | } 79 | 80 | return l.dbs[index], nil 81 | } 82 | 83 | func (l *Nodb) FlushAll() error { 84 | for index, db := range l.dbs { 85 | if _, err := 
db.FlushAll(); err != nil { 86 | log.Error("flush db %d error %s", index, err.Error()) 87 | } 88 | } 89 | 90 | return nil 91 | } 92 | 93 | // very dangerous to use 94 | func (l *Nodb) DataDB() *store.DB { 95 | return l.ldb 96 | } 97 | 98 | func (l *Nodb) activeExpireCycle() { 99 | var executors []*elimination = make([]*elimination, len(l.dbs)) 100 | for i, db := range l.dbs { 101 | executors[i] = db.newEliminator() 102 | } 103 | 104 | l.jobs.Add(1) 105 | go func() { 106 | tick := time.NewTicker(1 * time.Second) 107 | end := false 108 | done := make(chan struct{}) 109 | for !end { 110 | select { 111 | case <-tick.C: 112 | go func() { 113 | for _, eli := range executors { 114 | eli.active() 115 | } 116 | done <- struct{}{} 117 | }() 118 | <-done 119 | case <-l.quit: 120 | end = true 121 | break 122 | } 123 | } 124 | 125 | tick.Stop() 126 | l.jobs.Done() 127 | }() 128 | } 129 | -------------------------------------------------------------------------------- /store/tx_test.go: -------------------------------------------------------------------------------- 1 | package store 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/lunny/nodb/store/driver" 7 | ) 8 | 9 | func TestTx(t *testing.T) { 10 | 11 | } 12 | 13 | func testTx(db *DB, t *testing.T) { 14 | if tx, err := db.Begin(); err != nil { 15 | if err == driver.ErrTxSupport { 16 | return 17 | } else { 18 | t.Fatal(err) 19 | } 20 | } else { 21 | tx.Rollback() 22 | } 23 | 24 | key1 := []byte("1") 25 | key2 := []byte("2") 26 | key3 := []byte("3") 27 | key4 := []byte("4") 28 | 29 | db.Put(key1, []byte("1")) 30 | db.Put(key2, []byte("2")) 31 | 32 | tx, err := db.Begin() 33 | if err != nil { 34 | t.Fatal(err) 35 | } 36 | 37 | if err := tx.Put(key1, []byte("a")); err != nil { 38 | t.Fatal(err) 39 | } 40 | 41 | if err := tx.Put(key2, []byte("b")); err != nil { 42 | t.Fatal(err) 43 | } 44 | 45 | if err := tx.Put(key3, []byte("c")); err != nil { 46 | t.Fatal(err) 47 | } 48 | 49 | if err := tx.Put(key4, []byte("d")); err != nil { 
50 | t.Fatal(err) 51 | } 52 | 53 | it := tx.NewIterator() 54 | 55 | it.Seek(key1) 56 | 57 | if !it.Valid() { 58 | t.Fatal("must valid") 59 | } else if string(it.Value()) != "a" { 60 | t.Fatal(string(it.Value())) 61 | } 62 | 63 | it.SeekToFirst() 64 | 65 | if !it.Valid() { 66 | t.Fatal("must valid") 67 | } else if string(it.Value()) != "a" { 68 | t.Fatal(string(it.Value())) 69 | } 70 | 71 | it.Seek(key2) 72 | 73 | if !it.Valid() { 74 | t.Fatal("must valid") 75 | } else if string(it.Value()) != "b" { 76 | t.Fatal(string(it.Value())) 77 | } 78 | 79 | it.Next() 80 | 81 | if !it.Valid() { 82 | t.Fatal("must valid") 83 | } else if string(it.Value()) != "c" { 84 | t.Fatal(string(it.Value())) 85 | } 86 | 87 | it.SeekToLast() 88 | 89 | if !it.Valid() { 90 | t.Fatal("must valid") 91 | } else if string(it.Value()) != "d" { 92 | t.Fatal(string(it.Value())) 93 | } 94 | 95 | it.Close() 96 | 97 | tx.Rollback() 98 | 99 | if v, err := db.Get(key1); err != nil { 100 | t.Fatal(err) 101 | } else if string(v) != "1" { 102 | t.Fatal(string(v)) 103 | } 104 | 105 | tx, err = db.Begin() 106 | if err != nil { 107 | t.Fatal(err) 108 | } 109 | 110 | if err := tx.Put(key1, []byte("a")); err != nil { 111 | t.Fatal(err) 112 | } 113 | 114 | it = tx.NewIterator() 115 | 116 | it.Seek(key2) 117 | 118 | if !it.Valid() { 119 | t.Fatal("must valid") 120 | } else if string(it.Value()) != "2" { 121 | t.Fatal(string(it.Value())) 122 | } 123 | 124 | it.Close() 125 | 126 | if err := tx.Commit(); err != nil { 127 | t.Fatal(err) 128 | } 129 | 130 | if v, err := db.Get(key1); err != nil { 131 | t.Fatal(err) 132 | } else if string(v) != "a" { 133 | t.Fatal(string(v)) 134 | } 135 | } 136 | -------------------------------------------------------------------------------- /t_list_test.go: -------------------------------------------------------------------------------- 1 | package nodb 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | ) 7 | 8 | func TestListCodec(t *testing.T) { 9 | db := getTestDB() 10 | 11 | key := 
[]byte("key") 12 | 13 | ek := db.lEncodeMetaKey(key) 14 | if k, err := db.lDecodeMetaKey(ek); err != nil { 15 | t.Fatal(err) 16 | } else if string(k) != "key" { 17 | t.Fatal(string(k)) 18 | } 19 | 20 | ek = db.lEncodeListKey(key, 1024) 21 | if k, seq, err := db.lDecodeListKey(ek); err != nil { 22 | t.Fatal(err) 23 | } else if string(k) != "key" { 24 | t.Fatal(string(k)) 25 | } else if seq != 1024 { 26 | t.Fatal(seq) 27 | } 28 | } 29 | 30 | func TestDBList(t *testing.T) { 31 | db := getTestDB() 32 | 33 | key := []byte("testdb_list_a") 34 | 35 | if n, err := db.RPush(key, []byte("1"), []byte("2"), []byte("3")); err != nil { 36 | t.Fatal(err) 37 | } else if n != 3 { 38 | t.Fatal(n) 39 | } 40 | 41 | if ay, err := db.LRange(key, 0, -1); err != nil { 42 | t.Fatal(err) 43 | } else if len(ay) != 3 { 44 | t.Fatal(len(ay)) 45 | } else { 46 | for i := range ay { 47 | if ay[i][0] != '1'+byte(i) { 48 | t.Fatal(string(ay[i])) 49 | } 50 | } 51 | } 52 | 53 | if k, err := db.RPop(key); err != nil { 54 | t.Fatal(err) 55 | } else if string(k) != "3" { 56 | t.Fatal(string(k)) 57 | } 58 | 59 | if k, err := db.LPop(key); err != nil { 60 | t.Fatal(err) 61 | } else if string(k) != "1" { 62 | t.Fatal(string(k)) 63 | } 64 | 65 | if llen, err := db.LLen(key); err != nil { 66 | t.Fatal(err) 67 | } else if llen != 1 { 68 | t.Fatal(llen) 69 | } 70 | 71 | if num, err := db.LClear(key); err != nil { 72 | t.Fatal(err) 73 | } else if num != 1 { 74 | t.Fatal(num) 75 | } 76 | 77 | if llen, _ := db.LLen(key); llen != 0 { 78 | t.Fatal(llen) 79 | } 80 | } 81 | 82 | func TestListPersist(t *testing.T) { 83 | db := getTestDB() 84 | 85 | key := []byte("persist") 86 | db.LPush(key, []byte("a")) 87 | 88 | if n, err := db.LPersist(key); err != nil { 89 | t.Fatal(err) 90 | } else if n != 0 { 91 | t.Fatal(n) 92 | } 93 | 94 | if _, err := db.LExpire(key, 10); err != nil { 95 | t.Fatal(err) 96 | } 97 | 98 | if n, err := db.LPersist(key); err != nil { 99 | t.Fatal(err) 100 | } else if n != 1 { 101 | t.Fatal(n) 102 
| } 103 | } 104 | 105 | func TestLFlush(t *testing.T) { 106 | db := getTestDB() 107 | db.FlushAll() 108 | 109 | for i := 0; i < 2000; i++ { 110 | key := fmt.Sprintf("%d", i) 111 | if _, err := db.LPush([]byte(key), []byte("v")); err != nil { 112 | t.Fatal(err.Error()) 113 | } 114 | } 115 | 116 | if v, err := db.LScan(nil, 3000, true, ""); err != nil { 117 | t.Fatal(err.Error()) 118 | } else if len(v) != 2000 { 119 | t.Fatal("invalid value ", len(v)) 120 | } 121 | 122 | if n, err := db.lFlush(); err != nil { 123 | t.Fatal(err.Error()) 124 | } else if n != 2000 { 125 | t.Fatal("invalid value ", n) 126 | } 127 | 128 | if v, err := db.LScan(nil, 3000, true, ""); err != nil { 129 | t.Fatal(err.Error()) 130 | } else if len(v) != 0 { 131 | t.Fatal("invalid value length ", len(v)) 132 | } 133 | } 134 | -------------------------------------------------------------------------------- /config/config.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "io/ioutil" 5 | 6 | "github.com/BurntSushi/toml" 7 | ) 8 | 9 | type Size int 10 | 11 | const ( 12 | DefaultAddr string = "127.0.0.1:6380" 13 | DefaultHttpAddr string = "127.0.0.1:11181" 14 | 15 | DefaultDBName string = "goleveldb" 16 | 17 | DefaultDataDir string = "./data" 18 | ) 19 | 20 | const ( 21 | MaxBinLogFileSize int = 1024 * 1024 * 1024 22 | MaxBinLogFileNum int = 10000 23 | 24 | DefaultBinLogFileSize int = MaxBinLogFileSize 25 | DefaultBinLogFileNum int = 10 26 | ) 27 | 28 | type LevelDBConfig struct { 29 | Compression bool `toml:"compression"` 30 | BlockSize int `toml:"block_size"` 31 | WriteBufferSize int `toml:"write_buffer_size"` 32 | CacheSize int `toml:"cache_size"` 33 | MaxOpenFiles int `toml:"max_open_files"` 34 | } 35 | 36 | type LMDBConfig struct { 37 | MapSize int `toml:"map_size"` 38 | NoSync bool `toml:"nosync"` 39 | } 40 | 41 | type BinLogConfig struct { 42 | MaxFileSize int `toml:"max_file_size"` 43 | MaxFileNum int 
`toml:"max_file_num"` 44 | } 45 | 46 | type Config struct { 47 | DataDir string `toml:"data_dir"` 48 | 49 | DBName string `toml:"db_name"` 50 | 51 | LevelDB LevelDBConfig `toml:"leveldb"` 52 | 53 | LMDB LMDBConfig `toml:"lmdb"` 54 | 55 | BinLog BinLogConfig `toml:"binlog"` 56 | 57 | SlaveOf string `toml:"slaveof"` 58 | 59 | AccessLog string `toml:"access_log"` 60 | } 61 | 62 | func NewConfigWithFile(fileName string) (*Config, error) { 63 | data, err := ioutil.ReadFile(fileName) 64 | if err != nil { 65 | return nil, err 66 | } 67 | 68 | return NewConfigWithData(data) 69 | } 70 | 71 | func NewConfigWithData(data []byte) (*Config, error) { 72 | cfg := NewConfigDefault() 73 | 74 | _, err := toml.Decode(string(data), cfg) 75 | if err != nil { 76 | return nil, err 77 | } 78 | 79 | return cfg, nil 80 | } 81 | 82 | func NewConfigDefault() *Config { 83 | cfg := new(Config) 84 | 85 | cfg.DataDir = DefaultDataDir 86 | 87 | cfg.DBName = DefaultDBName 88 | 89 | // disable binlog 90 | cfg.BinLog.MaxFileNum = 0 91 | cfg.BinLog.MaxFileSize = 0 92 | 93 | // disable replication 94 | cfg.SlaveOf = "" 95 | 96 | // disable access log 97 | cfg.AccessLog = "" 98 | 99 | cfg.LMDB.MapSize = 20 * 1024 * 1024 100 | cfg.LMDB.NoSync = true 101 | 102 | return cfg 103 | } 104 | 105 | func (cfg *LevelDBConfig) Adjust() { 106 | if cfg.CacheSize <= 0 { 107 | cfg.CacheSize = 4 * 1024 * 1024 108 | } 109 | 110 | if cfg.BlockSize <= 0 { 111 | cfg.BlockSize = 4 * 1024 112 | } 113 | 114 | if cfg.WriteBufferSize <= 0 { 115 | cfg.WriteBufferSize = 4 * 1024 * 1024 116 | } 117 | 118 | if cfg.MaxOpenFiles < 1024 { 119 | cfg.MaxOpenFiles = 1024 120 | } 121 | } 122 | 123 | func (cfg *BinLogConfig) Adjust() { 124 | if cfg.MaxFileSize <= 0 { 125 | cfg.MaxFileSize = DefaultBinLogFileSize 126 | } else if cfg.MaxFileSize > MaxBinLogFileSize { 127 | cfg.MaxFileSize = MaxBinLogFileSize 128 | } 129 | 130 | if cfg.MaxFileNum <= 0 { 131 | cfg.MaxFileNum = DefaultBinLogFileNum 132 | } else if cfg.MaxFileNum > 
MaxBinLogFileNum { 133 | cfg.MaxFileNum = MaxBinLogFileNum 134 | } 135 | } 136 | -------------------------------------------------------------------------------- /t_hash_test.go: -------------------------------------------------------------------------------- 1 | package nodb 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | ) 7 | 8 | func TestHashCodec(t *testing.T) { 9 | db := getTestDB() 10 | 11 | key := []byte("key") 12 | field := []byte("field") 13 | 14 | ek := db.hEncodeSizeKey(key) 15 | if k, err := db.hDecodeSizeKey(ek); err != nil { 16 | t.Fatal(err) 17 | } else if string(k) != "key" { 18 | t.Fatal(string(k)) 19 | } 20 | 21 | ek = db.hEncodeHashKey(key, field) 22 | if k, f, err := db.hDecodeHashKey(ek); err != nil { 23 | t.Fatal(err) 24 | } else if string(k) != "key" { 25 | t.Fatal(string(k)) 26 | } else if string(f) != "field" { 27 | t.Fatal(string(f)) 28 | } 29 | } 30 | 31 | func TestDBHash(t *testing.T) { 32 | db := getTestDB() 33 | 34 | key := []byte("testdb_hash_a") 35 | 36 | if n, err := db.HSet(key, []byte("a"), []byte("hello world 1")); err != nil { 37 | t.Fatal(err) 38 | } else if n != 1 { 39 | t.Fatal(n) 40 | } 41 | 42 | if n, err := db.HSet(key, []byte("b"), []byte("hello world 2")); err != nil { 43 | t.Fatal(err) 44 | } else if n != 1 { 45 | t.Fatal(n) 46 | } 47 | 48 | ay, _ := db.HMget(key, []byte("a"), []byte("b")) 49 | 50 | if v1 := ay[0]; string(v1) != "hello world 1" { 51 | t.Fatal(string(v1)) 52 | } 53 | 54 | if v2 := ay[1]; string(v2) != "hello world 2" { 55 | t.Fatal(string(v2)) 56 | } 57 | 58 | } 59 | 60 | func TestHashPersist(t *testing.T) { 61 | db := getTestDB() 62 | 63 | key := []byte("persist") 64 | db.HSet(key, []byte("field"), []byte{}) 65 | 66 | if n, err := db.HPersist(key); err != nil { 67 | t.Fatal(err) 68 | } else if n != 0 { 69 | t.Fatal(n) 70 | } 71 | 72 | if _, err := db.HExpire(key, 10); err != nil { 73 | t.Fatal(err) 74 | } 75 | 76 | if n, err := db.HPersist(key); err != nil { 77 | t.Fatal(err) 78 | } else if n != 1 { 79 | 
t.Fatal(n) 80 | } 81 | } 82 | 83 | func TestHFlush(t *testing.T) { 84 | db := getTestDB() 85 | db.FlushAll() 86 | 87 | for i := 0; i < 2000; i++ { 88 | key := fmt.Sprintf("%d", i) 89 | if _, err := db.HSet([]byte(key), []byte("f"), []byte("v")); err != nil { 90 | t.Fatal(err.Error()) 91 | } 92 | } 93 | 94 | if v, err := db.HScan(nil, 3000, true, ""); err != nil { 95 | t.Fatal(err.Error()) 96 | } else if len(v) != 2000 { 97 | t.Fatal("invalid value ", len(v)) 98 | } 99 | 100 | for i := 0; i < 2000; i++ { 101 | key := fmt.Sprintf("%d", i) 102 | if v, err := db.HGet([]byte(key), []byte("f")); err != nil { 103 | t.Fatal(err.Error()) 104 | } else if string(v) != "v" { 105 | t.Fatal("invalid value ", v) 106 | } 107 | } 108 | 109 | if n, err := db.hFlush(); err != nil { 110 | t.Fatal(err.Error()) 111 | } else if n != 2000 { 112 | t.Fatal("invalid value ", n) 113 | } 114 | 115 | if v, err := db.HScan(nil, 3000, true, ""); err != nil { 116 | t.Fatal(err.Error()) 117 | } else if len(v) != 0 { 118 | t.Fatal("invalid value length ", len(v)) 119 | } 120 | 121 | for i := 0; i < 2000; i++ { 122 | 123 | key := []byte(fmt.Sprintf("%d", i)) 124 | 125 | if v, err := db.HGet(key, []byte("f")); err != nil { 126 | t.Fatal(err.Error()) 127 | } else if v != nil { 128 | 129 | t.Fatal("invalid value ", v) 130 | } 131 | } 132 | 133 | } 134 | -------------------------------------------------------------------------------- /tools/redis_import/test.py: -------------------------------------------------------------------------------- 1 | #coding: utf-8 2 | 3 | import random, string 4 | 5 | import redis 6 | import ledis 7 | 8 | from redis_import import copy, scan, set_ttl 9 | 10 | rds = redis.Redis() 11 | lds = ledis.Ledis(port=6380) 12 | 13 | 14 | def random_word(words, length): 15 | return ''.join(random.choice(words) for i in range(length)) 16 | 17 | 18 | def get_words(): 19 | word_file = "/usr/share/dict/words" 20 | words = open(word_file).read().splitlines() 21 | return words[:1000] 22 | 23 | 
24 | def get_mapping(words, length=1000): 25 | d = {} 26 | for word in words: 27 | d[word] = random.randint(1, length) 28 | return d 29 | 30 | 31 | def random_string(client, words, length=1000): 32 | d = get_mapping(words, length) 33 | client.mset(d) 34 | 35 | 36 | def random_hash(client, words, length=1000): 37 | d = get_mapping(words, length) 38 | client.hmset("hashName", d) 39 | 40 | 41 | def random_list(client, words, length=1000): 42 | client.lpush("listName", *words) 43 | 44 | 45 | def random_zset(client, words, length=1000): 46 | d = get_mapping(words, length) 47 | client.zadd("zsetName", **d) 48 | 49 | 50 | def test(): 51 | words = get_words() 52 | print "Flush all redis data before insert new." 53 | rds.flushall() 54 | 55 | random_string(rds, words) 56 | print "random_string done" 57 | 58 | random_hash(rds, words) 59 | print "random_hash done" 60 | 61 | random_list(rds, words) 62 | print "random_list done" 63 | 64 | random_zset(rds, words) 65 | print "random_zset done" 66 | 67 | 68 | lds.lclear("listName") 69 | lds.hclear("hashName") 70 | lds.zclear("zsetName") 71 | copy(rds, lds, convert=True) 72 | 73 | # for all keys 74 | keys = scan(rds, 1000) 75 | for key in keys: 76 | if rds.type(key) == "string" and not lds.exists(key): 77 | print key 78 | print "String data not consistent" 79 | 80 | # for list 81 | l1 = rds.lrange("listName", 0, -1) 82 | l2 = lds.lrange("listName", 0, -1) 83 | assert l1 == l2 84 | 85 | #for hash 86 | for key in keys: 87 | if rds.type(key) == "hash": 88 | assert rds.hgetall(key) == lds.hgetall(key) 89 | assert sorted(rds.hkeys(key)) == sorted(lds.hkeys(key)) 90 | assert sorted(rds.hvals(key)) == sorted(lds.hvals(key)) 91 | 92 | # for zset 93 | z1 = rds.zrange("zsetName", 0, -1, withscores=True) 94 | z2 = lds.zrange("zsetName", 0, -1, withscores=True) 95 | assert z1 == z2 96 | 97 | 98 | def ledis_ttl(ledis_client, key, k_type): 99 | ttls = { 100 | "string": lds.ttl, 101 | "list": lds.lttl, 102 | "hash": lds.httl, 103 | "zset": 
lds.zttl, 104 | } 105 | return ttls[k_type](key) 106 | 107 | 108 | def test_ttl(): 109 | keys, total = scan(rds, 1000) 110 | for key in keys: 111 | k_type = rds.type(key) 112 | rds.expire(key, (60 * 60 * 24)) 113 | set_ttl(rds, lds, key, k_type) 114 | if rds.ttl(key): 115 | assert ledis_ttl(lds, key, k_type) > 0 116 | 117 | if __name__ == "__main__": 118 | test() 119 | test_ttl() 120 | print "Test passed." 121 | -------------------------------------------------------------------------------- /replication_test.go: -------------------------------------------------------------------------------- 1 | package nodb 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "os" 7 | "path" 8 | "testing" 9 | 10 | "github.com/lunny/nodb/config" 11 | "github.com/lunny/nodb/store" 12 | ) 13 | 14 | func checkLedisEqual(master *Nodb, slave *Nodb) error { 15 | it := master.ldb.RangeLimitIterator(nil, nil, store.RangeClose, 0, -1) 16 | for ; it.Valid(); it.Next() { 17 | key := it.Key() 18 | value := it.Value() 19 | 20 | if v, err := slave.ldb.Get(key); err != nil { 21 | return err 22 | } else if !bytes.Equal(v, value) { 23 | return fmt.Errorf("replication error %d != %d", len(v), len(value)) 24 | } 25 | } 26 | 27 | return nil 28 | } 29 | 30 | func TestReplication(t *testing.T) { 31 | var master *Nodb 32 | var slave *Nodb 33 | var err error 34 | 35 | cfgM := new(config.Config) 36 | cfgM.DataDir = "/tmp/test_repl/master" 37 | 38 | cfgM.BinLog.MaxFileNum = 10 39 | cfgM.BinLog.MaxFileSize = 50 40 | 41 | os.RemoveAll(cfgM.DataDir) 42 | 43 | master, err = Open(cfgM) 44 | if err != nil { 45 | t.Fatal(err) 46 | } 47 | 48 | cfgS := new(config.Config) 49 | cfgS.DataDir = "/tmp/test_repl/slave" 50 | 51 | os.RemoveAll(cfgS.DataDir) 52 | 53 | slave, err = Open(cfgS) 54 | if err != nil { 55 | t.Fatal(err) 56 | } 57 | 58 | db, _ := master.Select(0) 59 | db.Set([]byte("a"), []byte("value")) 60 | db.Set([]byte("b"), []byte("value")) 61 | db.Set([]byte("c"), []byte("value")) 62 | 63 | if tx, err := db.Begin(); 
err == nil { 64 | tx.HSet([]byte("a"), []byte("1"), []byte("value")) 65 | tx.HSet([]byte("b"), []byte("2"), []byte("value")) 66 | tx.HSet([]byte("c"), []byte("3"), []byte("value")) 67 | tx.Commit() 68 | } else { 69 | db.HSet([]byte("a"), []byte("1"), []byte("value")) 70 | db.HSet([]byte("b"), []byte("2"), []byte("value")) 71 | db.HSet([]byte("c"), []byte("3"), []byte("value")) 72 | } 73 | 74 | m, _ := db.Multi() 75 | m.Set([]byte("a1"), []byte("value")) 76 | m.Set([]byte("b1"), []byte("value")) 77 | m.Set([]byte("c1"), []byte("value")) 78 | m.Close() 79 | 80 | for _, name := range master.binlog.LogNames() { 81 | p := path.Join(master.binlog.LogPath(), name) 82 | 83 | err = slave.ReplicateFromBinLog(p) 84 | if err != nil { 85 | t.Fatal(err) 86 | } 87 | } 88 | 89 | if err = checkLedisEqual(master, slave); err != nil { 90 | t.Fatal(err) 91 | } 92 | 93 | slave.FlushAll() 94 | 95 | db.Set([]byte("a1"), []byte("value")) 96 | db.Set([]byte("b1"), []byte("value")) 97 | db.Set([]byte("c1"), []byte("value")) 98 | 99 | db.HSet([]byte("a1"), []byte("1"), []byte("value")) 100 | db.HSet([]byte("b1"), []byte("2"), []byte("value")) 101 | db.HSet([]byte("c1"), []byte("3"), []byte("value")) 102 | 103 | if tx, err := db.Begin(); err == nil { 104 | tx.HSet([]byte("a1"), []byte("1"), []byte("value1")) 105 | tx.HSet([]byte("b1"), []byte("2"), []byte("value1")) 106 | tx.HSet([]byte("c1"), []byte("3"), []byte("value1")) 107 | tx.Rollback() 108 | } 109 | 110 | info := new(BinLogAnchor) 111 | info.LogFileIndex = 1 112 | info.LogPos = 0 113 | var buf bytes.Buffer 114 | var n int 115 | 116 | for { 117 | buf.Reset() 118 | n, err = master.ReadEventsTo(info, &buf) 119 | if err != nil { 120 | t.Fatal(err) 121 | } else if info.LogFileIndex == -1 { 122 | t.Fatal("invalid log file index -1") 123 | } else if info.LogFileIndex == 0 { 124 | t.Fatal("invalid log file index 0") 125 | } else { 126 | if err = slave.ReplicateFromReader(&buf); err != nil { 127 | t.Fatal(err) 128 | } 129 | if n == 0 { 130 | 
break 131 | } 132 | } 133 | } 134 | 135 | if err = checkLedisEqual(master, slave); err != nil { 136 | t.Fatal(err) 137 | } 138 | } 139 | -------------------------------------------------------------------------------- /scan.go: -------------------------------------------------------------------------------- 1 | package nodb 2 | 3 | import ( 4 | "bytes" 5 | "errors" 6 | "regexp" 7 | 8 | "github.com/lunny/nodb/store" 9 | ) 10 | 11 | var errDataType = errors.New("error data type") 12 | var errMetaKey = errors.New("error meta key") 13 | 14 | // Seek search the prefix key 15 | func (db *DB) Seek(key []byte) (*store.Iterator, error) { 16 | return db.seek(KVType, key) 17 | } 18 | 19 | func (db *DB) seek(dataType byte, key []byte) (*store.Iterator, error) { 20 | var minKey []byte 21 | var err error 22 | 23 | if len(key) > 0 { 24 | if err = checkKeySize(key); err != nil { 25 | return nil, err 26 | } 27 | if minKey, err = db.encodeMetaKey(dataType, key); err != nil { 28 | return nil, err 29 | } 30 | 31 | } else { 32 | if minKey, err = db.encodeMinKey(dataType); err != nil { 33 | return nil, err 34 | } 35 | } 36 | 37 | it := db.bucket.NewIterator() 38 | it.Seek(minKey) 39 | return it, nil 40 | } 41 | 42 | func (db *DB) MaxKey() ([]byte, error) { 43 | return db.encodeMaxKey(KVType) 44 | } 45 | 46 | func (db *DB) Key(it *store.Iterator) ([]byte, error) { 47 | return db.decodeMetaKey(KVType, it.Key()) 48 | } 49 | 50 | func (db *DB) scan(dataType byte, key []byte, count int, inclusive bool, match string) ([][]byte, error) { 51 | var minKey, maxKey []byte 52 | var err error 53 | var r *regexp.Regexp 54 | 55 | if len(match) > 0 { 56 | if r, err = regexp.Compile(match); err != nil { 57 | return nil, err 58 | } 59 | } 60 | 61 | if len(key) > 0 { 62 | if err = checkKeySize(key); err != nil { 63 | return nil, err 64 | } 65 | if minKey, err = db.encodeMetaKey(dataType, key); err != nil { 66 | return nil, err 67 | } 68 | 69 | } else { 70 | if minKey, err = db.encodeMinKey(dataType); err 
!= nil { 71 | return nil, err 72 | } 73 | } 74 | 75 | if maxKey, err = db.encodeMaxKey(dataType); err != nil { 76 | return nil, err 77 | } 78 | 79 | if count <= 0 { 80 | count = defaultScanCount 81 | } 82 | 83 | v := make([][]byte, 0, count) 84 | 85 | it := db.bucket.NewIterator() 86 | it.Seek(minKey) 87 | 88 | if !inclusive { 89 | if it.Valid() && bytes.Equal(it.RawKey(), minKey) { 90 | it.Next() 91 | } 92 | } 93 | 94 | for i := 0; it.Valid() && i < count && bytes.Compare(it.RawKey(), maxKey) < 0; it.Next() { 95 | if k, err := db.decodeMetaKey(dataType, it.Key()); err != nil { 96 | continue 97 | } else if r != nil && !r.Match(k) { 98 | continue 99 | } else { 100 | v = append(v, k) 101 | i++ 102 | } 103 | } 104 | it.Close() 105 | return v, nil 106 | } 107 | 108 | func (db *DB) encodeMinKey(dataType byte) ([]byte, error) { 109 | return db.encodeMetaKey(dataType, nil) 110 | } 111 | 112 | func (db *DB) encodeMaxKey(dataType byte) ([]byte, error) { 113 | k, err := db.encodeMetaKey(dataType, nil) 114 | if err != nil { 115 | return nil, err 116 | } 117 | k[len(k)-1] = dataType + 1 118 | return k, nil 119 | } 120 | 121 | func (db *DB) encodeMetaKey(dataType byte, key []byte) ([]byte, error) { 122 | switch dataType { 123 | case KVType: 124 | return db.encodeKVKey(key), nil 125 | case LMetaType: 126 | return db.lEncodeMetaKey(key), nil 127 | case HSizeType: 128 | return db.hEncodeSizeKey(key), nil 129 | case ZSizeType: 130 | return db.zEncodeSizeKey(key), nil 131 | case BitMetaType: 132 | return db.bEncodeMetaKey(key), nil 133 | case SSizeType: 134 | return db.sEncodeSizeKey(key), nil 135 | default: 136 | return nil, errDataType 137 | } 138 | } 139 | func (db *DB) decodeMetaKey(dataType byte, ek []byte) ([]byte, error) { 140 | if len(ek) < 2 || ek[0] != db.index || ek[1] != dataType { 141 | return nil, errMetaKey 142 | } 143 | return ek[2:], nil 144 | } 145 | -------------------------------------------------------------------------------- /tx_test.go: 
-------------------------------------------------------------------------------- 1 | package nodb 2 | 3 | import "testing" 4 | 5 | func testTxRollback(t *testing.T, db *DB) { 6 | var err error 7 | key1 := []byte("tx_key1") 8 | key2 := []byte("tx_key2") 9 | field2 := []byte("tx_field2") 10 | 11 | err = db.Set(key1, []byte("value")) 12 | if err != nil { 13 | t.Fatal(err) 14 | } 15 | 16 | _, err = db.HSet(key2, field2, []byte("value")) 17 | if err != nil { 18 | t.Fatal(err) 19 | } 20 | 21 | var tx *Tx 22 | tx, err = db.Begin() 23 | if err != nil { 24 | t.Fatal(err) 25 | } 26 | 27 | defer tx.Rollback() 28 | 29 | err = tx.Set(key1, []byte("1")) 30 | 31 | if err != nil { 32 | t.Fatal(err) 33 | } 34 | 35 | _, err = tx.HSet(key2, field2, []byte("2")) 36 | 37 | if err != nil { 38 | t.Fatal(err) 39 | } 40 | 41 | _, err = tx.HSet([]byte("no_key"), field2, []byte("2")) 42 | 43 | if err != nil { 44 | t.Fatal(err) 45 | } 46 | 47 | if v, err := tx.Get(key1); err != nil { 48 | t.Fatal(err) 49 | } else if string(v) != "1" { 50 | t.Fatal(string(v)) 51 | } 52 | 53 | if v, err := tx.HGet(key2, field2); err != nil { 54 | t.Fatal(err) 55 | } else if string(v) != "2" { 56 | t.Fatal(string(v)) 57 | } 58 | 59 | err = tx.Rollback() 60 | if err != nil { 61 | t.Fatal(err) 62 | } 63 | 64 | if v, err := db.Get(key1); err != nil { 65 | t.Fatal(err) 66 | } else if string(v) != "value" { 67 | t.Fatal(string(v)) 68 | } 69 | 70 | if v, err := db.HGet(key2, field2); err != nil { 71 | t.Fatal(err) 72 | } else if string(v) != "value" { 73 | t.Fatal(string(v)) 74 | } 75 | } 76 | 77 | func testTxCommit(t *testing.T, db *DB) { 78 | var err error 79 | key1 := []byte("tx_key1") 80 | key2 := []byte("tx_key2") 81 | field2 := []byte("tx_field2") 82 | 83 | err = db.Set(key1, []byte("value")) 84 | if err != nil { 85 | t.Fatal(err) 86 | } 87 | 88 | _, err = db.HSet(key2, field2, []byte("value")) 89 | if err != nil { 90 | t.Fatal(err) 91 | } 92 | 93 | var tx *Tx 94 | tx, err = db.Begin() 95 | if err != nil { 96 | 
t.Fatal(err) 97 | } 98 | 99 | defer tx.Rollback() 100 | 101 | err = tx.Set(key1, []byte("1")) 102 | 103 | if err != nil { 104 | t.Fatal(err) 105 | } 106 | 107 | _, err = tx.HSet(key2, field2, []byte("2")) 108 | 109 | if err != nil { 110 | t.Fatal(err) 111 | } 112 | 113 | if v, err := tx.Get(key1); err != nil { 114 | t.Fatal(err) 115 | } else if string(v) != "1" { 116 | t.Fatal(string(v)) 117 | } 118 | 119 | if v, err := tx.HGet(key2, field2); err != nil { 120 | t.Fatal(err) 121 | } else if string(v) != "2" { 122 | t.Fatal(string(v)) 123 | } 124 | 125 | err = tx.Commit() 126 | if err != nil { 127 | t.Fatal(err) 128 | } 129 | 130 | if v, err := db.Get(key1); err != nil { 131 | t.Fatal(err) 132 | } else if string(v) != "1" { 133 | t.Fatal(string(v)) 134 | } 135 | 136 | if v, err := db.HGet(key2, field2); err != nil { 137 | t.Fatal(err) 138 | } else if string(v) != "2" { 139 | t.Fatal(string(v)) 140 | } 141 | } 142 | 143 | func testTxSelect(t *testing.T, db *DB) { 144 | tx, err := db.Begin() 145 | if err != nil { 146 | t.Fatal(err) 147 | } 148 | 149 | defer tx.Rollback() 150 | 151 | tx.Set([]byte("tx_select_1"), []byte("a")) 152 | 153 | tx.Select(1) 154 | 155 | tx.Set([]byte("tx_select_2"), []byte("b")) 156 | 157 | if err = tx.Commit(); err != nil { 158 | t.Fatal(err) 159 | } 160 | 161 | if v, err := db.Get([]byte("tx_select_1")); err != nil { 162 | t.Fatal(err) 163 | } else if string(v) != "a" { 164 | t.Fatal(string(v)) 165 | } 166 | 167 | if v, err := db.Get([]byte("tx_select_2")); err != nil { 168 | t.Fatal(err) 169 | } else if v != nil { 170 | t.Fatal("must nil") 171 | } 172 | 173 | db, _ = db.l.Select(1) 174 | 175 | if v, err := db.Get([]byte("tx_select_2")); err != nil { 176 | t.Fatal(err) 177 | } else if string(v) != "b" { 178 | t.Fatal(string(v)) 179 | } 180 | 181 | if v, err := db.Get([]byte("tx_select_1")); err != nil { 182 | t.Fatal(err) 183 | } else if v != nil { 184 | t.Fatal("must nil") 185 | } 186 | } 187 | 
-------------------------------------------------------------------------------- /store/goleveldb/db.go: -------------------------------------------------------------------------------- 1 | package goleveldb 2 | 3 | import ( 4 | "github.com/syndtr/goleveldb/leveldb" 5 | "github.com/syndtr/goleveldb/leveldb/cache" 6 | "github.com/syndtr/goleveldb/leveldb/filter" 7 | "github.com/syndtr/goleveldb/leveldb/opt" 8 | "github.com/syndtr/goleveldb/leveldb/storage" 9 | 10 | "github.com/lunny/nodb/config" 11 | "github.com/lunny/nodb/store/driver" 12 | 13 | "os" 14 | ) 15 | 16 | const defaultFilterBits int = 10 17 | 18 | type Store struct { 19 | } 20 | 21 | func (s Store) String() string { 22 | return DBName 23 | } 24 | 25 | type MemStore struct { 26 | } 27 | 28 | func (s MemStore) String() string { 29 | return MemDBName 30 | } 31 | 32 | type DB struct { 33 | path string 34 | 35 | cfg *config.LevelDBConfig 36 | 37 | db *leveldb.DB 38 | 39 | opts *opt.Options 40 | 41 | iteratorOpts *opt.ReadOptions 42 | 43 | cache cache.Cache 44 | 45 | filter filter.Filter 46 | } 47 | 48 | func (s Store) Open(path string, cfg *config.Config) (driver.IDB, error) { 49 | if err := os.MkdirAll(path, os.ModePerm); err != nil { 50 | return nil, err 51 | } 52 | 53 | db := new(DB) 54 | db.path = path 55 | db.cfg = &cfg.LevelDB 56 | 57 | db.initOpts() 58 | 59 | var err error 60 | db.db, err = leveldb.OpenFile(db.path, db.opts) 61 | 62 | if err != nil { 63 | return nil, err 64 | } 65 | 66 | return db, nil 67 | } 68 | 69 | func (s Store) Repair(path string, cfg *config.Config) error { 70 | db, err := leveldb.RecoverFile(path, newOptions(&cfg.LevelDB)) 71 | if err != nil { 72 | return err 73 | } 74 | 75 | db.Close() 76 | return nil 77 | } 78 | 79 | func (s MemStore) Open(path string, cfg *config.Config) (driver.IDB, error) { 80 | db := new(DB) 81 | db.path = path 82 | db.cfg = &cfg.LevelDB 83 | 84 | db.initOpts() 85 | 86 | var err error 87 | db.db, err = leveldb.Open(storage.NewMemStorage(), db.opts) 88 | 
if err != nil { 89 | return nil, err 90 | } 91 | 92 | return db, nil 93 | } 94 | 95 | func (s MemStore) Repair(path string, cfg *config.Config) error { 96 | return nil 97 | } 98 | 99 | func (db *DB) initOpts() { 100 | db.opts = newOptions(db.cfg) 101 | 102 | db.iteratorOpts = &opt.ReadOptions{} 103 | db.iteratorOpts.DontFillCache = true 104 | } 105 | 106 | func newOptions(cfg *config.LevelDBConfig) *opt.Options { 107 | opts := &opt.Options{} 108 | opts.ErrorIfMissing = false 109 | 110 | cfg.Adjust() 111 | 112 | //opts.BlockCacher = cache.NewLRU(cfg.CacheSize) 113 | opts.BlockCacheCapacity = cfg.CacheSize 114 | 115 | //we must use bloomfilter 116 | opts.Filter = filter.NewBloomFilter(defaultFilterBits) 117 | 118 | if !cfg.Compression { 119 | opts.Compression = opt.NoCompression 120 | } else { 121 | opts.Compression = opt.SnappyCompression 122 | } 123 | 124 | opts.BlockSize = cfg.BlockSize 125 | opts.WriteBuffer = cfg.WriteBufferSize 126 | 127 | return opts 128 | } 129 | 130 | func (db *DB) Close() error { 131 | return db.db.Close() 132 | } 133 | 134 | func (db *DB) Put(key, value []byte) error { 135 | return db.db.Put(key, value, nil) 136 | } 137 | 138 | // Get returns the value for key; a missing key yields (nil, nil), any other error is propagated. 139 | func (db *DB) Get(key []byte) ([]byte, error) { 139 | v, err := db.db.Get(key, nil) 140 | if err == leveldb.ErrNotFound { 141 | return nil, nil 142 | } 143 | return v, err // was `return v, nil`, which silently swallowed real read errors 144 | } 145 | 146 | func (db *DB) Delete(key []byte) error { 147 | return db.db.Delete(key, nil) 148 | } 149 | 150 | func (db *DB) NewWriteBatch() driver.IWriteBatch { 151 | wb := &WriteBatch{ 152 | db: db, 153 | wbatch: new(leveldb.Batch), 154 | } 155 | return wb 156 | } 157 | 158 | func (db *DB) NewIterator() driver.IIterator { 159 | it := &Iterator{ 160 | db.db.NewIterator(nil, db.iteratorOpts), 161 | } 162 | 163 | return it 164 | } 165 | 166 | func (db *DB) Begin() (driver.Tx, error) { 167 | return nil, driver.ErrTxSupport 168 | } 169 | 170 | func (db *DB) NewSnapshot() (driver.ISnapshot, error) { 171 | snapshot, err := db.db.GetSnapshot() 172
| if err != nil { 173 | return nil, err 174 | } 175 | 176 | s := &Snapshot{ 177 | db: db, 178 | snp: snapshot, 179 | } 180 | 181 | return s, nil 182 | } 183 | 184 | func init() { 185 | driver.Register(Store{}) 186 | driver.Register(MemStore{}) 187 | } 188 | -------------------------------------------------------------------------------- /nodb_db.go: -------------------------------------------------------------------------------- 1 | package nodb 2 | 3 | import ( 4 | "fmt" 5 | "sync" 6 | 7 | "github.com/lunny/nodb/store" 8 | ) 9 | 10 | type ibucket interface { 11 | Get(key []byte) ([]byte, error) 12 | 13 | Put(key []byte, value []byte) error 14 | Delete(key []byte) error 15 | 16 | NewIterator() *store.Iterator 17 | 18 | NewWriteBatch() store.WriteBatch 19 | 20 | RangeIterator(min []byte, max []byte, rangeType uint8) *store.RangeLimitIterator 21 | RevRangeIterator(min []byte, max []byte, rangeType uint8) *store.RangeLimitIterator 22 | RangeLimitIterator(min []byte, max []byte, rangeType uint8, offset int, count int) *store.RangeLimitIterator 23 | RevRangeLimitIterator(min []byte, max []byte, rangeType uint8, offset int, count int) *store.RangeLimitIterator 24 | } 25 | 26 | type DB struct { 27 | l *Nodb 28 | 29 | sdb *store.DB 30 | 31 | bucket ibucket 32 | 33 | index uint8 34 | 35 | kvBatch *batch 36 | listBatch *batch 37 | hashBatch *batch 38 | zsetBatch *batch 39 | binBatch *batch 40 | setBatch *batch 41 | 42 | status uint8 43 | } 44 | 45 | func (l *Nodb) newDB(index uint8) *DB { 46 | d := new(DB) 47 | 48 | d.l = l 49 | 50 | d.sdb = l.ldb 51 | 52 | d.bucket = d.sdb 53 | 54 | d.status = DBAutoCommit 55 | d.index = index 56 | 57 | d.kvBatch = d.newBatch() 58 | d.listBatch = d.newBatch() 59 | d.hashBatch = d.newBatch() 60 | d.zsetBatch = d.newBatch() 61 | d.binBatch = d.newBatch() 62 | d.setBatch = d.newBatch() 63 | 64 | return d 65 | } 66 | 67 | func (db *DB) newBatch() *batch { 68 | return db.l.newBatch(db.bucket.NewWriteBatch(), &dbBatchLocker{l: &sync.Mutex{}, 
wrLock: &db.l.wLock}, nil) 69 | } 70 | 71 | func (db *DB) Index() int { 72 | return int(db.index) 73 | } 74 | 75 | func (db *DB) IsAutoCommit() bool { 76 | return db.status == DBAutoCommit 77 | } 78 | 79 | func (db *DB) FlushAll() (drop int64, err error) { 80 | all := [...](func() (int64, error)){ 81 | db.flush, 82 | db.lFlush, 83 | db.hFlush, 84 | db.zFlush, 85 | db.bFlush, 86 | db.sFlush} 87 | 88 | for _, flush := range all { 89 | if n, e := flush(); e != nil { 90 | err = e 91 | return 92 | } else { 93 | drop += n 94 | } 95 | } 96 | 97 | return 98 | } 99 | 100 | func (db *DB) newEliminator() *elimination { 101 | eliminator := newEliminator(db) 102 | 103 | eliminator.regRetireContext(KVType, db.kvBatch, db.delete) 104 | eliminator.regRetireContext(ListType, db.listBatch, db.lDelete) 105 | eliminator.regRetireContext(HashType, db.hashBatch, db.hDelete) 106 | eliminator.regRetireContext(ZSetType, db.zsetBatch, db.zDelete) 107 | eliminator.regRetireContext(BitType, db.binBatch, db.bDelete) 108 | eliminator.regRetireContext(SetType, db.setBatch, db.sDelete) 109 | 110 | return eliminator 111 | } 112 | 113 | func (db *DB) flushRegion(t *batch, minKey []byte, maxKey []byte) (drop int64, err error) { 114 | it := db.bucket.RangeIterator(minKey, maxKey, store.RangeROpen) 115 | for ; it.Valid(); it.Next() { 116 | t.Delete(it.RawKey()) 117 | drop++ 118 | if drop&1023 == 0 { 119 | if err = t.Commit(); err != nil { 120 | return 121 | } 122 | } 123 | } 124 | it.Close() 125 | return 126 | } 127 | 128 | func (db *DB) flushType(t *batch, dataType byte) (drop int64, err error) { 129 | var deleteFunc func(t *batch, key []byte) int64 130 | var metaDataType byte 131 | switch dataType { 132 | case KVType: 133 | deleteFunc = db.delete 134 | metaDataType = KVType 135 | case ListType: 136 | deleteFunc = db.lDelete 137 | metaDataType = LMetaType 138 | case HashType: 139 | deleteFunc = db.hDelete 140 | metaDataType = HSizeType 141 | case ZSetType: 142 | deleteFunc = db.zDelete 143 | 
metaDataType = ZSizeType 144 | case BitType: 145 | deleteFunc = db.bDelete 146 | metaDataType = BitMetaType 147 | case SetType: 148 | deleteFunc = db.sDelete 149 | metaDataType = SSizeType 150 | default: 151 | return 0, fmt.Errorf("invalid data type: %s", TypeName[dataType]) 152 | } 153 | 154 | var keys [][]byte 155 | keys, err = db.scan(metaDataType, nil, 1024, false, "") 156 | for len(keys) != 0 && err == nil { // was `|| err != nil`: a persistent scan error spun forever; now the named err is returned instead 157 | for _, key := range keys { 158 | deleteFunc(t, key) 159 | db.rmExpire(t, dataType, key) 160 | 161 | } 162 | 163 | if err = t.Commit(); err != nil { 164 | return 165 | } else { 166 | drop += int64(len(keys)) 167 | } 168 | keys, err = db.scan(metaDataType, nil, 1024, false, "") 169 | } 170 | return 171 | } 172 | -------------------------------------------------------------------------------- /dump.go: -------------------------------------------------------------------------------- 1 | package nodb 2 | 3 | import ( 4 | "bufio" 5 | "bytes" 6 | "encoding/binary" 7 | "io" 8 | "os" 9 | 10 | "github.com/siddontang/go-snappy/snappy" 11 | ) 12 | 13 | //dump format 14 | // fileIndex(bigendian int64)|filePos(bigendian int64) 15 | // |keylen(bigendian uint16)|key|valuelen(bigendian uint32)|value......
16 | // 17 | //key and value are both compressed for fast transfer dump on network using snappy 18 | 19 | type BinLogAnchor struct { 20 | LogFileIndex int64 21 | LogPos int64 22 | } 23 | 24 | func (m *BinLogAnchor) WriteTo(w io.Writer) error { 25 | if err := binary.Write(w, binary.BigEndian, m.LogFileIndex); err != nil { 26 | return err 27 | } 28 | 29 | if err := binary.Write(w, binary.BigEndian, m.LogPos); err != nil { 30 | return err 31 | } 32 | return nil 33 | } 34 | 35 | func (m *BinLogAnchor) ReadFrom(r io.Reader) error { 36 | err := binary.Read(r, binary.BigEndian, &m.LogFileIndex) 37 | if err != nil { 38 | return err 39 | } 40 | 41 | err = binary.Read(r, binary.BigEndian, &m.LogPos) 42 | if err != nil { 43 | return err 44 | } 45 | 46 | return nil 47 | } 48 | 49 | func (l *Nodb) DumpFile(path string) error { 50 | f, err := os.Create(path) 51 | if err != nil { 52 | return err 53 | } 54 | defer f.Close() 55 | 56 | return l.Dump(f) 57 | } 58 | 59 | func (l *Nodb) Dump(w io.Writer) error { 60 | m := new(BinLogAnchor) 61 | 62 | var err error 63 | 64 | l.wLock.Lock() 65 | defer l.wLock.Unlock() 66 | 67 | if l.binlog != nil { 68 | m.LogFileIndex = l.binlog.LogFileIndex() 69 | m.LogPos = l.binlog.LogFilePos() 70 | } 71 | 72 | wb := bufio.NewWriterSize(w, 4096) 73 | if err = m.WriteTo(wb); err != nil { 74 | return err 75 | } 76 | 77 | it := l.ldb.NewIterator() 78 | it.SeekToFirst() 79 | 80 | compressBuf := make([]byte, 4096) 81 | 82 | var key []byte 83 | var value []byte 84 | for ; it.Valid(); it.Next() { 85 | key = it.RawKey() 86 | value = it.RawValue() 87 | 88 | if key, err = snappy.Encode(compressBuf, key); err != nil { 89 | return err 90 | } 91 | 92 | if err = binary.Write(wb, binary.BigEndian, uint16(len(key))); err != nil { 93 | return err 94 | } 95 | 96 | if _, err = wb.Write(key); err != nil { 97 | return err 98 | } 99 | 100 | if value, err = snappy.Encode(compressBuf, value); err != nil { 101 | return err 102 | } 103 | 104 | if err = binary.Write(wb, 
binary.BigEndian, uint32(len(value))); err != nil { 105 | return err 106 | } 107 | 108 | if _, err = wb.Write(value); err != nil { 109 | return err 110 | } 111 | } 112 | 113 | if err = wb.Flush(); err != nil { 114 | return err 115 | } 116 | 117 | compressBuf = nil 118 | 119 | return nil 120 | } 121 | 122 | func (l *Nodb) LoadDumpFile(path string) (*BinLogAnchor, error) { 123 | f, err := os.Open(path) 124 | if err != nil { 125 | return nil, err 126 | } 127 | defer f.Close() 128 | 129 | return l.LoadDump(f) 130 | } 131 | 132 | func (l *Nodb) LoadDump(r io.Reader) (*BinLogAnchor, error) { 133 | l.wLock.Lock() 134 | defer l.wLock.Unlock() 135 | 136 | info := new(BinLogAnchor) 137 | 138 | rb := bufio.NewReaderSize(r, 4096) 139 | 140 | err := info.ReadFrom(rb) 141 | if err != nil { 142 | return nil, err 143 | } 144 | 145 | var keyLen uint16 146 | var valueLen uint32 147 | 148 | var keyBuf bytes.Buffer 149 | var valueBuf bytes.Buffer 150 | 151 | deKeyBuf := make([]byte, 4096) 152 | deValueBuf := make([]byte, 4096) 153 | 154 | var key, value []byte 155 | 156 | for { 157 | if err = binary.Read(rb, binary.BigEndian, &keyLen); err != nil && err != io.EOF { 158 | return nil, err 159 | } else if err == io.EOF { 160 | break 161 | } 162 | 163 | if _, err = io.CopyN(&keyBuf, rb, int64(keyLen)); err != nil { 164 | return nil, err 165 | } 166 | 167 | if key, err = snappy.Decode(deKeyBuf, keyBuf.Bytes()); err != nil { 168 | return nil, err 169 | } 170 | 171 | if err = binary.Read(rb, binary.BigEndian, &valueLen); err != nil { 172 | return nil, err 173 | } 174 | 175 | if _, err = io.CopyN(&valueBuf, rb, int64(valueLen)); err != nil { 176 | return nil, err 177 | } 178 | 179 | if value, err = snappy.Decode(deValueBuf, valueBuf.Bytes()); err != nil { 180 | return nil, err 181 | } 182 | 183 | if err = l.ldb.Put(key, value); err != nil { 184 | return nil, err 185 | } 186 | 187 | keyBuf.Reset() 188 | valueBuf.Reset() 189 | } 190 | 191 | deKeyBuf = nil 192 | deValueBuf = nil 193 | 194 | //if 
binlog enable, we will delete all binlogs and open a new one for handling simply 195 | if l.binlog != nil { 196 | l.binlog.PurgeAll() 197 | } 198 | 199 | return info, nil 200 | } 201 | -------------------------------------------------------------------------------- /t_ttl.go: -------------------------------------------------------------------------------- 1 | package nodb 2 | 3 | import ( 4 | "encoding/binary" 5 | "errors" 6 | "time" 7 | 8 | "github.com/lunny/nodb/store" 9 | ) 10 | 11 | var ( 12 | errExpMetaKey = errors.New("invalid expire meta key") 13 | errExpTimeKey = errors.New("invalid expire time key") 14 | ) 15 | 16 | type retireCallback func(*batch, []byte) int64 17 | 18 | type elimination struct { 19 | db *DB 20 | exp2Tx []*batch 21 | exp2Retire []retireCallback 22 | } 23 | 24 | var errExpType = errors.New("invalid expire type") 25 | 26 | func (db *DB) expEncodeTimeKey(dataType byte, key []byte, when int64) []byte { 27 | buf := make([]byte, len(key)+11) 28 | 29 | buf[0] = db.index 30 | buf[1] = ExpTimeType 31 | buf[2] = dataType 32 | pos := 3 33 | 34 | binary.BigEndian.PutUint64(buf[pos:], uint64(when)) 35 | pos += 8 36 | 37 | copy(buf[pos:], key) 38 | 39 | return buf 40 | } 41 | 42 | func (db *DB) expEncodeMetaKey(dataType byte, key []byte) []byte { 43 | buf := make([]byte, len(key)+3) 44 | 45 | buf[0] = db.index 46 | buf[1] = ExpMetaType 47 | buf[2] = dataType 48 | pos := 3 49 | 50 | copy(buf[pos:], key) 51 | 52 | return buf 53 | } 54 | 55 | func (db *DB) expDecodeMetaKey(mk []byte) (byte, []byte, error) { 56 | if len(mk) <= 3 || mk[0] != db.index || mk[1] != ExpMetaType { 57 | return 0, nil, errExpMetaKey 58 | } 59 | 60 | return mk[2], mk[3:], nil 61 | } 62 | 63 | func (db *DB) expDecodeTimeKey(tk []byte) (byte, []byte, int64, error) { 64 | if len(tk) < 11 || tk[0] != db.index || tk[1] != ExpTimeType { 65 | return 0, nil, 0, errExpTimeKey 66 | } 67 | 68 | return tk[2], tk[11:], int64(binary.BigEndian.Uint64(tk[3:])), nil 69 | } 70 | 71 | func (db *DB) 
expire(t *batch, dataType byte, key []byte, duration int64) { 72 | db.expireAt(t, dataType, key, time.Now().Unix()+duration) 73 | } 74 | 75 | func (db *DB) expireAt(t *batch, dataType byte, key []byte, when int64) { 76 | mk := db.expEncodeMetaKey(dataType, key) 77 | tk := db.expEncodeTimeKey(dataType, key, when) 78 | 79 | t.Put(tk, mk) 80 | t.Put(mk, PutInt64(when)) 81 | } 82 | 83 | func (db *DB) ttl(dataType byte, key []byte) (t int64, err error) { 84 | mk := db.expEncodeMetaKey(dataType, key) 85 | 86 | if t, err = Int64(db.bucket.Get(mk)); err != nil || t == 0 { 87 | t = -1 88 | } else { 89 | t -= time.Now().Unix() 90 | if t <= 0 { 91 | t = -1 92 | } 93 | // if t == -1 : to remove ???? 94 | } 95 | 96 | return t, err 97 | } 98 | 99 | func (db *DB) rmExpire(t *batch, dataType byte, key []byte) (int64, error) { 100 | mk := db.expEncodeMetaKey(dataType, key) 101 | if v, err := db.bucket.Get(mk); err != nil { 102 | return 0, err 103 | } else if v == nil { 104 | return 0, nil 105 | } else if when, err2 := Int64(v, nil); err2 != nil { 106 | return 0, err2 107 | } else { 108 | tk := db.expEncodeTimeKey(dataType, key, when) 109 | t.Delete(mk) 110 | t.Delete(tk) 111 | return 1, nil 112 | } 113 | } 114 | 115 | func (db *DB) expFlush(t *batch, dataType byte) (err error) { 116 | minKey := make([]byte, 3) 117 | minKey[0] = db.index 118 | minKey[1] = ExpTimeType 119 | minKey[2] = dataType 120 | 121 | maxKey := make([]byte, 3) 122 | maxKey[0] = db.index 123 | maxKey[1] = ExpMetaType 124 | maxKey[2] = dataType + 1 125 | 126 | _, err = db.flushRegion(t, minKey, maxKey) 127 | err = t.Commit() 128 | return 129 | } 130 | 131 | ////////////////////////////////////////////////////////// 132 | // 133 | ////////////////////////////////////////////////////////// 134 | 135 | func newEliminator(db *DB) *elimination { 136 | eli := new(elimination) 137 | eli.db = db 138 | eli.exp2Tx = make([]*batch, maxDataType) 139 | eli.exp2Retire = make([]retireCallback, maxDataType) 140 | return eli 141 | 
} 142 | 143 | func (eli *elimination) regRetireContext(dataType byte, t *batch, onRetire retireCallback) { 144 | 145 | // todo .. need to ensure exist - mapExpMetaType[expType] 146 | 147 | eli.exp2Tx[dataType] = t 148 | eli.exp2Retire[dataType] = onRetire 149 | } 150 | 151 | // call by outside ... (from *db to another *db) 152 | func (eli *elimination) active() { 153 | now := time.Now().Unix() 154 | db := eli.db 155 | dbGet := db.bucket.Get 156 | 157 | minKey := db.expEncodeTimeKey(NoneType, nil, 0) 158 | maxKey := db.expEncodeTimeKey(maxDataType, nil, now) 159 | 160 | it := db.bucket.RangeLimitIterator(minKey, maxKey, store.RangeROpen, 0, -1) 161 | for ; it.Valid(); it.Next() { 162 | tk := it.RawKey() 163 | mk := it.RawValue() 164 | 165 | dt, k, _, err := db.expDecodeTimeKey(tk) 166 | if err != nil { 167 | continue 168 | } 169 | 170 | t := eli.exp2Tx[dt] 171 | onRetire := eli.exp2Retire[dt] 172 | if t == nil || onRetire == nil { // was `tk == nil`: tk was just decoded successfully and is never nil here; an unregistered data type leaves t nil and t.Lock() would panic 173 | continue 174 | } 175 | 176 | t.Lock() 177 | 178 | if exp, err := Int64(dbGet(mk)); err == nil { 179 | // check expire again 180 | if exp <= now { 181 | onRetire(t, k) 182 | t.Delete(tk) 183 | t.Delete(mk) 184 | 185 | t.Commit() 186 | } 187 | 188 | } 189 | 190 | t.Unlock() 191 | } 192 | it.Close() 193 | 194 | return 195 | } 196 | -------------------------------------------------------------------------------- /tools/redis_import/redis_import.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # coding: utf-8 3 | 4 | # refer: https://github.com/ideawu/ssdb/blob/master/tools/redis-import.php 5 | 6 | # Notice: for zset, float score will be converted to integer.
7 | 8 | import sys 9 | import os 10 | from collections import OrderedDict as od 11 | 12 | import redis 13 | import ledis 14 | 15 | total = 0 16 | entries = 0 17 | 18 | 19 | def scan_available(redis_client): 20 | """"Scan Command is available since redis-server 2.8.0""" 21 | 22 | if "scan" in dir(redis_client): 23 | info = redis_client.info() 24 | server_version = info["redis_version"] 25 | version_list = server_version.split(".") 26 | if len(version_list) > 2: 27 | n = int(version_list[0]) * 10 + int(version_list[1]) 28 | if n >= 28: 29 | return True 30 | return False 31 | 32 | 33 | def set_ttl(redis_client, ledis_client, key, k_type): 34 | k_types = { 35 | "string": ledis_client.expire, 36 | "list": ledis_client.lexpire, 37 | "hash": ledis_client.hexpire, 38 | "set": ledis_client.zexpire, 39 | "zset": ledis_client.zexpire 40 | } 41 | timeout = redis_client.ttl(key) 42 | if timeout > 0: 43 | k_types[k_type](key, timeout) 44 | 45 | 46 | def copy_key(redis_client, ledis_client, key, convert=False): 47 | global entries 48 | k_type = redis_client.type(key) 49 | if k_type == "string": 50 | value = redis_client.get(key) 51 | ledis_client.set(key, value) 52 | set_ttl(redis_client, ledis_client, key, k_type) 53 | entries += 1 54 | 55 | elif k_type == "list": 56 | _list = redis_client.lrange(key, 0, -1) 57 | for value in _list: 58 | ledis_client.rpush(key, value) 59 | set_ttl(redis_client, ledis_client, key, k_type) 60 | entries += 1 61 | 62 | elif k_type == "hash": 63 | mapping = od(redis_client.hgetall(key)) 64 | ledis_client.hmset(key, mapping) 65 | set_ttl(redis_client, ledis_client, key, k_type) 66 | entries += 1 67 | 68 | elif k_type == "zset": 69 | out = redis_client.zrange(key, 0, -1, withscores=True) 70 | pieces = od() 71 | for i in od(out).iteritems(): 72 | pieces[i[0]] = int(i[1]) 73 | ledis_client.zadd(key, **pieces) 74 | set_ttl(redis_client, ledis_client, key, k_type) 75 | entries += 1 76 | 77 | else: 78 | print "KEY %s of TYPE %s is not supported by LedisDB." 
% (key, k_type) 79 | 80 | 81 | def copy_keys(redis_client, ledis_client, keys, convert=False): 82 | for key in keys: 83 | copy_key(redis_client, ledis_client, key, convert=convert) 84 | 85 | 86 | def scan(redis_client, count=1000): 87 | keys = [] 88 | total = redis_client.dbsize() 89 | if total > 1000: 90 | print "It may take a while, be patient please." 91 | 92 | first = True 93 | cursor = 0 94 | while cursor != 0 or first: 95 | cursor, data = redis_client.scan(cursor, count=count) 96 | keys.extend(data) 97 | first = False 98 | assert len(keys) == total 99 | return keys, total 100 | 101 | 102 | def copy(redis_client, ledis_client, count=1000, convert=False): 103 | if scan_available(redis_client): 104 | print "\nTransfer begin ...\n" 105 | keys, total = scan(redis_client, count=count) 106 | copy_keys(redis_client, ledis_client, keys, convert=convert) 107 | 108 | else: 109 | msg = """We do not support Redis version less than 2.8.0. 110 | Please check both your redis server version and redis-py 111 | version. 
def get_prompt(choice):
    """Interpret a y/N confirmation answer.

    Returns True for an affirmative answer (empty input counts as yes)
    and False for a negative one.  Previously an unrecognized answer
    printed the hint and fell through, implicitly returning None; it now
    returns False explicitly (both are falsy, so callers behave the same).
    """
    yes = set(['yes', 'ye', 'y', ''])
    no = set(['no', 'n'])

    if choice in yes:
        return True
    if choice in no:
        return False
    sys.stdout.write("Please respond with 'yes' or 'no'")
    return False
errors.New("invalid bin log put type") 13 | errBinLogCommandType = errors.New("invalid bin log command type") 14 | ) 15 | 16 | func encodeBinLogDelete(key []byte) []byte { 17 | buf := make([]byte, 1+len(key)) 18 | buf[0] = BinLogTypeDeletion 19 | copy(buf[1:], key) 20 | return buf 21 | } 22 | 23 | func decodeBinLogDelete(sz []byte) ([]byte, error) { 24 | if len(sz) < 1 || sz[0] != BinLogTypeDeletion { 25 | return nil, errBinLogDeleteType 26 | } 27 | 28 | return sz[1:], nil 29 | } 30 | 31 | func encodeBinLogPut(key []byte, value []byte) []byte { 32 | buf := make([]byte, 3+len(key)+len(value)) 33 | buf[0] = BinLogTypePut 34 | pos := 1 35 | binary.BigEndian.PutUint16(buf[pos:], uint16(len(key))) 36 | pos += 2 37 | copy(buf[pos:], key) 38 | pos += len(key) 39 | copy(buf[pos:], value) 40 | 41 | return buf 42 | } 43 | 44 | func decodeBinLogPut(sz []byte) ([]byte, []byte, error) { 45 | if len(sz) < 3 || sz[0] != BinLogTypePut { 46 | return nil, nil, errBinLogPutType 47 | } 48 | 49 | keyLen := int(binary.BigEndian.Uint16(sz[1:])) 50 | if 3+keyLen > len(sz) { 51 | return nil, nil, errBinLogPutType 52 | } 53 | 54 | return sz[3 : 3+keyLen], sz[3+keyLen:], nil 55 | } 56 | 57 | func FormatBinLogEvent(event []byte) (string, error) { 58 | logType := uint8(event[0]) 59 | 60 | var err error 61 | var k []byte 62 | var v []byte 63 | 64 | var buf []byte = make([]byte, 0, 1024) 65 | 66 | switch logType { 67 | case BinLogTypePut: 68 | k, v, err = decodeBinLogPut(event) 69 | buf = append(buf, "PUT "...) 70 | case BinLogTypeDeletion: 71 | k, err = decodeBinLogDelete(event) 72 | buf = append(buf, "DELETE "...) 73 | default: 74 | err = errInvalidBinLogEvent 75 | } 76 | 77 | if err != nil { 78 | return "", err 79 | } 80 | 81 | if buf, err = formatDataKey(buf, k); err != nil { 82 | return "", err 83 | } 84 | 85 | if v != nil && len(v) != 0 { 86 | buf = append(buf, fmt.Sprintf(" %q", v)...) 
87 | } 88 | 89 | return String(buf), nil 90 | } 91 | 92 | func formatDataKey(buf []byte, k []byte) ([]byte, error) { 93 | if len(k) < 2 { 94 | return nil, errInvalidBinLogEvent 95 | } 96 | 97 | buf = append(buf, fmt.Sprintf("DB:%2d ", k[0])...) 98 | buf = append(buf, fmt.Sprintf("%s ", TypeName[k[1]])...) 99 | 100 | db := new(DB) 101 | db.index = k[0] 102 | 103 | //to do format at respective place 104 | 105 | switch k[1] { 106 | case KVType: 107 | if key, err := db.decodeKVKey(k); err != nil { 108 | return nil, err 109 | } else { 110 | buf = strconv.AppendQuote(buf, String(key)) 111 | } 112 | case HashType: 113 | if key, field, err := db.hDecodeHashKey(k); err != nil { 114 | return nil, err 115 | } else { 116 | buf = strconv.AppendQuote(buf, String(key)) 117 | buf = append(buf, ' ') 118 | buf = strconv.AppendQuote(buf, String(field)) 119 | } 120 | case HSizeType: 121 | if key, err := db.hDecodeSizeKey(k); err != nil { 122 | return nil, err 123 | } else { 124 | buf = strconv.AppendQuote(buf, String(key)) 125 | } 126 | case ListType: 127 | if key, seq, err := db.lDecodeListKey(k); err != nil { 128 | return nil, err 129 | } else { 130 | buf = strconv.AppendQuote(buf, String(key)) 131 | buf = append(buf, ' ') 132 | buf = strconv.AppendInt(buf, int64(seq), 10) 133 | } 134 | case LMetaType: 135 | if key, err := db.lDecodeMetaKey(k); err != nil { 136 | return nil, err 137 | } else { 138 | buf = strconv.AppendQuote(buf, String(key)) 139 | } 140 | case ZSetType: 141 | if key, m, err := db.zDecodeSetKey(k); err != nil { 142 | return nil, err 143 | } else { 144 | buf = strconv.AppendQuote(buf, String(key)) 145 | buf = append(buf, ' ') 146 | buf = strconv.AppendQuote(buf, String(m)) 147 | } 148 | case ZSizeType: 149 | if key, err := db.zDecodeSizeKey(k); err != nil { 150 | return nil, err 151 | } else { 152 | buf = strconv.AppendQuote(buf, String(key)) 153 | } 154 | case ZScoreType: 155 | if key, m, score, err := db.zDecodeScoreKey(k); err != nil { 156 | return nil, err 157 | 
} else { 158 | buf = strconv.AppendQuote(buf, String(key)) 159 | buf = append(buf, ' ') 160 | buf = strconv.AppendQuote(buf, String(m)) 161 | buf = append(buf, ' ') 162 | buf = strconv.AppendInt(buf, score, 10) 163 | } 164 | case BitType: 165 | if key, seq, err := db.bDecodeBinKey(k); err != nil { 166 | return nil, err 167 | } else { 168 | buf = strconv.AppendQuote(buf, String(key)) 169 | buf = append(buf, ' ') 170 | buf = strconv.AppendUint(buf, uint64(seq), 10) 171 | } 172 | case BitMetaType: 173 | if key, err := db.bDecodeMetaKey(k); err != nil { 174 | return nil, err 175 | } else { 176 | buf = strconv.AppendQuote(buf, String(key)) 177 | } 178 | case SetType: 179 | if key, member, err := db.sDecodeSetKey(k); err != nil { 180 | return nil, err 181 | } else { 182 | buf = strconv.AppendQuote(buf, String(key)) 183 | buf = append(buf, ' ') 184 | buf = strconv.AppendQuote(buf, String(member)) 185 | } 186 | case SSizeType: 187 | if key, err := db.sDecodeSizeKey(k); err != nil { 188 | return nil, err 189 | } else { 190 | buf = strconv.AppendQuote(buf, String(key)) 191 | } 192 | case ExpTimeType: 193 | if tp, key, t, err := db.expDecodeTimeKey(k); err != nil { 194 | return nil, err 195 | } else { 196 | buf = append(buf, TypeName[tp]...) 197 | buf = append(buf, ' ') 198 | buf = strconv.AppendQuote(buf, String(key)) 199 | buf = append(buf, ' ') 200 | buf = strconv.AppendInt(buf, t, 10) 201 | } 202 | case ExpMetaType: 203 | if tp, key, err := db.expDecodeMetaKey(k); err != nil { 204 | return nil, err 205 | } else { 206 | buf = append(buf, TypeName[tp]...) 
207 | buf = append(buf, ' ') 208 | buf = strconv.AppendQuote(buf, String(key)) 209 | } 210 | default: 211 | return nil, errInvalidBinLogEvent 212 | } 213 | 214 | return buf, nil 215 | } 216 | -------------------------------------------------------------------------------- /replication.go: -------------------------------------------------------------------------------- 1 | package nodb 2 | 3 | import ( 4 | "bufio" 5 | "bytes" 6 | "errors" 7 | "io" 8 | "os" 9 | "time" 10 | 11 | "github.com/lunny/log" 12 | "github.com/lunny/nodb/store/driver" 13 | ) 14 | 15 | const ( 16 | maxReplBatchNum = 100 17 | maxReplLogSize = 1 * 1024 * 1024 18 | ) 19 | 20 | var ( 21 | ErrSkipEvent = errors.New("skip to next event") 22 | ) 23 | 24 | var ( 25 | errInvalidBinLogEvent = errors.New("invalid binglog event") 26 | errInvalidBinLogFile = errors.New("invalid binlog file") 27 | ) 28 | 29 | type replBatch struct { 30 | wb driver.IWriteBatch 31 | events [][]byte 32 | l *Nodb 33 | 34 | lastHead *BinLogHead 35 | } 36 | 37 | func (b *replBatch) Commit() error { 38 | b.l.commitLock.Lock() 39 | defer b.l.commitLock.Unlock() 40 | 41 | err := b.wb.Commit() 42 | if err != nil { 43 | b.Rollback() 44 | return err 45 | } 46 | 47 | if b.l.binlog != nil { 48 | if err = b.l.binlog.Log(b.events...); err != nil { 49 | b.Rollback() 50 | return err 51 | } 52 | } 53 | 54 | b.events = [][]byte{} 55 | b.lastHead = nil 56 | 57 | return nil 58 | } 59 | 60 | func (b *replBatch) Rollback() error { 61 | b.wb.Rollback() 62 | b.events = [][]byte{} 63 | b.lastHead = nil 64 | return nil 65 | } 66 | 67 | func (l *Nodb) replicateEvent(b *replBatch, event []byte) error { 68 | if len(event) == 0 { 69 | return errInvalidBinLogEvent 70 | } 71 | 72 | b.events = append(b.events, event) 73 | 74 | logType := uint8(event[0]) 75 | switch logType { 76 | case BinLogTypePut: 77 | return l.replicatePutEvent(b, event) 78 | case BinLogTypeDeletion: 79 | return l.replicateDeleteEvent(b, event) 80 | default: 81 | return 
errInvalidBinLogEvent 82 | } 83 | } 84 | 85 | func (l *Nodb) replicatePutEvent(b *replBatch, event []byte) error { 86 | key, value, err := decodeBinLogPut(event) 87 | if err != nil { 88 | return err 89 | } 90 | 91 | b.wb.Put(key, value) 92 | 93 | return nil 94 | } 95 | 96 | func (l *Nodb) replicateDeleteEvent(b *replBatch, event []byte) error { 97 | key, err := decodeBinLogDelete(event) 98 | if err != nil { 99 | return err 100 | } 101 | 102 | b.wb.Delete(key) 103 | 104 | return nil 105 | } 106 | 107 | func ReadEventFromReader(rb io.Reader, f func(head *BinLogHead, event []byte) error) error { 108 | head := &BinLogHead{} 109 | var err error 110 | 111 | for { 112 | if err = head.Read(rb); err != nil { 113 | if err == io.EOF { 114 | break 115 | } else { 116 | return err 117 | } 118 | } 119 | 120 | var dataBuf bytes.Buffer 121 | 122 | if _, err = io.CopyN(&dataBuf, rb, int64(head.PayloadLen)); err != nil { 123 | return err 124 | } 125 | 126 | err = f(head, dataBuf.Bytes()) 127 | if err != nil && err != ErrSkipEvent { 128 | return err 129 | } 130 | } 131 | 132 | return nil 133 | } 134 | 135 | func (l *Nodb) ReplicateFromReader(rb io.Reader) error { 136 | b := new(replBatch) 137 | 138 | b.wb = l.ldb.NewWriteBatch() 139 | b.l = l 140 | 141 | f := func(head *BinLogHead, event []byte) error { 142 | if b.lastHead == nil { 143 | b.lastHead = head 144 | } else if !b.lastHead.InSameBatch(head) { 145 | if err := b.Commit(); err != nil { 146 | log.Fatal("replication error %s, skip to next", err.Error()) 147 | return ErrSkipEvent 148 | } 149 | b.lastHead = head 150 | } 151 | 152 | err := l.replicateEvent(b, event) 153 | if err != nil { 154 | log.Fatal("replication error %s, skip to next", err.Error()) 155 | return ErrSkipEvent 156 | } 157 | return nil 158 | } 159 | 160 | err := ReadEventFromReader(rb, f) 161 | if err != nil { 162 | b.Rollback() 163 | return err 164 | } 165 | return b.Commit() 166 | } 167 | 168 | func (l *Nodb) ReplicateFromData(data []byte) error { 169 | rb := 
bytes.NewReader(data) 170 | 171 | err := l.ReplicateFromReader(rb) 172 | 173 | return err 174 | } 175 | 176 | func (l *Nodb) ReplicateFromBinLog(filePath string) error { 177 | f, err := os.Open(filePath) 178 | if err != nil { 179 | return err 180 | } 181 | 182 | rb := bufio.NewReaderSize(f, 4096) 183 | 184 | err = l.ReplicateFromReader(rb) 185 | 186 | f.Close() 187 | 188 | return err 189 | } 190 | 191 | // try to read events, if no events read, try to wait the new event singal until timeout seconds 192 | func (l *Nodb) ReadEventsToTimeout(info *BinLogAnchor, w io.Writer, timeout int) (n int, err error) { 193 | lastIndex := info.LogFileIndex 194 | lastPos := info.LogPos 195 | 196 | n = 0 197 | if l.binlog == nil { 198 | //binlog not supported 199 | info.LogFileIndex = 0 200 | info.LogPos = 0 201 | return 202 | } 203 | 204 | n, err = l.ReadEventsTo(info, w) 205 | if err == nil && info.LogFileIndex == lastIndex && info.LogPos == lastPos { 206 | //no events read 207 | select { 208 | case <-l.binlog.Wait(): 209 | case <-time.After(time.Duration(timeout) * time.Second): 210 | } 211 | return l.ReadEventsTo(info, w) 212 | } 213 | return 214 | } 215 | 216 | func (l *Nodb) ReadEventsTo(info *BinLogAnchor, w io.Writer) (n int, err error) { 217 | n = 0 218 | if l.binlog == nil { 219 | //binlog not supported 220 | info.LogFileIndex = 0 221 | info.LogPos = 0 222 | return 223 | } 224 | 225 | index := info.LogFileIndex 226 | offset := info.LogPos 227 | 228 | filePath := l.binlog.FormatLogFilePath(index) 229 | 230 | var f *os.File 231 | f, err = os.Open(filePath) 232 | if os.IsNotExist(err) { 233 | lastIndex := l.binlog.LogFileIndex() 234 | 235 | if index == lastIndex { 236 | //no binlog at all 237 | info.LogPos = 0 238 | } else { 239 | //slave binlog info had lost 240 | info.LogFileIndex = -1 241 | } 242 | } 243 | 244 | if err != nil { 245 | if os.IsNotExist(err) { 246 | err = nil 247 | } 248 | return 249 | } 250 | 251 | defer f.Close() 252 | 253 | var fileSize int64 254 | st, _ 
:= f.Stat() 255 | fileSize = st.Size() 256 | 257 | if fileSize == info.LogPos { 258 | return 259 | } 260 | 261 | if _, err = f.Seek(offset, os.SEEK_SET); err != nil { 262 | //may be invliad seek offset 263 | return 264 | } 265 | 266 | var lastHead *BinLogHead = nil 267 | 268 | head := &BinLogHead{} 269 | 270 | batchNum := 0 271 | 272 | for { 273 | if err = head.Read(f); err != nil { 274 | if err == io.EOF { 275 | //we will try to use next binlog 276 | if index < l.binlog.LogFileIndex() { 277 | info.LogFileIndex += 1 278 | info.LogPos = 0 279 | } 280 | err = nil 281 | return 282 | } else { 283 | return 284 | } 285 | 286 | } 287 | 288 | if lastHead == nil { 289 | lastHead = head 290 | batchNum++ 291 | } else if !lastHead.InSameBatch(head) { 292 | lastHead = head 293 | batchNum++ 294 | if batchNum > maxReplBatchNum || n > maxReplLogSize { 295 | return 296 | } 297 | } 298 | 299 | if err = head.Write(w); err != nil { 300 | return 301 | } 302 | 303 | if _, err = io.CopyN(w, f, int64(head.PayloadLen)); err != nil { 304 | return 305 | } 306 | 307 | n += (head.Len() + int(head.PayloadLen)) 308 | info.LogPos = info.LogPos + int64(head.Len()) + int64(head.PayloadLen) 309 | } 310 | 311 | return 312 | } 313 | -------------------------------------------------------------------------------- /store/iterator.go: -------------------------------------------------------------------------------- 1 | package store 2 | 3 | import ( 4 | "bytes" 5 | 6 | "github.com/lunny/nodb/store/driver" 7 | ) 8 | 9 | const ( 10 | IteratorForward uint8 = 0 11 | IteratorBackward uint8 = 1 12 | ) 13 | 14 | const ( 15 | RangeClose uint8 = 0x00 16 | RangeLOpen uint8 = 0x01 17 | RangeROpen uint8 = 0x10 18 | RangeOpen uint8 = 0x11 19 | ) 20 | 21 | // min must less or equal than max 22 | // 23 | // range type: 24 | // 25 | // close: [min, max] 26 | // open: (min, max) 27 | // lopen: (min, max] 28 | // ropen: [min, max) 29 | // 30 | type Range struct { 31 | Min []byte 32 | Max []byte 33 | 34 | Type uint8 35 | } 
36 | 37 | type Limit struct { 38 | Offset int 39 | Count int 40 | } 41 | 42 | type Iterator struct { 43 | it driver.IIterator 44 | } 45 | 46 | // Returns a copy of key. 47 | func (it *Iterator) Key() []byte { 48 | k := it.it.Key() 49 | if k == nil { 50 | return nil 51 | } 52 | 53 | return append([]byte{}, k...) 54 | } 55 | 56 | // Returns a copy of value. 57 | func (it *Iterator) Value() []byte { 58 | v := it.it.Value() 59 | if v == nil { 60 | return nil 61 | } 62 | 63 | return append([]byte{}, v...) 64 | } 65 | 66 | // Returns a reference of key. 67 | // you must be careful that it will be changed after next iterate. 68 | func (it *Iterator) RawKey() []byte { 69 | return it.it.Key() 70 | } 71 | 72 | // Returns a reference of value. 73 | // you must be careful that it will be changed after next iterate. 74 | func (it *Iterator) RawValue() []byte { 75 | return it.it.Value() 76 | } 77 | 78 | // Copy key to b, if b len is small or nil, returns a new one. 79 | func (it *Iterator) BufKey(b []byte) []byte { 80 | k := it.RawKey() 81 | if k == nil { 82 | return nil 83 | } 84 | if b == nil { 85 | b = []byte{} 86 | } 87 | 88 | b = b[0:0] 89 | return append(b, k...) 90 | } 91 | 92 | // Copy value to b, if b len is small or nil, returns a new one. 93 | func (it *Iterator) BufValue(b []byte) []byte { 94 | v := it.RawValue() 95 | if v == nil { 96 | return nil 97 | } 98 | 99 | if b == nil { 100 | b = []byte{} 101 | } 102 | 103 | b = b[0:0] 104 | return append(b, v...) 
105 | } 106 | 107 | func (it *Iterator) Close() { 108 | if it.it != nil { 109 | it.it.Close() 110 | it.it = nil 111 | } 112 | } 113 | 114 | func (it *Iterator) Valid() bool { 115 | return it.it.Valid() 116 | } 117 | 118 | func (it *Iterator) Next() { 119 | it.it.Next() 120 | } 121 | 122 | func (it *Iterator) Prev() { 123 | it.it.Prev() 124 | } 125 | 126 | func (it *Iterator) SeekToFirst() { 127 | it.it.First() 128 | } 129 | 130 | func (it *Iterator) SeekToLast() { 131 | it.it.Last() 132 | } 133 | 134 | func (it *Iterator) Seek(key []byte) { 135 | it.it.Seek(key) 136 | } 137 | 138 | // Finds by key, if not found, nil returns. 139 | func (it *Iterator) Find(key []byte) []byte { 140 | it.Seek(key) 141 | if it.Valid() { 142 | k := it.RawKey() 143 | if k == nil { 144 | return nil 145 | } else if bytes.Equal(k, key) { 146 | return it.Value() 147 | } 148 | } 149 | 150 | return nil 151 | } 152 | 153 | // Finds by key, if not found, nil returns, else a reference of value returns. 154 | // you must be careful that it will be changed after next iterate. 
155 | func (it *Iterator) RawFind(key []byte) []byte { 156 | it.Seek(key) 157 | if it.Valid() { 158 | k := it.RawKey() 159 | if k == nil { 160 | return nil 161 | } else if bytes.Equal(k, key) { 162 | return it.RawValue() 163 | } 164 | } 165 | 166 | return nil 167 | } 168 | 169 | type RangeLimitIterator struct { 170 | it *Iterator 171 | 172 | r *Range 173 | l *Limit 174 | 175 | step int 176 | 177 | //0 for IteratorForward, 1 for IteratorBackward 178 | direction uint8 179 | } 180 | 181 | func (it *RangeLimitIterator) Key() []byte { 182 | return it.it.Key() 183 | } 184 | 185 | func (it *RangeLimitIterator) Value() []byte { 186 | return it.it.Value() 187 | } 188 | 189 | func (it *RangeLimitIterator) RawKey() []byte { 190 | return it.it.RawKey() 191 | } 192 | 193 | func (it *RangeLimitIterator) RawValue() []byte { 194 | return it.it.RawValue() 195 | } 196 | 197 | func (it *RangeLimitIterator) BufKey(b []byte) []byte { 198 | return it.it.BufKey(b) 199 | } 200 | 201 | func (it *RangeLimitIterator) BufValue(b []byte) []byte { 202 | return it.it.BufValue(b) 203 | } 204 | 205 | func (it *RangeLimitIterator) Valid() bool { 206 | if it.l.Offset < 0 { 207 | return false 208 | } else if !it.it.Valid() { 209 | return false 210 | } else if it.l.Count >= 0 && it.step >= it.l.Count { 211 | return false 212 | } 213 | 214 | if it.direction == IteratorForward { 215 | if it.r.Max != nil { 216 | r := bytes.Compare(it.it.RawKey(), it.r.Max) 217 | if it.r.Type&RangeROpen > 0 { 218 | return !(r >= 0) 219 | } else { 220 | return !(r > 0) 221 | } 222 | } 223 | } else { 224 | if it.r.Min != nil { 225 | r := bytes.Compare(it.it.RawKey(), it.r.Min) 226 | if it.r.Type&RangeLOpen > 0 { 227 | return !(r <= 0) 228 | } else { 229 | return !(r < 0) 230 | } 231 | } 232 | } 233 | 234 | return true 235 | } 236 | 237 | func (it *RangeLimitIterator) Next() { 238 | it.step++ 239 | 240 | if it.direction == IteratorForward { 241 | it.it.Next() 242 | } else { 243 | it.it.Prev() 244 | } 245 | } 246 | 247 | func 
(it *RangeLimitIterator) Close() { 248 | it.it.Close() 249 | } 250 | 251 | func NewRangeLimitIterator(i *Iterator, r *Range, l *Limit) *RangeLimitIterator { 252 | return rangeLimitIterator(i, r, l, IteratorForward) 253 | } 254 | 255 | func NewRevRangeLimitIterator(i *Iterator, r *Range, l *Limit) *RangeLimitIterator { 256 | return rangeLimitIterator(i, r, l, IteratorBackward) 257 | } 258 | 259 | func NewRangeIterator(i *Iterator, r *Range) *RangeLimitIterator { 260 | return rangeLimitIterator(i, r, &Limit{0, -1}, IteratorForward) 261 | } 262 | 263 | func NewRevRangeIterator(i *Iterator, r *Range) *RangeLimitIterator { 264 | return rangeLimitIterator(i, r, &Limit{0, -1}, IteratorBackward) 265 | } 266 | 267 | func rangeLimitIterator(i *Iterator, r *Range, l *Limit, direction uint8) *RangeLimitIterator { 268 | it := new(RangeLimitIterator) 269 | 270 | it.it = i 271 | 272 | it.r = r 273 | it.l = l 274 | it.direction = direction 275 | 276 | it.step = 0 277 | 278 | if l.Offset < 0 { 279 | return it 280 | } 281 | 282 | if direction == IteratorForward { 283 | if r.Min == nil { 284 | it.it.SeekToFirst() 285 | } else { 286 | it.it.Seek(r.Min) 287 | 288 | if r.Type&RangeLOpen > 0 { 289 | if it.it.Valid() && bytes.Equal(it.it.RawKey(), r.Min) { 290 | it.it.Next() 291 | } 292 | } 293 | } 294 | } else { 295 | if r.Max == nil { 296 | it.it.SeekToLast() 297 | } else { 298 | it.it.Seek(r.Max) 299 | 300 | if !it.it.Valid() { 301 | it.it.SeekToLast() 302 | } else { 303 | if !bytes.Equal(it.it.RawKey(), r.Max) { 304 | it.it.Prev() 305 | } 306 | } 307 | 308 | if r.Type&RangeROpen > 0 { 309 | if it.it.Valid() && bytes.Equal(it.it.RawKey(), r.Max) { 310 | it.it.Prev() 311 | } 312 | } 313 | } 314 | } 315 | 316 | for i := 0; i < l.Offset; i++ { 317 | if it.it.Valid() { 318 | if it.direction == IteratorForward { 319 | it.it.Next() 320 | } else { 321 | it.it.Prev() 322 | } 323 | } 324 | } 325 | 326 | return it 327 | } 328 | 
-------------------------------------------------------------------------------- /store/store_test.go: -------------------------------------------------------------------------------- 1 | package store 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "os" 7 | "testing" 8 | 9 | "github.com/lunny/nodb/config" 10 | "github.com/lunny/nodb/store/driver" 11 | ) 12 | 13 | func TestStore(t *testing.T) { 14 | cfg := new(config.Config) 15 | cfg.DataDir = "/tmp/testdb" 16 | cfg.LMDB.MapSize = 10 * 1024 * 1024 17 | 18 | ns := driver.ListStores() 19 | for _, s := range ns { 20 | cfg.DBName = s 21 | 22 | os.RemoveAll(getStorePath(cfg)) 23 | 24 | db, err := Open(cfg) 25 | if err != nil { 26 | t.Fatal(err) 27 | } 28 | 29 | testStore(db, t) 30 | testClear(db, t) 31 | testTx(db, t) 32 | 33 | db.Close() 34 | } 35 | } 36 | 37 | func testStore(db *DB, t *testing.T) { 38 | testSimple(db, t) 39 | testBatch(db, t) 40 | testIterator(db, t) 41 | testSnapshot(db, t) 42 | } 43 | 44 | func testClear(db *DB, t *testing.T) { 45 | it := db.RangeIterator(nil, nil, RangeClose) 46 | for ; it.Valid(); it.Next() { 47 | db.Delete(it.RawKey()) 48 | } 49 | it.Close() 50 | } 51 | 52 | func testSimple(db *DB, t *testing.T) { 53 | key := []byte("key") 54 | value := []byte("hello world") 55 | if err := db.Put(key, value); err != nil { 56 | t.Fatal(err) 57 | } 58 | 59 | if v, err := db.Get(key); err != nil { 60 | t.Fatal(err) 61 | } else if !bytes.Equal(v, value) { 62 | t.Fatal("not equal") 63 | } 64 | 65 | if err := db.Delete(key); err != nil { 66 | t.Fatal(err) 67 | } 68 | if v, err := db.Get(key); err != nil { 69 | t.Fatal(err) 70 | } else if v != nil { 71 | t.Fatal("must nil") 72 | } 73 | 74 | if err := db.Put(key, nil); err != nil { 75 | t.Fatal(err) 76 | } 77 | 78 | if v, err := db.Get(key); err != nil { 79 | t.Fatal(err) 80 | } else if !bytes.Equal(v, []byte{}) { 81 | t.Fatal("must empty") 82 | } 83 | } 84 | 85 | func testBatch(db *DB, t *testing.T) { 86 | key1 := []byte("key1") 87 | key2 := []byte("key2") 
88 | 89 | value := []byte("hello world") 90 | 91 | db.Put(key1, value) 92 | db.Put(key2, value) 93 | 94 | wb := db.NewWriteBatch() 95 | 96 | wb.Delete(key2) 97 | wb.Put(key1, []byte("hello world2")) 98 | 99 | if err := wb.Commit(); err != nil { 100 | t.Fatal(err) 101 | } 102 | 103 | if v, err := db.Get(key2); err != nil { 104 | t.Fatal(err) 105 | } else if v != nil { 106 | t.Fatal("must nil") 107 | } 108 | 109 | if v, err := db.Get(key1); err != nil { 110 | t.Fatal(err) 111 | } else if string(v) != "hello world2" { 112 | t.Fatal(string(v)) 113 | } 114 | 115 | wb.Delete(key1) 116 | 117 | wb.Rollback() 118 | 119 | if v, err := db.Get(key1); err != nil { 120 | t.Fatal(err) 121 | } else if string(v) != "hello world2" { 122 | t.Fatal(string(v)) 123 | } 124 | 125 | wb.Put(key1, nil) 126 | wb.Put(key2, []byte{}) 127 | 128 | if err := wb.Commit(); err != nil { 129 | t.Fatal(err) 130 | } 131 | 132 | if v, err := db.Get(key1); err != nil { 133 | t.Fatal(err) 134 | } else if !bytes.Equal(v, []byte{}) { 135 | t.Fatal("must empty") 136 | } 137 | 138 | if v, err := db.Get(key2); err != nil { 139 | t.Fatal(err) 140 | } else if !bytes.Equal(v, []byte{}) { 141 | t.Fatal("must empty") 142 | } 143 | 144 | db.Delete(key1) 145 | db.Delete(key2) 146 | } 147 | 148 | func checkIterator(it *RangeLimitIterator, cv ...int) error { 149 | v := make([]string, 0, len(cv)) 150 | for ; it.Valid(); it.Next() { 151 | k := it.Key() 152 | v = append(v, string(k)) 153 | } 154 | 155 | it.Close() 156 | 157 | if len(v) != len(cv) { 158 | return fmt.Errorf("len error %d != %d", len(v), len(cv)) 159 | } 160 | 161 | for k, i := range cv { 162 | if fmt.Sprintf("key_%d", i) != v[k] { 163 | return fmt.Errorf("%s, %d", v[k], i) 164 | } 165 | } 166 | 167 | return nil 168 | } 169 | 170 | func testIterator(db *DB, t *testing.T) { 171 | i := db.NewIterator() 172 | for i.SeekToFirst(); i.Valid(); i.Next() { 173 | db.Delete(i.Key()) 174 | } 175 | i.Close() 176 | 177 | for i := 0; i < 10; i++ { 178 | key := 
[]byte(fmt.Sprintf("key_%d", i)) 179 | value := []byte("") 180 | db.Put(key, value) 181 | } 182 | 183 | i = db.NewIterator() 184 | i.SeekToFirst() 185 | 186 | if !i.Valid() { 187 | t.Fatal("must valid") 188 | } else if string(i.Key()) != "key_0" { 189 | t.Fatal(string(i.Key())) 190 | } 191 | i.Close() 192 | 193 | var it *RangeLimitIterator 194 | 195 | k := func(i int) []byte { 196 | return []byte(fmt.Sprintf("key_%d", i)) 197 | } 198 | 199 | it = db.RangeLimitIterator(k(1), k(5), RangeClose, 0, -1) 200 | if err := checkIterator(it, 1, 2, 3, 4, 5); err != nil { 201 | t.Fatal(err) 202 | } 203 | it.Close() 204 | 205 | it = db.RangeLimitIterator(k(1), k(5), RangeClose, 0, -1) 206 | if err := checkIterator(it, 1, 2, 3, 4, 5); err != nil { 207 | t.Fatal(err) 208 | } 209 | it.Close() 210 | 211 | it = db.RangeLimitIterator(k(1), k(5), RangeClose, 1, 3) 212 | if err := checkIterator(it, 2, 3, 4); err != nil { 213 | t.Fatal(err) 214 | } 215 | it.Close() 216 | 217 | it = db.RangeLimitIterator(k(1), k(5), RangeLOpen, 0, -1) 218 | if err := checkIterator(it, 2, 3, 4, 5); err != nil { 219 | t.Fatal(err) 220 | } 221 | it.Close() 222 | 223 | it = db.RangeLimitIterator(k(1), k(5), RangeROpen, 0, -1) 224 | if err := checkIterator(it, 1, 2, 3, 4); err != nil { 225 | t.Fatal(err) 226 | } 227 | it.Close() 228 | 229 | it = db.RangeLimitIterator(k(1), k(5), RangeOpen, 0, -1) 230 | if err := checkIterator(it, 2, 3, 4); err != nil { 231 | t.Fatal(err) 232 | } 233 | it.Close() 234 | 235 | it = db.RevRangeLimitIterator(k(1), k(5), RangeClose, 0, -1) 236 | if err := checkIterator(it, 5, 4, 3, 2, 1); err != nil { 237 | t.Fatal(err) 238 | } 239 | it.Close() 240 | 241 | it = db.RevRangeLimitIterator(k(1), k(5), RangeClose, 1, 3) 242 | if err := checkIterator(it, 4, 3, 2); err != nil { 243 | t.Fatal(err) 244 | } 245 | it.Close() 246 | 247 | it = db.RevRangeLimitIterator(k(1), k(5), RangeLOpen, 0, -1) 248 | if err := checkIterator(it, 5, 4, 3, 2); err != nil { 249 | t.Fatal(err) 250 | } 251 | 
it.Close() 252 | 253 | it = db.RevRangeLimitIterator(k(1), k(5), RangeROpen, 0, -1) 254 | if err := checkIterator(it, 4, 3, 2, 1); err != nil { 255 | t.Fatal(err) 256 | } 257 | it.Close() 258 | 259 | it = db.RevRangeLimitIterator(k(1), k(5), RangeOpen, 0, -1) 260 | if err := checkIterator(it, 4, 3, 2); err != nil { 261 | t.Fatal(err) 262 | } 263 | it.Close() 264 | } 265 | 266 | func testSnapshot(db *DB, t *testing.T) { 267 | foo := []byte("foo") 268 | bar := []byte("bar") 269 | v1 := []byte("v1") 270 | v2 := []byte("v2") 271 | 272 | db.Put(foo, v1) 273 | db.Put(bar, v1) 274 | 275 | snap, err := db.NewSnapshot() 276 | if err != nil { 277 | t.Fatal(err) 278 | } 279 | 280 | i := snap.NewIterator() 281 | 282 | i.Seek([]byte("foo")) 283 | 284 | if !i.Valid() { 285 | t.Fatal("must valid") 286 | } else if string(i.Value()) != "v1" { 287 | t.Fatal(string(i.Value())) 288 | } 289 | i.Close() 290 | 291 | db.Put(foo, v2) 292 | db.Put(bar, v2) 293 | 294 | if v, err := snap.Get(foo); err != nil { 295 | t.Fatal(err) 296 | } else if string(v) != "v1" { 297 | t.Fatal(string(v)) 298 | } 299 | 300 | if v, err := snap.Get(bar); err != nil { 301 | t.Fatal(err) 302 | } else if string(v) != "v1" { 303 | t.Fatal(string(v)) 304 | } 305 | 306 | if v, err := db.Get(foo); err != nil { 307 | t.Fatal(err) 308 | } else if string(v) != "v2" { 309 | t.Fatal(string(v)) 310 | } 311 | 312 | if v, err := db.Get(bar); err != nil { 313 | t.Fatal(err) 314 | } else if string(v) != "v2" { 315 | t.Fatal(string(v)) 316 | } 317 | 318 | snap.Close() 319 | 320 | if v, err := db.Get(foo); err != nil { 321 | t.Fatal(err) 322 | } else if string(v) != "v2" { 323 | t.Fatal(string(v)) 324 | } 325 | 326 | } 327 | -------------------------------------------------------------------------------- /scan_test.go: -------------------------------------------------------------------------------- 1 | package nodb 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | func TestDBScan(t *testing.T) { 8 | db := getTestDB() 9 | 10 | 
db.FlushAll() 11 | 12 | if v, err := db.Scan(nil, 10, true, ""); err != nil { 13 | t.Fatal(err) 14 | } else if len(v) != 0 { 15 | t.Fatal(len(v)) 16 | } 17 | 18 | db.Set([]byte("a"), []byte{}) 19 | db.Set([]byte("b"), []byte{}) 20 | db.Set([]byte("c"), []byte{}) 21 | 22 | if v, err := db.Scan(nil, 1, true, ""); err != nil { 23 | t.Fatal(err) 24 | } else if len(v) != 1 { 25 | t.Fatal(len(v)) 26 | } 27 | 28 | if v, err := db.Scan([]byte("a"), 2, false, ""); err != nil { 29 | t.Fatal(err) 30 | } else if len(v) != 2 { 31 | t.Fatal(len(v)) 32 | } 33 | 34 | if v, err := db.Scan(nil, 3, true, ""); err != nil { 35 | t.Fatal(err) 36 | } else if len(v) != 3 { 37 | t.Fatal(len(v)) 38 | } 39 | 40 | if v, err := db.Scan(nil, 3, true, "b"); err != nil { 41 | t.Fatal(err) 42 | } else if len(v) != 1 { 43 | t.Fatal(len(v)) 44 | } 45 | 46 | if v, err := db.Scan(nil, 3, true, "."); err != nil { 47 | t.Fatal(err) 48 | } else if len(v) != 3 { 49 | t.Fatal(len(v)) 50 | } 51 | 52 | if v, err := db.Scan(nil, 3, true, "a+"); err != nil { 53 | t.Fatal(err) 54 | } else if len(v) != 1 { 55 | t.Fatal(len(v)) 56 | } 57 | 58 | } 59 | 60 | func TestDBHScan(t *testing.T) { 61 | db := getTestDB() 62 | 63 | db.hFlush() 64 | 65 | k1 := []byte("k1") 66 | db.HSet(k1, []byte("1"), []byte{}) 67 | 68 | k2 := []byte("k2") 69 | db.HSet(k2, []byte("2"), []byte{}) 70 | 71 | k3 := []byte("k3") 72 | db.HSet(k3, []byte("3"), []byte{}) 73 | 74 | if v, err := db.HScan(nil, 1, true, ""); err != nil { 75 | t.Fatal(err) 76 | } else if len(v) != 1 { 77 | t.Fatal("invalid length ", len(v)) 78 | } else if string(v[0]) != "k1" { 79 | t.Fatal("invalid value ", string(v[0])) 80 | } 81 | 82 | if v, err := db.HScan(k1, 2, true, ""); err != nil { 83 | t.Fatal(err) 84 | } else if len(v) != 2 { 85 | t.Fatal("invalid length ", len(v)) 86 | } else if string(v[0]) != "k1" { 87 | t.Fatal("invalid value ", string(v[0])) 88 | } else if string(v[1]) != "k2" { 89 | t.Fatal("invalid value ", string(v[1])) 90 | } 91 | 92 | if v, err := 
db.HScan(k1, 2, false, ""); err != nil { 93 | t.Fatal(err) 94 | } else if len(v) != 2 { 95 | t.Fatal("invalid length ", len(v)) 96 | } else if string(v[0]) != "k2" { 97 | t.Fatal("invalid value ", string(v[0])) 98 | } else if string(v[1]) != "k3" { 99 | t.Fatal("invalid value ", string(v[1])) 100 | } 101 | 102 | } 103 | 104 | func TestDBZScan(t *testing.T) { 105 | db := getTestDB() 106 | 107 | db.zFlush() 108 | 109 | k1 := []byte("k1") 110 | db.ZAdd(k1, ScorePair{1, []byte("m")}) 111 | 112 | k2 := []byte("k2") 113 | db.ZAdd(k2, ScorePair{2, []byte("m")}) 114 | 115 | k3 := []byte("k3") 116 | db.ZAdd(k3, ScorePair{3, []byte("m")}) 117 | 118 | if v, err := db.ZScan(nil, 1, true, ""); err != nil { 119 | t.Fatal(err) 120 | } else if len(v) != 1 { 121 | t.Fatal("invalid length ", len(v)) 122 | } else if string(v[0]) != "k1" { 123 | t.Fatal("invalid value ", string(v[0])) 124 | } 125 | 126 | if v, err := db.ZScan(k1, 2, true, ""); err != nil { 127 | t.Fatal(err) 128 | } else if len(v) != 2 { 129 | t.Fatal("invalid length ", len(v)) 130 | } else if string(v[0]) != "k1" { 131 | t.Fatal("invalid value ", string(v[0])) 132 | } else if string(v[1]) != "k2" { 133 | t.Fatal("invalid value ", string(v[1])) 134 | } 135 | 136 | if v, err := db.ZScan(k1, 2, false, ""); err != nil { 137 | t.Fatal(err) 138 | } else if len(v) != 2 { 139 | t.Fatal("invalid length ", len(v)) 140 | } else if string(v[0]) != "k2" { 141 | t.Fatal("invalid value ", string(v[0])) 142 | } else if string(v[1]) != "k3" { 143 | t.Fatal("invalid value ", string(v[1])) 144 | } 145 | 146 | } 147 | 148 | func TestDBLScan(t *testing.T) { 149 | db := getTestDB() 150 | 151 | db.lFlush() 152 | 153 | k1 := []byte("k1") 154 | if _, err := db.LPush(k1, []byte("elem")); err != nil { 155 | t.Fatal(err.Error()) 156 | } 157 | 158 | k2 := []byte("k2") 159 | if _, err := db.LPush(k2, []byte("elem")); err != nil { 160 | t.Fatal(err.Error()) 161 | } 162 | 163 | k3 := []byte("k3") 164 | if _, err := db.LPush(k3, []byte("elem")); err 
!= nil { 165 | t.Fatal(err.Error()) 166 | } 167 | 168 | if v, err := db.LScan(nil, 1, true, ""); err != nil { 169 | t.Fatal(err) 170 | } else if len(v) != 1 { 171 | t.Fatal("invalid length ", len(v)) 172 | } else if string(v[0]) != "k1" { 173 | t.Fatal("invalid value ", string(v[0])) 174 | } 175 | 176 | if v, err := db.LScan(k1, 2, true, ""); err != nil { 177 | t.Fatal(err) 178 | } else if len(v) != 2 { 179 | t.Fatal("invalid length ", len(v)) 180 | } else if string(v[0]) != "k1" { 181 | t.Fatal("invalid value ", string(v[0])) 182 | } else if string(v[1]) != "k2" { 183 | t.Fatal("invalid value ", string(v[1])) 184 | } 185 | 186 | if v, err := db.LScan(k1, 2, false, ""); err != nil { 187 | t.Fatal(err) 188 | } else if len(v) != 2 { 189 | t.Fatal("invalid length ", len(v)) 190 | } else if string(v[0]) != "k2" { 191 | t.Fatal("invalid value ", string(v[0])) 192 | } else if string(v[1]) != "k3" { 193 | t.Fatal("invalid value ", string(v[1])) 194 | } 195 | 196 | } 197 | 198 | func TestDBBScan(t *testing.T) { 199 | db := getTestDB() 200 | 201 | db.bFlush() 202 | 203 | k1 := []byte("k1") 204 | if _, err := db.BSetBit(k1, 1, 1); err != nil { 205 | t.Fatal(err.Error()) 206 | } 207 | 208 | k2 := []byte("k2") 209 | if _, err := db.BSetBit(k2, 1, 1); err != nil { 210 | t.Fatal(err.Error()) 211 | } 212 | k3 := []byte("k3") 213 | 214 | if _, err := db.BSetBit(k3, 1, 0); err != nil { 215 | t.Fatal(err.Error()) 216 | } 217 | 218 | if v, err := db.BScan(nil, 1, true, ""); err != nil { 219 | t.Fatal(err) 220 | } else if len(v) != 1 { 221 | t.Fatal("invalid length ", len(v)) 222 | } else if string(v[0]) != "k1" { 223 | t.Fatal("invalid value ", string(v[0])) 224 | } 225 | 226 | if v, err := db.BScan(k1, 2, true, ""); err != nil { 227 | t.Fatal(err) 228 | } else if len(v) != 2 { 229 | t.Fatal("invalid length ", len(v)) 230 | } else if string(v[0]) != "k1" { 231 | t.Fatal("invalid value ", string(v[0])) 232 | } else if string(v[1]) != "k2" { 233 | t.Fatal("invalid value ", 
string(v[1])) 234 | } 235 | 236 | if v, err := db.BScan(k1, 2, false, ""); err != nil { 237 | t.Fatal(err) 238 | } else if len(v) != 2 { 239 | t.Fatal("invalid length ", len(v)) 240 | } else if string(v[0]) != "k2" { 241 | t.Fatal("invalid value ", string(v[0])) 242 | } else if string(v[1]) != "k3" { 243 | t.Fatal("invalid value ", string(v[1])) 244 | } 245 | 246 | } 247 | 248 | func TestDBSScan(t *testing.T) { 249 | db := getTestDB() 250 | 251 | db.bFlush() 252 | 253 | k1 := []byte("k1") 254 | if _, err := db.SAdd(k1, []byte("1")); err != nil { 255 | t.Fatal(err.Error()) 256 | } 257 | 258 | k2 := []byte("k2") 259 | if _, err := db.SAdd(k2, []byte("1")); err != nil { 260 | t.Fatal(err.Error()) 261 | } 262 | k3 := []byte("k3") 263 | 264 | if _, err := db.SAdd(k3, []byte("1")); err != nil { 265 | t.Fatal(err.Error()) 266 | } 267 | 268 | if v, err := db.SScan(nil, 1, true, ""); err != nil { 269 | t.Fatal(err) 270 | } else if len(v) != 1 { 271 | t.Fatal("invalid length ", len(v)) 272 | } else if string(v[0]) != "k1" { 273 | t.Fatal("invalid value ", string(v[0])) 274 | } 275 | 276 | if v, err := db.SScan(k1, 2, true, ""); err != nil { 277 | t.Fatal(err) 278 | } else if len(v) != 2 { 279 | t.Fatal("invalid length ", len(v)) 280 | } else if string(v[0]) != "k1" { 281 | t.Fatal("invalid value ", string(v[0])) 282 | } else if string(v[1]) != "k2" { 283 | t.Fatal("invalid value ", string(v[1])) 284 | } 285 | 286 | if v, err := db.SScan(k1, 2, false, ""); err != nil { 287 | t.Fatal(err) 288 | } else if len(v) != 2 { 289 | t.Fatal("invalid length ", len(v)) 290 | } else if string(v[0]) != "k2" { 291 | t.Fatal("invalid value ", string(v[0])) 292 | } else if string(v[1]) != "k3" { 293 | t.Fatal("invalid value ", string(v[1])) 294 | } 295 | 296 | } 297 | -------------------------------------------------------------------------------- /t_kv.go: -------------------------------------------------------------------------------- 1 | package nodb 2 | 3 | import ( 4 | "errors" 5 | 
"time" 6 | ) 7 | 8 | type KVPair struct { 9 | Key []byte 10 | Value []byte 11 | } 12 | 13 | var errKVKey = errors.New("invalid encode kv key") 14 | 15 | func checkKeySize(key []byte) error { 16 | if len(key) > MaxKeySize || len(key) == 0 { 17 | return errKeySize 18 | } 19 | return nil 20 | } 21 | 22 | func checkValueSize(value []byte) error { 23 | if len(value) > MaxValueSize { 24 | return errValueSize 25 | } 26 | 27 | return nil 28 | } 29 | 30 | func (db *DB) encodeKVKey(key []byte) []byte { 31 | ek := make([]byte, len(key)+2) 32 | ek[0] = db.index 33 | ek[1] = KVType 34 | copy(ek[2:], key) 35 | return ek 36 | } 37 | 38 | func (db *DB) decodeKVKey(ek []byte) ([]byte, error) { 39 | if len(ek) < 2 || ek[0] != db.index || ek[1] != KVType { 40 | return nil, errKVKey 41 | } 42 | 43 | return ek[2:], nil 44 | } 45 | 46 | func (db *DB) encodeKVMinKey() []byte { 47 | ek := db.encodeKVKey(nil) 48 | return ek 49 | } 50 | 51 | func (db *DB) encodeKVMaxKey() []byte { 52 | ek := db.encodeKVKey(nil) 53 | ek[len(ek)-1] = KVType + 1 54 | return ek 55 | } 56 | 57 | func (db *DB) incr(key []byte, delta int64) (int64, error) { 58 | if err := checkKeySize(key); err != nil { 59 | return 0, err 60 | } 61 | 62 | var err error 63 | key = db.encodeKVKey(key) 64 | 65 | t := db.kvBatch 66 | 67 | t.Lock() 68 | defer t.Unlock() 69 | 70 | var n int64 71 | n, err = StrInt64(db.bucket.Get(key)) 72 | if err != nil { 73 | return 0, err 74 | } 75 | 76 | n += delta 77 | 78 | t.Put(key, StrPutInt64(n)) 79 | 80 | //todo binlog 81 | 82 | err = t.Commit() 83 | return n, err 84 | } 85 | 86 | // ps : here just focus on deleting the key-value data, 87 | // any other likes expire is ignore. 
88 | func (db *DB) delete(t *batch, key []byte) int64 { 89 | key = db.encodeKVKey(key) 90 | t.Delete(key) 91 | return 1 92 | } 93 | 94 | func (db *DB) setExpireAt(key []byte, when int64) (int64, error) { 95 | t := db.kvBatch 96 | t.Lock() 97 | defer t.Unlock() 98 | 99 | if exist, err := db.Exists(key); err != nil || exist == 0 { 100 | return 0, err 101 | } else { 102 | db.expireAt(t, KVType, key, when) 103 | if err := t.Commit(); err != nil { 104 | return 0, err 105 | } 106 | } 107 | return 1, nil 108 | } 109 | 110 | func (db *DB) Decr(key []byte) (int64, error) { 111 | return db.incr(key, -1) 112 | } 113 | 114 | func (db *DB) DecrBy(key []byte, decrement int64) (int64, error) { 115 | return db.incr(key, -decrement) 116 | } 117 | 118 | func (db *DB) Del(keys ...[]byte) (int64, error) { 119 | if len(keys) == 0 { 120 | return 0, nil 121 | } 122 | 123 | codedKeys := make([][]byte, len(keys)) 124 | for i, k := range keys { 125 | codedKeys[i] = db.encodeKVKey(k) 126 | } 127 | 128 | t := db.kvBatch 129 | t.Lock() 130 | defer t.Unlock() 131 | 132 | for i, k := range keys { 133 | t.Delete(codedKeys[i]) 134 | db.rmExpire(t, KVType, k) 135 | } 136 | 137 | err := t.Commit() 138 | return int64(len(keys)), err 139 | } 140 | 141 | func (db *DB) Exists(key []byte) (int64, error) { 142 | if err := checkKeySize(key); err != nil { 143 | return 0, err 144 | } 145 | 146 | var err error 147 | key = db.encodeKVKey(key) 148 | 149 | var v []byte 150 | v, err = db.bucket.Get(key) 151 | if v != nil && err == nil { 152 | return 1, nil 153 | } 154 | 155 | return 0, err 156 | } 157 | 158 | func (db *DB) Get(key []byte) ([]byte, error) { 159 | if err := checkKeySize(key); err != nil { 160 | return nil, err 161 | } 162 | 163 | key = db.encodeKVKey(key) 164 | 165 | return db.bucket.Get(key) 166 | } 167 | 168 | func (db *DB) GetSet(key []byte, value []byte) ([]byte, error) { 169 | if err := checkKeySize(key); err != nil { 170 | return nil, err 171 | } else if err := checkValueSize(value); err != 
nil { 172 | return nil, err 173 | } 174 | 175 | key = db.encodeKVKey(key) 176 | 177 | t := db.kvBatch 178 | 179 | t.Lock() 180 | defer t.Unlock() 181 | 182 | oldValue, err := db.bucket.Get(key) 183 | if err != nil { 184 | return nil, err 185 | } 186 | 187 | t.Put(key, value) 188 | //todo, binlog 189 | 190 | err = t.Commit() 191 | 192 | return oldValue, err 193 | } 194 | 195 | func (db *DB) Incr(key []byte) (int64, error) { 196 | return db.incr(key, 1) 197 | } 198 | 199 | func (db *DB) IncrBy(key []byte, increment int64) (int64, error) { 200 | return db.incr(key, increment) 201 | } 202 | 203 | func (db *DB) MGet(keys ...[]byte) ([][]byte, error) { 204 | values := make([][]byte, len(keys)) 205 | 206 | it := db.bucket.NewIterator() 207 | defer it.Close() 208 | 209 | for i := range keys { 210 | if err := checkKeySize(keys[i]); err != nil { 211 | return nil, err 212 | } 213 | 214 | values[i] = it.Find(db.encodeKVKey(keys[i])) 215 | } 216 | 217 | return values, nil 218 | } 219 | 220 | func (db *DB) MSet(args ...KVPair) error { 221 | if len(args) == 0 { 222 | return nil 223 | } 224 | 225 | t := db.kvBatch 226 | 227 | var err error 228 | var key []byte 229 | var value []byte 230 | 231 | t.Lock() 232 | defer t.Unlock() 233 | 234 | for i := 0; i < len(args); i++ { 235 | if err := checkKeySize(args[i].Key); err != nil { 236 | return err 237 | } else if err := checkValueSize(args[i].Value); err != nil { 238 | return err 239 | } 240 | 241 | key = db.encodeKVKey(args[i].Key) 242 | 243 | value = args[i].Value 244 | 245 | t.Put(key, value) 246 | 247 | //todo binlog 248 | } 249 | 250 | err = t.Commit() 251 | return err 252 | } 253 | 254 | func (db *DB) Set(key []byte, value []byte) error { 255 | if err := checkKeySize(key); err != nil { 256 | return err 257 | } else if err := checkValueSize(value); err != nil { 258 | return err 259 | } 260 | 261 | var err error 262 | key = db.encodeKVKey(key) 263 | 264 | t := db.kvBatch 265 | 266 | t.Lock() 267 | defer t.Unlock() 268 | 269 | 
t.Put(key, value) 270 | 271 | err = t.Commit() 272 | 273 | return err 274 | } 275 | 276 | func (db *DB) SetNX(key []byte, value []byte) (int64, error) { 277 | if err := checkKeySize(key); err != nil { 278 | return 0, err 279 | } else if err := checkValueSize(value); err != nil { 280 | return 0, err 281 | } 282 | 283 | var err error 284 | key = db.encodeKVKey(key) 285 | 286 | var n int64 = 1 287 | 288 | t := db.kvBatch 289 | 290 | t.Lock() 291 | defer t.Unlock() 292 | 293 | if v, err := db.bucket.Get(key); err != nil { 294 | return 0, err 295 | } else if v != nil { 296 | n = 0 297 | } else { 298 | t.Put(key, value) 299 | 300 | //todo binlog 301 | 302 | err = t.Commit() 303 | } 304 | 305 | return n, err 306 | } 307 | 308 | func (db *DB) flush() (drop int64, err error) { 309 | t := db.kvBatch 310 | t.Lock() 311 | defer t.Unlock() 312 | return db.flushType(t, KVType) 313 | } 314 | 315 | //if inclusive is true, scan range [key, inf) else (key, inf) 316 | func (db *DB) Scan(key []byte, count int, inclusive bool, match string) ([][]byte, error) { 317 | return db.scan(KVType, key, count, inclusive, match) 318 | } 319 | 320 | func (db *DB) Expire(key []byte, duration int64) (int64, error) { 321 | if duration <= 0 { 322 | return 0, errExpireValue 323 | } 324 | 325 | return db.setExpireAt(key, time.Now().Unix()+duration) 326 | } 327 | 328 | func (db *DB) ExpireAt(key []byte, when int64) (int64, error) { 329 | if when <= time.Now().Unix() { 330 | return 0, errExpireValue 331 | } 332 | 333 | return db.setExpireAt(key, when) 334 | } 335 | 336 | func (db *DB) TTL(key []byte) (int64, error) { 337 | if err := checkKeySize(key); err != nil { 338 | return -1, err 339 | } 340 | 341 | return db.ttl(KVType, key) 342 | } 343 | 344 | func (db *DB) Persist(key []byte) (int64, error) { 345 | if err := checkKeySize(key); err != nil { 346 | return 0, err 347 | } 348 | 349 | t := db.kvBatch 350 | t.Lock() 351 | defer t.Unlock() 352 | n, err := db.rmExpire(t, KVType, key) 353 | if err != nil { 
354 | return 0, err 355 | } 356 | 357 | err = t.Commit() 358 | return n, err 359 | } 360 | 361 | func (db *DB) Lock() { 362 | t := db.kvBatch 363 | t.Lock() 364 | } 365 | 366 | func (db *DB) Remove(key []byte) bool { 367 | if len(key) == 0 { 368 | return false 369 | } 370 | t := db.kvBatch 371 | t.Delete(db.encodeKVKey(key)) 372 | _, err := db.rmExpire(t, KVType, key) 373 | if err != nil { 374 | return false 375 | } 376 | return true 377 | } 378 | 379 | func (db *DB) Commit() error { 380 | t := db.kvBatch 381 | return t.Commit() 382 | } 383 | 384 | func (db *DB) Unlock() { 385 | t := db.kvBatch 386 | t.Unlock() 387 | } 388 | -------------------------------------------------------------------------------- /binlog.go: -------------------------------------------------------------------------------- 1 | package nodb 2 | 3 | import ( 4 | "bufio" 5 | "encoding/binary" 6 | "fmt" 7 | "io" 8 | "io/ioutil" 9 | "os" 10 | "path" 11 | "strconv" 12 | "strings" 13 | "sync" 14 | "time" 15 | 16 | "github.com/lunny/log" 17 | "github.com/lunny/nodb/config" 18 | ) 19 | 20 | type BinLogHead struct { 21 | CreateTime uint32 22 | BatchId uint32 23 | PayloadLen uint32 24 | } 25 | 26 | func (h *BinLogHead) Len() int { 27 | return 12 28 | } 29 | 30 | func (h *BinLogHead) Write(w io.Writer) error { 31 | if err := binary.Write(w, binary.BigEndian, h.CreateTime); err != nil { 32 | return err 33 | } 34 | 35 | if err := binary.Write(w, binary.BigEndian, h.BatchId); err != nil { 36 | return err 37 | } 38 | 39 | if err := binary.Write(w, binary.BigEndian, h.PayloadLen); err != nil { 40 | return err 41 | } 42 | 43 | return nil 44 | } 45 | 46 | func (h *BinLogHead) handleReadError(err error) error { 47 | if err == io.EOF { 48 | return io.ErrUnexpectedEOF 49 | } else { 50 | return err 51 | } 52 | } 53 | 54 | func (h *BinLogHead) Read(r io.Reader) error { 55 | var err error 56 | if err = binary.Read(r, binary.BigEndian, &h.CreateTime); err != nil { 57 | return err 58 | } 59 | 60 | if err = 
binary.Read(r, binary.BigEndian, &h.BatchId); err != nil { 61 | return h.handleReadError(err) 62 | } 63 | 64 | if err = binary.Read(r, binary.BigEndian, &h.PayloadLen); err != nil { 65 | return h.handleReadError(err) 66 | } 67 | 68 | return nil 69 | } 70 | 71 | func (h *BinLogHead) InSameBatch(ho *BinLogHead) bool { 72 | if h.CreateTime == ho.CreateTime && h.BatchId == ho.BatchId { 73 | return true 74 | } else { 75 | return false 76 | } 77 | } 78 | 79 | /* 80 | index file format: 81 | ledis-bin.00001 82 | ledis-bin.00002 83 | ledis-bin.00003 84 | 85 | log file format 86 | 87 | Log: Head|PayloadData 88 | 89 | Head: createTime|batchId|payloadData 90 | 91 | */ 92 | 93 | type BinLog struct { 94 | sync.Mutex 95 | 96 | path string 97 | 98 | cfg *config.BinLogConfig 99 | 100 | logFile *os.File 101 | 102 | logWb *bufio.Writer 103 | 104 | indexName string 105 | logNames []string 106 | lastLogIndex int64 107 | 108 | batchId uint32 109 | 110 | ch chan struct{} 111 | } 112 | 113 | func NewBinLog(cfg *config.Config) (*BinLog, error) { 114 | l := new(BinLog) 115 | 116 | l.cfg = &cfg.BinLog 117 | l.cfg.Adjust() 118 | 119 | l.path = path.Join(cfg.DataDir, "binlog") 120 | 121 | if err := os.MkdirAll(l.path, os.ModePerm); err != nil { 122 | return nil, err 123 | } 124 | 125 | l.logNames = make([]string, 0, 16) 126 | 127 | l.ch = make(chan struct{}) 128 | 129 | if err := l.loadIndex(); err != nil { 130 | return nil, err 131 | } 132 | 133 | return l, nil 134 | } 135 | 136 | func (l *BinLog) flushIndex() error { 137 | data := strings.Join(l.logNames, "\n") 138 | 139 | bakName := fmt.Sprintf("%s.bak", l.indexName) 140 | f, err := os.OpenFile(bakName, os.O_WRONLY|os.O_CREATE, 0666) 141 | if err != nil { 142 | log.Error("create binlog bak index error %s", err.Error()) 143 | return err 144 | } 145 | 146 | if _, err := f.WriteString(data); err != nil { 147 | log.Error("write binlog index error %s", err.Error()) 148 | f.Close() 149 | return err 150 | } 151 | 152 | f.Close() 153 | 154 | if 
err := os.Rename(bakName, l.indexName); err != nil { 155 | log.Error("rename binlog bak index error %s", err.Error()) 156 | return err 157 | } 158 | 159 | return nil 160 | } 161 | 162 | func (l *BinLog) loadIndex() error { 163 | l.indexName = path.Join(l.path, fmt.Sprintf("ledis-bin.index")) 164 | if _, err := os.Stat(l.indexName); os.IsNotExist(err) { 165 | //no index file, nothing to do 166 | } else { 167 | indexData, err := ioutil.ReadFile(l.indexName) 168 | if err != nil { 169 | return err 170 | } 171 | 172 | lines := strings.Split(string(indexData), "\n") 173 | for _, line := range lines { 174 | line = strings.Trim(line, "\r\n ") 175 | if len(line) == 0 { 176 | continue 177 | } 178 | 179 | if _, err := os.Stat(path.Join(l.path, line)); err != nil { 180 | log.Error("load index line %s error %s", line, err.Error()) 181 | return err 182 | } else { 183 | l.logNames = append(l.logNames, line) 184 | } 185 | } 186 | } 187 | if l.cfg.MaxFileNum > 0 && len(l.logNames) > l.cfg.MaxFileNum { 188 | //remove oldest logfile 189 | if err := l.Purge(len(l.logNames) - l.cfg.MaxFileNum); err != nil { 190 | return err 191 | } 192 | } 193 | 194 | var err error 195 | if len(l.logNames) == 0 { 196 | l.lastLogIndex = 1 197 | } else { 198 | lastName := l.logNames[len(l.logNames)-1] 199 | 200 | if l.lastLogIndex, err = strconv.ParseInt(path.Ext(lastName)[1:], 10, 64); err != nil { 201 | log.Error("invalid logfile name %s", err.Error()) 202 | return err 203 | } 204 | 205 | //like mysql, if server restart, a new binlog will create 206 | l.lastLogIndex++ 207 | } 208 | 209 | return nil 210 | } 211 | 212 | func (l *BinLog) getLogFile() string { 213 | return l.FormatLogFileName(l.lastLogIndex) 214 | } 215 | 216 | func (l *BinLog) openNewLogFile() error { 217 | var err error 218 | lastName := l.getLogFile() 219 | 220 | logPath := path.Join(l.path, lastName) 221 | if l.logFile, err = os.OpenFile(logPath, os.O_CREATE|os.O_WRONLY, 0666); err != nil { 222 | log.Error("open new logfile error %s", 
err.Error()) 223 | return err 224 | } 225 | 226 | if l.cfg.MaxFileNum > 0 && len(l.logNames) == l.cfg.MaxFileNum { 227 | l.purge(1) 228 | } 229 | 230 | l.logNames = append(l.logNames, lastName) 231 | 232 | if l.logWb == nil { 233 | l.logWb = bufio.NewWriterSize(l.logFile, 1024) 234 | } else { 235 | l.logWb.Reset(l.logFile) 236 | } 237 | 238 | if err = l.flushIndex(); err != nil { 239 | return err 240 | } 241 | 242 | return nil 243 | } 244 | 245 | func (l *BinLog) checkLogFileSize() bool { 246 | if l.logFile == nil { 247 | return false 248 | } 249 | 250 | st, _ := l.logFile.Stat() 251 | if st.Size() >= int64(l.cfg.MaxFileSize) { 252 | l.closeLog() 253 | return true 254 | } 255 | 256 | return false 257 | } 258 | 259 | func (l *BinLog) closeLog() { 260 | l.lastLogIndex++ 261 | 262 | l.logFile.Close() 263 | l.logFile = nil 264 | } 265 | 266 | func (l *BinLog) purge(n int) { 267 | for i := 0; i < n; i++ { 268 | logPath := path.Join(l.path, l.logNames[i]) 269 | os.Remove(logPath) 270 | } 271 | 272 | copy(l.logNames[0:], l.logNames[n:]) 273 | l.logNames = l.logNames[0 : len(l.logNames)-n] 274 | } 275 | 276 | func (l *BinLog) Close() { 277 | if l.logFile != nil { 278 | l.logFile.Close() 279 | l.logFile = nil 280 | } 281 | } 282 | 283 | func (l *BinLog) LogNames() []string { 284 | return l.logNames 285 | } 286 | 287 | func (l *BinLog) LogFileName() string { 288 | return l.getLogFile() 289 | } 290 | 291 | func (l *BinLog) LogFilePos() int64 { 292 | if l.logFile == nil { 293 | return 0 294 | } else { 295 | st, _ := l.logFile.Stat() 296 | return st.Size() 297 | } 298 | } 299 | 300 | func (l *BinLog) LogFileIndex() int64 { 301 | return l.lastLogIndex 302 | } 303 | 304 | func (l *BinLog) FormatLogFileName(index int64) string { 305 | return fmt.Sprintf("ledis-bin.%07d", index) 306 | } 307 | 308 | func (l *BinLog) FormatLogFilePath(index int64) string { 309 | return path.Join(l.path, l.FormatLogFileName(index)) 310 | } 311 | 312 | func (l *BinLog) LogPath() string { 313 | return 
l.path 314 | } 315 | 316 | func (l *BinLog) Purge(n int) error { 317 | l.Lock() 318 | defer l.Unlock() 319 | 320 | if len(l.logNames) == 0 { 321 | return nil 322 | } 323 | 324 | if n >= len(l.logNames) { 325 | n = len(l.logNames) 326 | //can not purge current log file 327 | if l.logNames[n-1] == l.getLogFile() { 328 | n = n - 1 329 | } 330 | } 331 | 332 | l.purge(n) 333 | 334 | return l.flushIndex() 335 | } 336 | 337 | func (l *BinLog) PurgeAll() error { 338 | l.Lock() 339 | defer l.Unlock() 340 | 341 | l.closeLog() 342 | return l.openNewLogFile() 343 | } 344 | 345 | func (l *BinLog) Log(args ...[]byte) error { 346 | l.Lock() 347 | defer l.Unlock() 348 | 349 | var err error 350 | 351 | if l.logFile == nil { 352 | if err = l.openNewLogFile(); err != nil { 353 | return err 354 | } 355 | } 356 | 357 | head := &BinLogHead{} 358 | 359 | head.CreateTime = uint32(time.Now().Unix()) 360 | head.BatchId = l.batchId 361 | 362 | l.batchId++ 363 | 364 | for _, data := range args { 365 | head.PayloadLen = uint32(len(data)) 366 | 367 | if err := head.Write(l.logWb); err != nil { 368 | return err 369 | } 370 | 371 | if _, err := l.logWb.Write(data); err != nil { 372 | return err 373 | } 374 | } 375 | 376 | if err = l.logWb.Flush(); err != nil { 377 | log.Error("write log error %s", err.Error()) 378 | return err 379 | } 380 | 381 | l.checkLogFileSize() 382 | 383 | close(l.ch) 384 | l.ch = make(chan struct{}) 385 | 386 | return nil 387 | } 388 | 389 | func (l *BinLog) Wait() <-chan struct{} { 390 | return l.ch 391 | } 392 | -------------------------------------------------------------------------------- /t_set_test.go: -------------------------------------------------------------------------------- 1 | package nodb 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | "time" 7 | ) 8 | 9 | func TestSetCodec(t *testing.T) { 10 | db := getTestDB() 11 | 12 | key := []byte("key") 13 | member := []byte("member") 14 | 15 | ek := db.sEncodeSizeKey(key) 16 | if k, err := db.sDecodeSizeKey(ek); err != 
nil { 17 | t.Fatal(err) 18 | } else if string(k) != "key" { 19 | t.Fatal(string(k)) 20 | } 21 | 22 | ek = db.sEncodeSetKey(key, member) 23 | if k, m, err := db.sDecodeSetKey(ek); err != nil { 24 | t.Fatal(err) 25 | } else if string(k) != "key" { 26 | t.Fatal(string(k)) 27 | } else if string(m) != "member" { 28 | t.Fatal(string(m)) 29 | } 30 | } 31 | 32 | func TestDBSet(t *testing.T) { 33 | db := getTestDB() 34 | 35 | key := []byte("testdb_set_a") 36 | member := []byte("member") 37 | key1 := []byte("testdb_set_a1") 38 | key2 := []byte("testdb_set_a2") 39 | member1 := []byte("testdb_set_m1") 40 | member2 := []byte("testdb_set_m2") 41 | 42 | // if n, err := db.sSetItem(key, []byte("m1")); err != nil { 43 | // t.Fatal(err) 44 | // } else if n != 1 { 45 | // t.Fatal(n) 46 | // } 47 | 48 | // if size, err := db.sIncrSize(key, 1); err != nil { 49 | // t.Fatal(err) 50 | // } else if size != 1 { 51 | // t.Fatal(size) 52 | // } 53 | 54 | if n, err := db.SAdd(key, member); err != nil { 55 | t.Fatal(err) 56 | } else if n != 1 { 57 | t.Fatal(n) 58 | } 59 | 60 | if cnt, err := db.SCard(key); err != nil { 61 | t.Fatal(err) 62 | } else if cnt != 1 { 63 | t.Fatal(cnt) 64 | } 65 | 66 | if n, err := db.SIsMember(key, member); err != nil { 67 | t.Fatal(err) 68 | } else if n != 1 { 69 | t.Fatal(n) 70 | } 71 | 72 | if v, err := db.SMembers(key); err != nil { 73 | t.Fatal(err) 74 | } else if string(v[0]) != "member" { 75 | t.Fatal(string(v[0])) 76 | } 77 | 78 | if n, err := db.SRem(key, member); err != nil { 79 | t.Fatal(err) 80 | } else if n != 1 { 81 | t.Fatal(n) 82 | } 83 | 84 | db.SAdd(key1, member1, member2) 85 | 86 | if n, err := db.SClear(key1); err != nil { 87 | t.Fatal(err) 88 | } else if n != 2 { 89 | t.Fatal(n) 90 | } 91 | 92 | db.SAdd(key1, member1, member2) 93 | db.SAdd(key2, member1, member2, []byte("xxx")) 94 | 95 | if n, _ := db.SCard(key2); n != 3 { 96 | t.Fatal(n) 97 | } 98 | if n, err := db.SMclear(key1, key2); err != nil { 99 | t.Fatal(err) 100 | } else if n != 2 { 
101 | t.Fatal(n) 102 | } 103 | 104 | db.SAdd(key2, member1, member2) 105 | if n, err := db.SExpire(key2, 3600); err != nil { 106 | t.Fatal(err) 107 | } else if n != 1 { 108 | t.Fatal(n) 109 | } 110 | 111 | if n, err := db.SExpireAt(key2, time.Now().Unix()+3600); err != nil { 112 | t.Fatal(err) 113 | } else if n != 1 { 114 | t.Fatal(n) 115 | } 116 | 117 | if n, err := db.STTL(key2); err != nil { 118 | t.Fatal(err) 119 | } else if n < 0 { 120 | t.Fatal(n) 121 | } 122 | 123 | if n, err := db.SPersist(key2); err != nil { 124 | t.Fatal(err) 125 | } else if n != 1 { 126 | t.Fatal(n) 127 | } 128 | 129 | } 130 | 131 | func TestSetOperation(t *testing.T) { 132 | db := getTestDB() 133 | testUnion(db, t) 134 | testInter(db, t) 135 | testDiff(db, t) 136 | 137 | } 138 | 139 | func testUnion(db *DB, t *testing.T) { 140 | 141 | key := []byte("testdb_set_union_1") 142 | key1 := []byte("testdb_set_union_2") 143 | key2 := []byte("testdb_set_union_2") 144 | // member1 := []byte("testdb_set_m1") 145 | // member2 := []byte("testdb_set_m2") 146 | 147 | m1 := []byte("m1") 148 | m2 := []byte("m2") 149 | m3 := []byte("m3") 150 | db.SAdd(key, m1, m2) 151 | db.SAdd(key1, m1, m2, m3) 152 | db.SAdd(key2, m2, m3) 153 | if _, err := db.sUnionGeneric(key, key2); err != nil { 154 | t.Fatal(err) 155 | } 156 | 157 | if _, err := db.SUnion(key, key2); err != nil { 158 | t.Fatal(err) 159 | } 160 | 161 | dstkey := []byte("union_dsk") 162 | db.SAdd(dstkey, []byte("x")) 163 | if num, err := db.SUnionStore(dstkey, key1, key2); err != nil { 164 | t.Fatal(err) 165 | } else if num != 3 { 166 | t.Fatal(num) 167 | } 168 | 169 | if _, err := db.SMembers(dstkey); err != nil { 170 | t.Fatal(err) 171 | } 172 | 173 | if n, err := db.SCard(dstkey); err != nil { 174 | t.Fatal(err) 175 | } else if n != 3 { 176 | t.Fatal(n) 177 | } 178 | 179 | v1, _ := db.SUnion(key1, key2) 180 | v2, _ := db.SUnion(key2, key1) 181 | if len(v1) != len(v2) { 182 | t.Fatal(v1, v2) 183 | } 184 | 185 | v1, _ = db.SUnion(key, key1, key2) 186 
| v2, _ = db.SUnion(key, key2, key1) 187 | if len(v1) != len(v2) { 188 | t.Fatal(v1, v2) 189 | } 190 | 191 | if v, err := db.SUnion(key, key); err != nil { 192 | t.Fatal(err) 193 | } else if len(v) != 2 { 194 | t.Fatal(v) 195 | } 196 | 197 | empKey := []byte("0") 198 | if v, err := db.SUnion(key, empKey); err != nil { 199 | t.Fatal(err) 200 | } else if len(v) != 2 { 201 | t.Fatal(v) 202 | } 203 | } 204 | 205 | func testInter(db *DB, t *testing.T) { 206 | key1 := []byte("testdb_set_inter_1") 207 | key2 := []byte("testdb_set_inter_2") 208 | key3 := []byte("testdb_set_inter_3") 209 | 210 | m1 := []byte("m1") 211 | m2 := []byte("m2") 212 | m3 := []byte("m3") 213 | m4 := []byte("m4") 214 | 215 | db.SAdd(key1, m1, m2) 216 | db.SAdd(key2, m2, m3, m4) 217 | db.SAdd(key3, m2, m4) 218 | 219 | if v, err := db.sInterGeneric(key1, key2); err != nil { 220 | t.Fatal(err) 221 | } else if len(v) != 1 { 222 | t.Fatal(v) 223 | } 224 | 225 | if v, err := db.SInter(key1, key2); err != nil { 226 | t.Fatal(err) 227 | } else if len(v) != 1 { 228 | t.Fatal(v) 229 | } 230 | 231 | dstKey := []byte("inter_dsk") 232 | if n, err := db.SInterStore(dstKey, key1, key2); err != nil { 233 | t.Fatal(err) 234 | } else if n != 1 { 235 | t.Fatal(n) 236 | } 237 | 238 | k1 := []byte("set_k1") 239 | k2 := []byte("set_k2") 240 | 241 | db.SAdd(k1, m1, m3, m4) 242 | db.SAdd(k2, m2, m3) 243 | if n, err := db.SInterStore([]byte("set_xxx"), k1, k2); err != nil { 244 | t.Fatal(err) 245 | } else if n != 1 { 246 | t.Fatal(n) 247 | } 248 | 249 | v1, _ := db.SInter(key1, key2) 250 | v2, _ := db.SInter(key2, key1) 251 | if len(v1) != len(v2) { 252 | t.Fatal(v1, v2) 253 | } 254 | 255 | v1, _ = db.SInter(key1, key2, key3) 256 | v2, _ = db.SInter(key2, key3, key1) 257 | if len(v1) != len(v2) { 258 | t.Fatal(v1, v2) 259 | } 260 | 261 | if v, err := db.SInter(key1, key1); err != nil { 262 | t.Fatal(err) 263 | } else if len(v) != 2 { 264 | t.Fatal(v) 265 | } 266 | 267 | empKey := []byte("0") 268 | if v, err := 
db.SInter(key1, empKey); err != nil { 269 | t.Fatal(err) 270 | } else if len(v) != 0 { 271 | t.Fatal(v) 272 | } 273 | 274 | if v, err := db.SInter(empKey, key2); err != nil { 275 | t.Fatal(err) 276 | } else if len(v) != 0 { 277 | t.Fatal(v) 278 | } 279 | } 280 | 281 | func testDiff(db *DB, t *testing.T) { 282 | key0 := []byte("testdb_set_diff_0") 283 | key1 := []byte("testdb_set_diff_1") 284 | key2 := []byte("testdb_set_diff_2") 285 | key3 := []byte("testdb_set_diff_3") 286 | 287 | m1 := []byte("m1") 288 | m2 := []byte("m2") 289 | m3 := []byte("m3") 290 | m4 := []byte("m4") 291 | 292 | db.SAdd(key1, m1, m2) 293 | db.SAdd(key2, m2, m3, m4) 294 | db.SAdd(key3, m3) 295 | 296 | if _, err := db.sDiffGeneric(key1, key2); err != nil { 297 | t.Fatal(err) 298 | } 299 | 300 | if v, err := db.SDiff(key1, key2); err != nil { 301 | t.Fatal(err) 302 | } else if len(v) != 1 { 303 | t.Fatal(v) 304 | } 305 | 306 | dstKey := []byte("diff_dsk") 307 | if n, err := db.SDiffStore(dstKey, key1, key2); err != nil { 308 | t.Fatal(err) 309 | } else if n != 1 { 310 | t.Fatal(n) 311 | } 312 | 313 | if v, err := db.SDiff(key2, key1); err != nil { 314 | t.Fatal(err) 315 | } else if len(v) != 2 { 316 | t.Fatal(v) 317 | } 318 | 319 | if v, err := db.SDiff(key1, key2, key3); err != nil { 320 | t.Fatal(err) 321 | } else if len(v) != 1 { 322 | t.Fatal(v) //return 1 323 | } 324 | 325 | if v, err := db.SDiff(key2, key2); err != nil { 326 | t.Fatal(err) 327 | } else if len(v) != 0 { 328 | t.Fatal(v) 329 | } 330 | 331 | if v, err := db.SDiff(key0, key1); err != nil { 332 | t.Fatal(err) 333 | } else if len(v) != 0 { 334 | t.Fatal(v) 335 | } 336 | 337 | if v, err := db.SDiff(key1, key0); err != nil { 338 | t.Fatal(err) 339 | } else if len(v) != 2 { 340 | t.Fatal(v) 341 | } 342 | } 343 | 344 | func TestSFlush(t *testing.T) { 345 | db := getTestDB() 346 | db.FlushAll() 347 | 348 | for i := 0; i < 2000; i++ { 349 | key := fmt.Sprintf("%d", i) 350 | if _, err := db.SAdd([]byte(key), []byte("v")); err != nil { 
351 | t.Fatal(err.Error()) 352 | } 353 | } 354 | 355 | if v, err := db.SScan(nil, 3000, true, ""); err != nil { 356 | t.Fatal(err.Error()) 357 | } else if len(v) != 2000 { 358 | t.Fatal("invalid value ", len(v)) 359 | } 360 | 361 | if n, err := db.sFlush(); err != nil { 362 | t.Fatal(err.Error()) 363 | } else if n != 2000 { 364 | t.Fatal("invalid value ", n) 365 | } 366 | 367 | if v, err := db.SScan(nil, 3000, true, ""); err != nil { 368 | t.Fatal(err.Error()) 369 | } else if len(v) != 0 { 370 | t.Fatal("invalid value length ", len(v)) 371 | } 372 | 373 | } 374 | -------------------------------------------------------------------------------- /t_zset_test.go: -------------------------------------------------------------------------------- 1 | package nodb 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | ) 7 | 8 | const ( 9 | endPos int = -1 10 | ) 11 | 12 | func bin(sz string) []byte { 13 | return []byte(sz) 14 | } 15 | 16 | func pair(memb string, score int) ScorePair { 17 | return ScorePair{int64(score), bin(memb)} 18 | } 19 | 20 | func TestZSetCodec(t *testing.T) { 21 | db := getTestDB() 22 | 23 | key := []byte("key") 24 | member := []byte("member") 25 | 26 | ek := db.zEncodeSizeKey(key) 27 | if k, err := db.zDecodeSizeKey(ek); err != nil { 28 | t.Fatal(err) 29 | } else if string(k) != "key" { 30 | t.Fatal(string(k)) 31 | } 32 | 33 | ek = db.zEncodeSetKey(key, member) 34 | if k, m, err := db.zDecodeSetKey(ek); err != nil { 35 | t.Fatal(err) 36 | } else if string(k) != "key" { 37 | t.Fatal(string(k)) 38 | } else if string(m) != "member" { 39 | t.Fatal(string(m)) 40 | } 41 | 42 | ek = db.zEncodeScoreKey(key, member, 100) 43 | if k, m, s, err := db.zDecodeScoreKey(ek); err != nil { 44 | t.Fatal(err) 45 | } else if string(k) != "key" { 46 | t.Fatal(string(k)) 47 | } else if string(m) != "member" { 48 | t.Fatal(string(m)) 49 | } else if s != 100 { 50 | t.Fatal(s) 51 | } 52 | 53 | } 54 | 55 | func TestDBZSet(t *testing.T) { 56 | db := getTestDB() 57 | 58 | key := 
bin("testdb_zset_a") 59 | 60 | // {'a':0, 'b':1, 'c':2, 'd':3} 61 | if n, err := db.ZAdd(key, pair("a", 0), pair("b", 1), 62 | pair("c", 2), pair("d", 3)); err != nil { 63 | t.Fatal(err) 64 | } else if n != 4 { 65 | t.Fatal(n) 66 | } 67 | 68 | if n, err := db.ZCount(key, 0, 0XFF); err != nil { 69 | t.Fatal(err) 70 | } else if n != 4 { 71 | t.Fatal(n) 72 | } 73 | 74 | if s, err := db.ZScore(key, bin("d")); err != nil { 75 | t.Fatal(err) 76 | } else if s != 3 { 77 | t.Fatal(s) 78 | } 79 | 80 | if s, err := db.ZScore(key, bin("zzz")); err != ErrScoreMiss || s != InvalidScore { 81 | t.Fatal(fmt.Sprintf("s=[%d] err=[%s]", s, err)) 82 | } 83 | 84 | // {c':2, 'd':3} 85 | if n, err := db.ZRem(key, bin("a"), bin("b")); err != nil { 86 | t.Fatal(err) 87 | } else if n != 2 { 88 | t.Fatal(n) 89 | } 90 | 91 | if n, err := db.ZRem(key, bin("a"), bin("b")); err != nil { 92 | t.Fatal(err) 93 | } else if n != 0 { 94 | t.Fatal(n) 95 | } 96 | 97 | if n, err := db.ZCard(key); err != nil { 98 | t.Fatal(err) 99 | } else if n != 2 { 100 | t.Fatal(n) 101 | } 102 | 103 | // {} 104 | if n, err := db.ZClear(key); err != nil { 105 | t.Fatal(err) 106 | } else if n != 2 { 107 | t.Fatal(n) 108 | } 109 | 110 | if n, err := db.ZCount(key, 0, 0XFF); err != nil { 111 | t.Fatal(err) 112 | } else if n != 0 { 113 | t.Fatal(n) 114 | } 115 | } 116 | 117 | func TestZSetOrder(t *testing.T) { 118 | db := getTestDB() 119 | 120 | key := bin("testdb_zset_order") 121 | 122 | // {'a':0, 'b':1, 'c':2, 'd':3, 'e':4, 'f':5} 123 | membs := [...]string{"a", "b", "c", "d", "e", "f"} 124 | membCnt := len(membs) 125 | 126 | for i := 0; i < membCnt; i++ { 127 | db.ZAdd(key, pair(membs[i], i)) 128 | } 129 | 130 | if n, _ := db.ZCount(key, 0, 0XFFFF); int(n) != membCnt { 131 | t.Fatal(n) 132 | } 133 | 134 | for i := 0; i < membCnt; i++ { 135 | if pos, err := db.ZRank(key, bin(membs[i])); err != nil { 136 | t.Fatal(err) 137 | } else if int(pos) != i { 138 | t.Fatal(pos) 139 | } 140 | 141 | if pos, err := db.ZRevRank(key, 
bin(membs[i])); err != nil { 142 | t.Fatal(err) 143 | } else if int(pos) != membCnt-i-1 { 144 | t.Fatal(pos) 145 | } 146 | } 147 | 148 | if qMembs, err := db.ZRange(key, 0, endPos); err != nil { 149 | t.Fatal(err) 150 | } else if len(qMembs) != membCnt { 151 | t.Fatal(fmt.Sprintf("%d vs %d", len(qMembs), membCnt)) 152 | } else { 153 | for i := 0; i < membCnt; i++ { 154 | if string(qMembs[i].Member) != membs[i] { 155 | t.Fatal(fmt.Sprintf("[%s] vs [%s]", qMembs[i], membs[i])) 156 | } 157 | } 158 | } 159 | 160 | // {'a':0, 'b':1, 'c':2, 'd':999, 'e':4, 'f':5} 161 | if n, err := db.ZAdd(key, pair("d", 999)); err != nil { 162 | t.Fatal(err) 163 | } else if n != 0 { 164 | t.Fatal(n) 165 | } 166 | 167 | if pos, _ := db.ZRank(key, bin("d")); int(pos) != membCnt-1 { 168 | t.Fatal(pos) 169 | } 170 | 171 | if pos, _ := db.ZRevRank(key, bin("d")); int(pos) != 0 { 172 | t.Fatal(pos) 173 | } 174 | 175 | if pos, _ := db.ZRank(key, bin("e")); int(pos) != 3 { 176 | t.Fatal(pos) 177 | } 178 | 179 | if pos, _ := db.ZRank(key, bin("f")); int(pos) != 4 { 180 | t.Fatal(pos) 181 | } 182 | 183 | if qMembs, err := db.ZRangeByScore(key, 999, 0XFFFF, 0, membCnt); err != nil { 184 | t.Fatal(err) 185 | } else if len(qMembs) != 1 { 186 | t.Fatal(len(qMembs)) 187 | } 188 | 189 | // {'a':0, 'b':1, 'c':2, 'd':999, 'e':6, 'f':5} 190 | if s, err := db.ZIncrBy(key, 2, bin("e")); err != nil { 191 | t.Fatal(err) 192 | } else if s != 6 { 193 | t.Fatal(s) 194 | } 195 | 196 | if pos, _ := db.ZRank(key, bin("e")); int(pos) != 4 { 197 | t.Fatal(pos) 198 | } 199 | 200 | if pos, _ := db.ZRevRank(key, bin("e")); int(pos) != 1 { 201 | t.Fatal(pos) 202 | } 203 | 204 | if datas, _ := db.ZRange(key, 0, endPos); len(datas) != 6 { 205 | t.Fatal(len(datas)) 206 | } else { 207 | scores := []int64{0, 1, 2, 5, 6, 999} 208 | for i := 0; i < len(datas); i++ { 209 | if datas[i].Score != scores[i] { 210 | t.Fatal(fmt.Sprintf("[%d]=%d", i, datas[i])) 211 | } 212 | } 213 | } 214 | 215 | return 216 | } 217 | 218 | func 
TestZSetPersist(t *testing.T) { 219 | db := getTestDB() 220 | 221 | key := []byte("persist") 222 | db.ZAdd(key, ScorePair{1, []byte("a")}) 223 | 224 | if n, err := db.ZPersist(key); err != nil { 225 | t.Fatal(err) 226 | } else if n != 0 { 227 | t.Fatal(n) 228 | } 229 | 230 | if _, err := db.ZExpire(key, 10); err != nil { 231 | t.Fatal(err) 232 | } 233 | 234 | if n, err := db.ZPersist(key); err != nil { 235 | t.Fatal(err) 236 | } else if n != 1 { 237 | t.Fatal(n) 238 | } 239 | } 240 | 241 | func TestZUnionStore(t *testing.T) { 242 | db := getTestDB() 243 | key1 := []byte("key1") 244 | key2 := []byte("key2") 245 | 246 | db.ZAdd(key1, ScorePair{1, []byte("one")}) 247 | db.ZAdd(key1, ScorePair{1, []byte("two")}) 248 | 249 | db.ZAdd(key2, ScorePair{2, []byte("two")}) 250 | db.ZAdd(key2, ScorePair{2, []byte("three")}) 251 | 252 | keys := [][]byte{key1, key2} 253 | weights := []int64{1, 2} 254 | 255 | out := []byte("out") 256 | 257 | db.ZAdd(out, ScorePair{3, []byte("out")}) 258 | 259 | n, err := db.ZUnionStore(out, keys, weights, AggregateSum) 260 | if err != nil { 261 | t.Fatal(err.Error()) 262 | } 263 | if n != 3 { 264 | t.Fatal("invalid value ", n) 265 | } 266 | 267 | v, err := db.ZScore(out, []byte("two")) 268 | 269 | if err != nil { 270 | t.Fatal(err.Error()) 271 | } 272 | if v != 5 { 273 | t.Fatal("invalid value ", v) 274 | } 275 | 276 | out = []byte("out") 277 | n, err = db.ZUnionStore(out, keys, weights, AggregateMax) 278 | if err != nil { 279 | t.Fatal(err.Error()) 280 | } 281 | if n != 3 { 282 | t.Fatal("invalid value ", n) 283 | } 284 | 285 | v, err = db.ZScore(out, []byte("two")) 286 | 287 | if err != nil { 288 | t.Fatal(err.Error()) 289 | } 290 | if v != 4 { 291 | t.Fatal("invalid value ", v) 292 | } 293 | 294 | n, err = db.ZCount(out, 0, 0XFFFE) 295 | 296 | if err != nil { 297 | t.Fatal(err.Error()) 298 | } 299 | if n != 3 { 300 | t.Fatal("invalid value ", v) 301 | } 302 | 303 | n, err = db.ZCard(out) 304 | 305 | if err != nil { 306 | t.Fatal(err.Error()) 
307 | } 308 | if n != 3 { 309 | t.Fatal("invalid value ", n) 310 | } 311 | } 312 | 313 | func TestZInterStore(t *testing.T) { 314 | db := getTestDB() 315 | 316 | key1 := []byte("key1") 317 | key2 := []byte("key2") 318 | 319 | db.ZAdd(key1, ScorePair{1, []byte("one")}) 320 | db.ZAdd(key1, ScorePair{1, []byte("two")}) 321 | 322 | db.ZAdd(key2, ScorePair{2, []byte("two")}) 323 | db.ZAdd(key2, ScorePair{2, []byte("three")}) 324 | 325 | keys := [][]byte{key1, key2} 326 | weights := []int64{2, 3} 327 | out := []byte("out") 328 | 329 | db.ZAdd(out, ScorePair{3, []byte("out")}) 330 | 331 | n, err := db.ZInterStore(out, keys, weights, AggregateSum) 332 | if err != nil { 333 | t.Fatal(err.Error()) 334 | } 335 | if n != 1 { 336 | t.Fatal("invalid value ", n) 337 | } 338 | v, err := db.ZScore(out, []byte("two")) 339 | if err != nil { 340 | t.Fatal(err.Error()) 341 | } 342 | if v != 8 { 343 | t.Fatal("invalid value ", v) 344 | } 345 | 346 | n, err = db.ZInterStore(out, keys, weights, AggregateMin) 347 | if err != nil { 348 | t.Fatal(err.Error()) 349 | } 350 | if n != 1 { 351 | t.Fatal("invalid value ", n) 352 | } 353 | 354 | v, err = db.ZScore(out, []byte("two")) 355 | 356 | if err != nil { 357 | t.Fatal(err.Error()) 358 | } 359 | if v != 2 { 360 | t.Fatal("invalid value ", v) 361 | } 362 | 363 | n, err = db.ZCount(out, 0, 0XFFFF) 364 | if err != nil { 365 | t.Fatal(err.Error()) 366 | } 367 | if n != 1 { 368 | t.Fatal("invalid value ", n) 369 | } 370 | 371 | n, err = db.ZCard(out) 372 | 373 | if err != nil { 374 | t.Fatal(err.Error()) 375 | } 376 | if n != 1 { 377 | t.Fatal("invalid value ", n) 378 | } 379 | } 380 | 381 | func TestZScan(t *testing.T) { 382 | db := getTestDB() 383 | db.FlushAll() 384 | 385 | for i := 0; i < 2000; i++ { 386 | key := fmt.Sprintf("%d", i) 387 | if _, err := db.ZAdd([]byte(key), ScorePair{1, []byte("v")}); err != nil { 388 | t.Fatal(err.Error()) 389 | } 390 | } 391 | 392 | if v, err := db.ZScan(nil, 3000, true, ""); err != nil { 393 | 
t.Fatal(err.Error()) 394 | } else if len(v) != 2000 { 395 | t.Fatal("invalid value ", len(v)) 396 | } 397 | 398 | if n, err := db.zFlush(); err != nil { 399 | t.Fatal(err.Error()) 400 | } else if n != 2000 { 401 | t.Fatal("invalid value ", n) 402 | } 403 | 404 | if v, err := db.ZScan(nil, 3000, true, ""); err != nil { 405 | t.Fatal(err.Error()) 406 | } else if len(v) != 0 { 407 | t.Fatal("invalid value length ", len(v)) 408 | } 409 | } 410 | -------------------------------------------------------------------------------- /t_ttl_test.go: -------------------------------------------------------------------------------- 1 | package nodb 2 | 3 | import ( 4 | "fmt" 5 | "sync" 6 | "testing" 7 | "time" 8 | ) 9 | 10 | var m sync.Mutex 11 | 12 | type adaptor struct { 13 | set func([]byte, []byte) (int64, error) 14 | del func([]byte) (int64, error) 15 | exists func([]byte) (int64, error) 16 | 17 | expire func([]byte, int64) (int64, error) 18 | expireAt func([]byte, int64) (int64, error) 19 | ttl func([]byte) (int64, error) 20 | 21 | showIdent func() string 22 | } 23 | 24 | func kvAdaptor(db *DB) *adaptor { 25 | adp := new(adaptor) 26 | adp.showIdent = func() string { 27 | return "kv-adptor" 28 | } 29 | 30 | adp.set = db.SetNX 31 | adp.exists = db.Exists 32 | adp.del = func(k []byte) (int64, error) { 33 | return db.Del(k) 34 | } 35 | 36 | adp.expire = db.Expire 37 | adp.expireAt = db.ExpireAt 38 | adp.ttl = db.TTL 39 | 40 | return adp 41 | } 42 | 43 | func listAdaptor(db *DB) *adaptor { 44 | adp := new(adaptor) 45 | adp.showIdent = func() string { 46 | return "list-adptor" 47 | } 48 | 49 | adp.set = func(k []byte, v []byte) (int64, error) { 50 | eles := make([][]byte, 0) 51 | for i := 0; i < 3; i++ { 52 | e := []byte(String(v) + fmt.Sprintf("_%d", i)) 53 | eles = append(eles, e) 54 | } 55 | 56 | if n, err := db.LPush(k, eles[0], eles[1:]...); err != nil { 57 | return 0, err 58 | } else { 59 | return n, nil 60 | } 61 | } 62 | 63 | adp.exists = func(k []byte) (int64, error) { 64 
| if llen, err := db.LLen(k); err != nil || llen <= 0 { 65 | return 0, err 66 | } else { 67 | return 1, nil 68 | } 69 | } 70 | 71 | adp.del = db.LClear 72 | adp.expire = db.LExpire 73 | adp.expireAt = db.LExpireAt 74 | adp.ttl = db.LTTL 75 | 76 | return adp 77 | } 78 | 79 | func hashAdaptor(db *DB) *adaptor { 80 | adp := new(adaptor) 81 | adp.showIdent = func() string { 82 | return "hash-adptor" 83 | } 84 | 85 | adp.set = func(k []byte, v []byte) (int64, error) { 86 | datas := make([]FVPair, 0) 87 | for i := 0; i < 3; i++ { 88 | suffix := fmt.Sprintf("_%d", i) 89 | pair := FVPair{ 90 | Field: []byte(String(k) + suffix), 91 | Value: []byte(String(v) + suffix)} 92 | 93 | datas = append(datas, pair) 94 | } 95 | 96 | if err := db.HMset(k, datas...); err != nil { 97 | return 0, err 98 | } else { 99 | return int64(len(datas)), nil 100 | } 101 | } 102 | 103 | adp.exists = func(k []byte) (int64, error) { 104 | if hlen, err := db.HLen(k); err != nil || hlen <= 0 { 105 | return 0, err 106 | } else { 107 | return 1, nil 108 | } 109 | } 110 | 111 | adp.del = db.HClear 112 | adp.expire = db.HExpire 113 | adp.expireAt = db.HExpireAt 114 | adp.ttl = db.HTTL 115 | 116 | return adp 117 | } 118 | 119 | func zsetAdaptor(db *DB) *adaptor { 120 | adp := new(adaptor) 121 | adp.showIdent = func() string { 122 | return "zset-adptor" 123 | } 124 | 125 | adp.set = func(k []byte, v []byte) (int64, error) { 126 | datas := make([]ScorePair, 0) 127 | for i := 0; i < 3; i++ { 128 | memb := []byte(String(k) + fmt.Sprintf("_%d", i)) 129 | pair := ScorePair{ 130 | Score: int64(i), 131 | Member: memb} 132 | 133 | datas = append(datas, pair) 134 | } 135 | 136 | if n, err := db.ZAdd(k, datas...); err != nil { 137 | return 0, err 138 | } else { 139 | return n, nil 140 | } 141 | } 142 | 143 | adp.exists = func(k []byte) (int64, error) { 144 | if cnt, err := db.ZCard(k); err != nil || cnt <= 0 { 145 | return 0, err 146 | } else { 147 | return 1, nil 148 | } 149 | } 150 | 151 | adp.del = db.ZClear 152 | 
adp.expire = db.ZExpire 153 | adp.expireAt = db.ZExpireAt 154 | adp.ttl = db.ZTTL 155 | 156 | return adp 157 | } 158 | 159 | func setAdaptor(db *DB) *adaptor { 160 | adp := new(adaptor) 161 | adp.showIdent = func() string { 162 | return "set-adaptor" 163 | } 164 | 165 | adp.set = func(k []byte, v []byte) (int64, error) { 166 | eles := make([][]byte, 0) 167 | for i := 0; i < 3; i++ { 168 | e := []byte(String(v) + fmt.Sprintf("_%d", i)) 169 | eles = append(eles, e) 170 | } 171 | 172 | if n, err := db.SAdd(k, eles...); err != nil { 173 | return 0, err 174 | } else { 175 | return n, nil 176 | } 177 | 178 | } 179 | 180 | adp.exists = func(k []byte) (int64, error) { 181 | if slen, err := db.SCard(k); err != nil || slen <= 0 { 182 | return 0, err 183 | } else { 184 | return 1, nil 185 | } 186 | } 187 | 188 | adp.del = db.SClear 189 | adp.expire = db.SExpire 190 | adp.expireAt = db.SExpireAt 191 | adp.ttl = db.STTL 192 | 193 | return adp 194 | 195 | } 196 | 197 | func bitAdaptor(db *DB) *adaptor { 198 | adp := new(adaptor) 199 | adp.showIdent = func() string { 200 | return "bit-adaptor" 201 | } 202 | 203 | adp.set = func(k []byte, v []byte) (int64, error) { 204 | datas := make([]BitPair, 3) 205 | datas[0] = BitPair{0, 1} 206 | datas[1] = BitPair{2, 1} 207 | datas[2] = BitPair{5, 1} 208 | 209 | if _, err := db.BMSetBit(k, datas...); err != nil { 210 | return 0, err 211 | } else { 212 | return int64(len(datas)), nil 213 | } 214 | } 215 | 216 | adp.exists = func(k []byte) (int64, error) { 217 | var start, end int32 = 0, -1 218 | if blen, err := db.BCount(k, start, end); err != nil || blen <= 0 { 219 | return 0, err 220 | } else { 221 | return 1, nil 222 | } 223 | } 224 | 225 | adp.del = db.BDelete 226 | adp.expire = db.BExpire 227 | adp.expireAt = db.BExpireAt 228 | adp.ttl = db.BTTL 229 | 230 | return adp 231 | } 232 | 233 | func allAdaptors(db *DB) []*adaptor { 234 | adps := make([]*adaptor, 6) 235 | adps[0] = kvAdaptor(db) 236 | adps[1] = listAdaptor(db) 237 | adps[2] = 
hashAdaptor(db) 238 | adps[3] = zsetAdaptor(db) 239 | adps[4] = setAdaptor(db) 240 | adps[5] = bitAdaptor(db) 241 | return adps 242 | } 243 | 244 | /////////////////////////////////////////////////////// 245 | 246 | func TestExpire(t *testing.T) { 247 | db := getTestDB() 248 | m.Lock() 249 | defer m.Unlock() 250 | 251 | k := []byte("ttl_a") 252 | ek := []byte("ttl_b") 253 | 254 | dbEntries := allAdaptors(db) 255 | for _, entry := range dbEntries { 256 | ident := entry.showIdent() 257 | 258 | entry.set(k, []byte("1")) 259 | 260 | if ok, _ := entry.expire(k, 10); ok != 1 { 261 | t.Fatal(ident, ok) 262 | } 263 | 264 | // err - expire on an inexisting key 265 | if ok, _ := entry.expire(ek, 10); ok != 0 { 266 | t.Fatal(ident, ok) 267 | } 268 | 269 | // err - duration is zero 270 | if ok, err := entry.expire(k, 0); err == nil || ok != 0 { 271 | t.Fatal(ident, fmt.Sprintf("res = %d, err = %s", ok, err)) 272 | } 273 | 274 | // err - duration is negative 275 | if ok, err := entry.expire(k, -10); err == nil || ok != 0 { 276 | t.Fatal(ident, fmt.Sprintf("res = %d, err = %s", ok, err)) 277 | } 278 | } 279 | } 280 | 281 | func TestExpireAt(t *testing.T) { 282 | db := getTestDB() 283 | m.Lock() 284 | defer m.Unlock() 285 | 286 | k := []byte("ttl_a") 287 | ek := []byte("ttl_b") 288 | 289 | dbEntries := allAdaptors(db) 290 | for _, entry := range dbEntries { 291 | ident := entry.showIdent() 292 | now := time.Now().Unix() 293 | 294 | entry.set(k, []byte("1")) 295 | 296 | if ok, _ := entry.expireAt(k, now+5); ok != 1 { 297 | t.Fatal(ident, ok) 298 | } 299 | 300 | // err - expire on an inexisting key 301 | if ok, _ := entry.expireAt(ek, now+5); ok != 0 { 302 | t.Fatal(ident, ok) 303 | } 304 | 305 | // err - expire with the current time 306 | if ok, err := entry.expireAt(k, now); err == nil || ok != 0 { 307 | t.Fatal(ident, fmt.Sprintf("res = %d, err = %s", ok, err)) 308 | } 309 | 310 | // err - expire with the time before 311 | if ok, err := entry.expireAt(k, now-5); err == nil || ok 
!= 0 { 312 | t.Fatal(ident, fmt.Sprintf("res = %d, err = %s", ok, err)) 313 | } 314 | } 315 | } 316 | 317 | func TestTTL(t *testing.T) { 318 | db := getTestDB() 319 | m.Lock() 320 | defer m.Unlock() 321 | 322 | k := []byte("ttl_a") 323 | ek := []byte("ttl_b") 324 | 325 | dbEntries := allAdaptors(db) 326 | for _, entry := range dbEntries { 327 | ident := entry.showIdent() 328 | 329 | entry.set(k, []byte("1")) 330 | entry.expire(k, 2) 331 | 332 | if tRemain, _ := entry.ttl(k); tRemain != 2 { 333 | t.Fatal(ident, tRemain) 334 | } 335 | 336 | // err - check ttl on an inexisting key 337 | if tRemain, _ := entry.ttl(ek); tRemain != -1 { 338 | t.Fatal(ident, tRemain) 339 | } 340 | 341 | entry.del(k) 342 | if tRemain, _ := entry.ttl(k); tRemain != -1 { 343 | t.Fatal(ident, tRemain) 344 | } 345 | } 346 | } 347 | 348 | func TestExpCompose(t *testing.T) { 349 | db := getTestDB() 350 | m.Lock() 351 | defer m.Unlock() 352 | 353 | k0 := []byte("ttl_a") 354 | k1 := []byte("ttl_b") 355 | k2 := []byte("ttl_c") 356 | 357 | dbEntrys := allAdaptors(db) 358 | 359 | for _, entry := range dbEntrys { 360 | ident := entry.showIdent() 361 | 362 | entry.set(k0, k0) 363 | entry.set(k1, k1) 364 | entry.set(k2, k2) 365 | 366 | entry.expire(k0, 5) 367 | entry.expire(k1, 2) 368 | entry.expire(k2, 60) 369 | 370 | if tRemain, _ := entry.ttl(k0); tRemain != 5 { 371 | t.Fatal(ident, tRemain) 372 | } 373 | if tRemain, _ := entry.ttl(k1); tRemain != 2 { 374 | t.Fatal(ident, tRemain) 375 | } 376 | if tRemain, _ := entry.ttl(k2); tRemain != 60 { 377 | t.Fatal(ident, tRemain) 378 | } 379 | } 380 | 381 | // after 1 sec 382 | time.Sleep(1 * time.Second) 383 | 384 | for _, entry := range dbEntrys { 385 | ident := entry.showIdent() 386 | 387 | if tRemain, _ := entry.ttl(k0); tRemain != 4 { 388 | t.Fatal(ident, tRemain) 389 | } 390 | if tRemain, _ := entry.ttl(k1); tRemain != 1 { 391 | t.Fatal(ident, tRemain) 392 | } 393 | } 394 | 395 | // after 2 sec 396 | time.Sleep(2 * time.Second) 397 | 398 | for _, entry 
:= range dbEntrys { 399 | ident := entry.showIdent() 400 | 401 | if tRemain, _ := entry.ttl(k1); tRemain != -1 { 402 | t.Fatal(ident, tRemain) 403 | } 404 | if exist, _ := entry.exists(k1); exist > 0 { 405 | t.Fatal(ident, false) 406 | } 407 | 408 | if tRemain, _ := entry.ttl(k0); tRemain != 2 { 409 | t.Fatal(ident, tRemain) 410 | } 411 | if exist, _ := entry.exists(k0); exist <= 0 { 412 | t.Fatal(ident, false) 413 | } 414 | 415 | // refresh the expiration of key 416 | if tRemain, _ := entry.ttl(k2); !(0 < tRemain && tRemain < 60) { 417 | t.Fatal(ident, tRemain) 418 | } 419 | 420 | if ok, _ := entry.expire(k2, 100); ok != 1 { 421 | t.Fatal(ident, false) 422 | } 423 | 424 | if tRemain, _ := entry.ttl(k2); tRemain != 100 { 425 | t.Fatal(ident, tRemain) 426 | } 427 | 428 | // expire an inexisting key 429 | if ok, _ := entry.expire(k1, 10); ok == 1 { 430 | t.Fatal(ident, false) 431 | } 432 | if tRemain, _ := entry.ttl(k1); tRemain != -1 { 433 | t.Fatal(ident, tRemain) 434 | } 435 | } 436 | 437 | return 438 | } 439 | -------------------------------------------------------------------------------- /t_list.go: -------------------------------------------------------------------------------- 1 | package nodb 2 | 3 | import ( 4 | "encoding/binary" 5 | "errors" 6 | "time" 7 | 8 | "github.com/lunny/nodb/store" 9 | ) 10 | 11 | const ( 12 | listHeadSeq int32 = 1 13 | listTailSeq int32 = 2 14 | 15 | listMinSeq int32 = 1000 16 | listMaxSeq int32 = 1<<31 - 1000 17 | listInitialSeq int32 = listMinSeq + (listMaxSeq-listMinSeq)/2 18 | ) 19 | 20 | var errLMetaKey = errors.New("invalid lmeta key") 21 | var errListKey = errors.New("invalid list key") 22 | var errListSeq = errors.New("invalid list sequence, overflow") 23 | 24 | func (db *DB) lEncodeMetaKey(key []byte) []byte { 25 | buf := make([]byte, len(key)+2) 26 | buf[0] = db.index 27 | buf[1] = LMetaType 28 | 29 | copy(buf[2:], key) 30 | return buf 31 | } 32 | 33 | func (db *DB) lDecodeMetaKey(ek []byte) ([]byte, error) { 34 | if 
len(ek) < 2 || ek[0] != db.index || ek[1] != LMetaType { 35 | return nil, errLMetaKey 36 | } 37 | 38 | return ek[2:], nil 39 | } 40 | 41 | func (db *DB) lEncodeListKey(key []byte, seq int32) []byte { 42 | buf := make([]byte, len(key)+8) 43 | 44 | pos := 0 45 | buf[pos] = db.index 46 | pos++ 47 | buf[pos] = ListType 48 | pos++ 49 | 50 | binary.BigEndian.PutUint16(buf[pos:], uint16(len(key))) 51 | pos += 2 52 | 53 | copy(buf[pos:], key) 54 | pos += len(key) 55 | 56 | binary.BigEndian.PutUint32(buf[pos:], uint32(seq)) 57 | 58 | return buf 59 | } 60 | 61 | func (db *DB) lDecodeListKey(ek []byte) (key []byte, seq int32, err error) { 62 | if len(ek) < 8 || ek[0] != db.index || ek[1] != ListType { 63 | err = errListKey 64 | return 65 | } 66 | 67 | keyLen := int(binary.BigEndian.Uint16(ek[2:])) 68 | if keyLen+8 != len(ek) { 69 | err = errListKey 70 | return 71 | } 72 | 73 | key = ek[4 : 4+keyLen] 74 | seq = int32(binary.BigEndian.Uint32(ek[4+keyLen:])) 75 | return 76 | } 77 | 78 | func (db *DB) lpush(key []byte, whereSeq int32, args ...[]byte) (int64, error) { 79 | if err := checkKeySize(key); err != nil { 80 | return 0, err 81 | } 82 | 83 | var headSeq int32 84 | var tailSeq int32 85 | var size int32 86 | var err error 87 | 88 | t := db.listBatch 89 | t.Lock() 90 | defer t.Unlock() 91 | 92 | metaKey := db.lEncodeMetaKey(key) 93 | headSeq, tailSeq, size, err = db.lGetMeta(nil, metaKey) 94 | if err != nil { 95 | return 0, err 96 | } 97 | 98 | var pushCnt int = len(args) 99 | if pushCnt == 0 { 100 | return int64(size), nil 101 | } 102 | 103 | var seq int32 = headSeq 104 | var delta int32 = -1 105 | if whereSeq == listTailSeq { 106 | seq = tailSeq 107 | delta = 1 108 | } 109 | 110 | // append elements 111 | if size > 0 { 112 | seq += delta 113 | } 114 | 115 | for i := 0; i < pushCnt; i++ { 116 | ek := db.lEncodeListKey(key, seq+int32(i)*delta) 117 | t.Put(ek, args[i]) 118 | } 119 | 120 | seq += int32(pushCnt-1) * delta 121 | if seq <= listMinSeq || seq >= listMaxSeq { 122 | 
return 0, errListSeq 123 | } 124 | 125 | // set meta info 126 | if whereSeq == listHeadSeq { 127 | headSeq = seq 128 | } else { 129 | tailSeq = seq 130 | } 131 | 132 | db.lSetMeta(metaKey, headSeq, tailSeq) 133 | 134 | err = t.Commit() 135 | return int64(size) + int64(pushCnt), err 136 | } 137 | 138 | func (db *DB) lpop(key []byte, whereSeq int32) ([]byte, error) { 139 | if err := checkKeySize(key); err != nil { 140 | return nil, err 141 | } 142 | 143 | t := db.listBatch 144 | t.Lock() 145 | defer t.Unlock() 146 | 147 | var headSeq int32 148 | var tailSeq int32 149 | var err error 150 | 151 | metaKey := db.lEncodeMetaKey(key) 152 | headSeq, tailSeq, _, err = db.lGetMeta(nil, metaKey) 153 | if err != nil { 154 | return nil, err 155 | } 156 | 157 | var value []byte 158 | 159 | var seq int32 = headSeq 160 | if whereSeq == listTailSeq { 161 | seq = tailSeq 162 | } 163 | 164 | itemKey := db.lEncodeListKey(key, seq) 165 | value, err = db.bucket.Get(itemKey) 166 | if err != nil { 167 | return nil, err 168 | } 169 | 170 | if whereSeq == listHeadSeq { 171 | headSeq += 1 172 | } else { 173 | tailSeq -= 1 174 | } 175 | 176 | t.Delete(itemKey) 177 | size := db.lSetMeta(metaKey, headSeq, tailSeq) 178 | if size == 0 { 179 | db.rmExpire(t, HashType, key) 180 | } 181 | 182 | err = t.Commit() 183 | return value, err 184 | } 185 | 186 | // ps : here just focus on deleting the list data, 187 | // any other likes expire is ignore. 
func (db *DB) lDelete(t *batch, key []byte) int64 {
	mk := db.lEncodeMetaKey(key)

	var headSeq int32
	var tailSeq int32
	var err error

	it := db.bucket.NewIterator()
	defer it.Close()

	// A meta read failure is swallowed and reported as "nothing deleted".
	headSeq, tailSeq, _, err = db.lGetMeta(it, mk)
	if err != nil {
		return 0
	}

	var num int64 = 0
	startKey := db.lEncodeListKey(key, headSeq)
	stopKey := db.lEncodeListKey(key, tailSeq)

	// Queue a delete for every element key in [headSeq, tailSeq], inclusive.
	rit := store.NewRangeIterator(it, &store.Range{startKey, stopKey, store.RangeClose})
	for ; rit.Valid(); rit.Next() {
		t.Delete(rit.RawKey())
		num++
	}

	// Finally queue the delete of the meta record itself.
	t.Delete(mk)

	return num
}

// lGetMeta reads the list meta record at ek, via the iterator when one is
// supplied, otherwise via a direct bucket Get. A missing record is not an
// error: it yields the initial sequences and size 0 (empty list).
func (db *DB) lGetMeta(it *store.Iterator, ek []byte) (headSeq int32, tailSeq int32, size int32, err error) {
	var v []byte
	if it != nil {
		v = it.Find(ek)
	} else {
		v, err = db.bucket.Get(ek)
	}
	if err != nil {
		return
	} else if v == nil {
		headSeq = listInitialSeq
		tailSeq = listInitialSeq
		size = 0
		return
	} else {
		// Meta layout: 4-byte little-endian headSeq, then 4-byte little-endian tailSeq.
		headSeq = int32(binary.LittleEndian.Uint32(v[0:4]))
		tailSeq = int32(binary.LittleEndian.Uint32(v[4:8]))
		size = tailSeq - headSeq + 1
	}
	return
}

// lSetMeta queues an update of the list meta record in the list batch and
// returns the resulting size (tailSeq - headSeq + 1). A size of zero deletes
// the meta record; the caller commits the batch.
func (db *DB) lSetMeta(ek []byte, headSeq int32, tailSeq int32) int32 {
	t := db.listBatch

	var size int32 = tailSeq - headSeq + 1
	if size < 0 {
		// todo : log error + panic
	} else if size == 0 {
		t.Delete(ek)
	} else {
		buf := make([]byte, 8)

		binary.LittleEndian.PutUint32(buf[0:4], uint32(headSeq))
		binary.LittleEndian.PutUint32(buf[4:8], uint32(tailSeq))

		t.Put(ek, buf)
	}

	return size
}

// lExpireAt sets the absolute expiration time of the list at key;
// an empty or missing list returns 0 without registering a TTL.
func (db *DB) lExpireAt(key []byte, when int64) (int64, error) {
	t := db.listBatch
	t.Lock()
	defer t.Unlock()

	if llen, err := db.LLen(key); err != nil || llen == 0
{
		return 0, err
	} else {
		db.expireAt(t, ListType, key, when)
		if err := t.Commit(); err != nil {
			return 0, err
		}
	}
	return 1, nil
}

// LIndex returns the element at the given index; negative indexes count
// from the tail (-1 is the last element). A missing element yields nil.
func (db *DB) LIndex(key []byte, index int32) ([]byte, error) {
	if err := checkKeySize(key); err != nil {
		return nil, err
	}

	var seq int32
	var headSeq int32
	var tailSeq int32
	var err error

	metaKey := db.lEncodeMetaKey(key)

	it := db.bucket.NewIterator()
	defer it.Close()

	headSeq, tailSeq, _, err = db.lGetMeta(it, metaKey)
	if err != nil {
		return nil, err
	}

	// Translate the logical index into an absolute sequence number.
	if index >= 0 {
		seq = headSeq + index
	} else {
		seq = tailSeq + index + 1
	}

	sk := db.lEncodeListKey(key, seq)
	v := it.Find(sk)

	return v, nil
}

// LLen returns the length of the list at key (0 when it does not exist).
func (db *DB) LLen(key []byte) (int64, error) {
	if err := checkKeySize(key); err != nil {
		return 0, err
	}

	ek := db.lEncodeMetaKey(key)
	_, _, size, err := db.lGetMeta(nil, ek)
	return int64(size), err
}

// LPop removes and returns the first element of the list.
func (db *DB) LPop(key []byte) ([]byte, error) {
	return db.lpop(key, listHeadSeq)
}

// LPush prepends one or more values to the list and returns its new length.
func (db *DB) LPush(key []byte, arg1 []byte, args ...[]byte) (int64, error) {
	var argss = [][]byte{arg1}
	argss = append(argss, args...)
	return db.lpush(key, listHeadSeq, argss...)
}

// LRange returns the elements in [start, stop], both inclusive; negative
// offsets count from the end, Redis-style. Out-of-range requests return
// an empty (non-nil) slice.
func (db *DB) LRange(key []byte, start int32, stop int32) ([][]byte, error) {
	if err := checkKeySize(key); err != nil {
		return nil, err
	}

	var headSeq int32
	var llen int32
	var err error

	metaKey := db.lEncodeMetaKey(key)

	it := db.bucket.NewIterator()
	defer it.Close()

	if headSeq, _, llen, err = db.lGetMeta(it, metaKey); err != nil {
		return nil, err
	}

	// Normalize negative offsets and clamp to the list bounds.
	if start < 0 {
		start = llen + start
	}
	if stop < 0 {
		stop = llen + stop
	}
	if start < 0 {
		start = 0
	}

	if start > stop || start >= llen {
		return [][]byte{}, nil
	}

	if stop >= llen {
		stop = llen - 1
	}

	limit := (stop - start) + 1
	headSeq += start

	v := make([][]byte, 0, limit)

	// Iterate forward from the element at headSeq, at most `limit` values.
	startKey := db.lEncodeListKey(key, headSeq)
	rit := store.NewRangeLimitIterator(it,
		&store.Range{
			Min:  startKey,
			Max:  nil,
			Type: store.RangeClose},
		&store.Limit{
			Offset: 0,
			Count:  int(limit)})

	for ; rit.Valid(); rit.Next() {
		v = append(v, rit.Value())
	}

	return v, nil
}

// RPop removes and returns the last element of the list.
func (db *DB) RPop(key []byte) ([]byte, error) {
	return db.lpop(key, listTailSeq)
}

// RPush appends one or more values to the list and returns its new length.
func (db *DB) RPush(key []byte, arg1 []byte, args ...[]byte) (int64, error) {
	var argss = [][]byte{arg1}
	argss = append(argss, args...)
	return db.lpush(key, listTailSeq, argss...)
394 | } 395 | 396 | func (db *DB) LClear(key []byte) (int64, error) { 397 | if err := checkKeySize(key); err != nil { 398 | return 0, err 399 | } 400 | 401 | t := db.listBatch 402 | t.Lock() 403 | defer t.Unlock() 404 | 405 | num := db.lDelete(t, key) 406 | db.rmExpire(t, ListType, key) 407 | 408 | err := t.Commit() 409 | return num, err 410 | } 411 | 412 | func (db *DB) LMclear(keys ...[]byte) (int64, error) { 413 | t := db.listBatch 414 | t.Lock() 415 | defer t.Unlock() 416 | 417 | for _, key := range keys { 418 | if err := checkKeySize(key); err != nil { 419 | return 0, err 420 | } 421 | 422 | db.lDelete(t, key) 423 | db.rmExpire(t, ListType, key) 424 | 425 | } 426 | 427 | err := t.Commit() 428 | return int64(len(keys)), err 429 | } 430 | 431 | func (db *DB) lFlush() (drop int64, err error) { 432 | t := db.listBatch 433 | t.Lock() 434 | defer t.Unlock() 435 | return db.flushType(t, ListType) 436 | } 437 | 438 | func (db *DB) LExpire(key []byte, duration int64) (int64, error) { 439 | if duration <= 0 { 440 | return 0, errExpireValue 441 | } 442 | 443 | return db.lExpireAt(key, time.Now().Unix()+duration) 444 | } 445 | 446 | func (db *DB) LExpireAt(key []byte, when int64) (int64, error) { 447 | if when <= time.Now().Unix() { 448 | return 0, errExpireValue 449 | } 450 | 451 | return db.lExpireAt(key, when) 452 | } 453 | 454 | func (db *DB) LTTL(key []byte) (int64, error) { 455 | if err := checkKeySize(key); err != nil { 456 | return -1, err 457 | } 458 | 459 | return db.ttl(ListType, key) 460 | } 461 | 462 | func (db *DB) LPersist(key []byte) (int64, error) { 463 | if err := checkKeySize(key); err != nil { 464 | return 0, err 465 | } 466 | 467 | t := db.listBatch 468 | t.Lock() 469 | defer t.Unlock() 470 | 471 | n, err := db.rmExpire(t, ListType, key) 472 | if err != nil { 473 | return 0, err 474 | } 475 | 476 | err = t.Commit() 477 | return n, err 478 | } 479 | 480 | func (db *DB) LScan(key []byte, count int, inclusive bool, match string) ([][]byte, error) { 481 | 
return db.scan(LMetaType, key, count, inclusive, match) 482 | } 483 | 484 | func (db *DB) lEncodeMinKey() []byte { 485 | return db.lEncodeMetaKey(nil) 486 | } 487 | 488 | func (db *DB) lEncodeMaxKey() []byte { 489 | ek := db.lEncodeMetaKey(nil) 490 | ek[len(ek)-1] = LMetaType + 1 491 | return ek 492 | } 493 | -------------------------------------------------------------------------------- /t_hash.go: -------------------------------------------------------------------------------- 1 | package nodb 2 | 3 | import ( 4 | "encoding/binary" 5 | "errors" 6 | "time" 7 | 8 | "github.com/lunny/nodb/store" 9 | ) 10 | 11 | type FVPair struct { 12 | Field []byte 13 | Value []byte 14 | } 15 | 16 | var errHashKey = errors.New("invalid hash key") 17 | var errHSizeKey = errors.New("invalid hsize key") 18 | 19 | const ( 20 | hashStartSep byte = ':' 21 | hashStopSep byte = hashStartSep + 1 22 | ) 23 | 24 | func checkHashKFSize(key []byte, field []byte) error { 25 | if len(key) > MaxKeySize || len(key) == 0 { 26 | return errKeySize 27 | } else if len(field) > MaxHashFieldSize || len(field) == 0 { 28 | return errHashFieldSize 29 | } 30 | return nil 31 | } 32 | 33 | func (db *DB) hEncodeSizeKey(key []byte) []byte { 34 | buf := make([]byte, len(key)+2) 35 | 36 | buf[0] = db.index 37 | buf[1] = HSizeType 38 | 39 | copy(buf[2:], key) 40 | return buf 41 | } 42 | 43 | func (db *DB) hDecodeSizeKey(ek []byte) ([]byte, error) { 44 | if len(ek) < 2 || ek[0] != db.index || ek[1] != HSizeType { 45 | return nil, errHSizeKey 46 | } 47 | 48 | return ek[2:], nil 49 | } 50 | 51 | func (db *DB) hEncodeHashKey(key []byte, field []byte) []byte { 52 | buf := make([]byte, len(key)+len(field)+1+1+2+1) 53 | 54 | pos := 0 55 | buf[pos] = db.index 56 | pos++ 57 | buf[pos] = HashType 58 | pos++ 59 | 60 | binary.BigEndian.PutUint16(buf[pos:], uint16(len(key))) 61 | pos += 2 62 | 63 | copy(buf[pos:], key) 64 | pos += len(key) 65 | 66 | buf[pos] = hashStartSep 67 | pos++ 68 | copy(buf[pos:], field) 69 | 70 | return 
buf 71 | } 72 | 73 | func (db *DB) hDecodeHashKey(ek []byte) ([]byte, []byte, error) { 74 | if len(ek) < 5 || ek[0] != db.index || ek[1] != HashType { 75 | return nil, nil, errHashKey 76 | } 77 | 78 | pos := 2 79 | keyLen := int(binary.BigEndian.Uint16(ek[pos:])) 80 | pos += 2 81 | 82 | if keyLen+5 > len(ek) { 83 | return nil, nil, errHashKey 84 | } 85 | 86 | key := ek[pos : pos+keyLen] 87 | pos += keyLen 88 | 89 | if ek[pos] != hashStartSep { 90 | return nil, nil, errHashKey 91 | } 92 | 93 | pos++ 94 | field := ek[pos:] 95 | return key, field, nil 96 | } 97 | 98 | func (db *DB) hEncodeStartKey(key []byte) []byte { 99 | return db.hEncodeHashKey(key, nil) 100 | } 101 | 102 | func (db *DB) hEncodeStopKey(key []byte) []byte { 103 | k := db.hEncodeHashKey(key, nil) 104 | 105 | k[len(k)-1] = hashStopSep 106 | 107 | return k 108 | } 109 | 110 | func (db *DB) hSetItem(key []byte, field []byte, value []byte) (int64, error) { 111 | t := db.hashBatch 112 | 113 | ek := db.hEncodeHashKey(key, field) 114 | 115 | var n int64 = 1 116 | if v, _ := db.bucket.Get(ek); v != nil { 117 | n = 0 118 | } else { 119 | if _, err := db.hIncrSize(key, 1); err != nil { 120 | return 0, err 121 | } 122 | } 123 | 124 | t.Put(ek, value) 125 | return n, nil 126 | } 127 | 128 | // ps : here just focus on deleting the hash data, 129 | // any other likes expire is ignore. 
130 | func (db *DB) hDelete(t *batch, key []byte) int64 { 131 | sk := db.hEncodeSizeKey(key) 132 | start := db.hEncodeStartKey(key) 133 | stop := db.hEncodeStopKey(key) 134 | 135 | var num int64 = 0 136 | it := db.bucket.RangeLimitIterator(start, stop, store.RangeROpen, 0, -1) 137 | for ; it.Valid(); it.Next() { 138 | t.Delete(it.Key()) 139 | num++ 140 | } 141 | it.Close() 142 | 143 | t.Delete(sk) 144 | return num 145 | } 146 | 147 | func (db *DB) hExpireAt(key []byte, when int64) (int64, error) { 148 | t := db.hashBatch 149 | t.Lock() 150 | defer t.Unlock() 151 | 152 | if hlen, err := db.HLen(key); err != nil || hlen == 0 { 153 | return 0, err 154 | } else { 155 | db.expireAt(t, HashType, key, when) 156 | if err := t.Commit(); err != nil { 157 | return 0, err 158 | } 159 | } 160 | return 1, nil 161 | } 162 | 163 | func (db *DB) HLen(key []byte) (int64, error) { 164 | if err := checkKeySize(key); err != nil { 165 | return 0, err 166 | } 167 | 168 | return Int64(db.bucket.Get(db.hEncodeSizeKey(key))) 169 | } 170 | 171 | func (db *DB) HSet(key []byte, field []byte, value []byte) (int64, error) { 172 | if err := checkHashKFSize(key, field); err != nil { 173 | return 0, err 174 | } else if err := checkValueSize(value); err != nil { 175 | return 0, err 176 | } 177 | 178 | t := db.hashBatch 179 | t.Lock() 180 | defer t.Unlock() 181 | 182 | n, err := db.hSetItem(key, field, value) 183 | if err != nil { 184 | return 0, err 185 | } 186 | 187 | //todo add binlog 188 | 189 | err = t.Commit() 190 | return n, err 191 | } 192 | 193 | func (db *DB) HGet(key []byte, field []byte) ([]byte, error) { 194 | if err := checkHashKFSize(key, field); err != nil { 195 | return nil, err 196 | } 197 | 198 | return db.bucket.Get(db.hEncodeHashKey(key, field)) 199 | } 200 | 201 | func (db *DB) HMset(key []byte, args ...FVPair) error { 202 | t := db.hashBatch 203 | t.Lock() 204 | defer t.Unlock() 205 | 206 | var err error 207 | var ek []byte 208 | var num int64 = 0 209 | for i := 0; i < 
len(args); i++ { 210 | if err := checkHashKFSize(key, args[i].Field); err != nil { 211 | return err 212 | } else if err := checkValueSize(args[i].Value); err != nil { 213 | return err 214 | } 215 | 216 | ek = db.hEncodeHashKey(key, args[i].Field) 217 | 218 | if v, err := db.bucket.Get(ek); err != nil { 219 | return err 220 | } else if v == nil { 221 | num++ 222 | } 223 | 224 | t.Put(ek, args[i].Value) 225 | } 226 | 227 | if _, err = db.hIncrSize(key, num); err != nil { 228 | return err 229 | } 230 | 231 | //todo add binglog 232 | err = t.Commit() 233 | return err 234 | } 235 | 236 | func (db *DB) HMget(key []byte, args ...[]byte) ([][]byte, error) { 237 | var ek []byte 238 | 239 | it := db.bucket.NewIterator() 240 | defer it.Close() 241 | 242 | r := make([][]byte, len(args)) 243 | for i := 0; i < len(args); i++ { 244 | if err := checkHashKFSize(key, args[i]); err != nil { 245 | return nil, err 246 | } 247 | 248 | ek = db.hEncodeHashKey(key, args[i]) 249 | 250 | r[i] = it.Find(ek) 251 | } 252 | 253 | return r, nil 254 | } 255 | 256 | func (db *DB) HDel(key []byte, args ...[]byte) (int64, error) { 257 | t := db.hashBatch 258 | 259 | var ek []byte 260 | var v []byte 261 | var err error 262 | 263 | t.Lock() 264 | defer t.Unlock() 265 | 266 | it := db.bucket.NewIterator() 267 | defer it.Close() 268 | 269 | var num int64 = 0 270 | for i := 0; i < len(args); i++ { 271 | if err := checkHashKFSize(key, args[i]); err != nil { 272 | return 0, err 273 | } 274 | 275 | ek = db.hEncodeHashKey(key, args[i]) 276 | 277 | v = it.RawFind(ek) 278 | if v == nil { 279 | continue 280 | } else { 281 | num++ 282 | t.Delete(ek) 283 | } 284 | } 285 | 286 | if _, err = db.hIncrSize(key, -num); err != nil { 287 | return 0, err 288 | } 289 | 290 | err = t.Commit() 291 | 292 | return num, err 293 | } 294 | 295 | func (db *DB) hIncrSize(key []byte, delta int64) (int64, error) { 296 | t := db.hashBatch 297 | sk := db.hEncodeSizeKey(key) 298 | 299 | var err error 300 | var size int64 = 0 301 | if 
size, err = Int64(db.bucket.Get(sk)); err != nil { 302 | return 0, err 303 | } else { 304 | size += delta 305 | if size <= 0 { 306 | size = 0 307 | t.Delete(sk) 308 | db.rmExpire(t, HashType, key) 309 | } else { 310 | t.Put(sk, PutInt64(size)) 311 | } 312 | } 313 | 314 | return size, nil 315 | } 316 | 317 | func (db *DB) HIncrBy(key []byte, field []byte, delta int64) (int64, error) { 318 | if err := checkHashKFSize(key, field); err != nil { 319 | return 0, err 320 | } 321 | 322 | t := db.hashBatch 323 | var ek []byte 324 | var err error 325 | 326 | t.Lock() 327 | defer t.Unlock() 328 | 329 | ek = db.hEncodeHashKey(key, field) 330 | 331 | var n int64 = 0 332 | if n, err = StrInt64(db.bucket.Get(ek)); err != nil { 333 | return 0, err 334 | } 335 | 336 | n += delta 337 | 338 | _, err = db.hSetItem(key, field, StrPutInt64(n)) 339 | if err != nil { 340 | return 0, err 341 | } 342 | 343 | err = t.Commit() 344 | 345 | return n, err 346 | } 347 | 348 | func (db *DB) HGetAll(key []byte) ([]FVPair, error) { 349 | if err := checkKeySize(key); err != nil { 350 | return nil, err 351 | } 352 | 353 | start := db.hEncodeStartKey(key) 354 | stop := db.hEncodeStopKey(key) 355 | 356 | v := make([]FVPair, 0, 16) 357 | 358 | it := db.bucket.RangeLimitIterator(start, stop, store.RangeROpen, 0, -1) 359 | for ; it.Valid(); it.Next() { 360 | _, f, err := db.hDecodeHashKey(it.Key()) 361 | if err != nil { 362 | return nil, err 363 | } 364 | 365 | v = append(v, FVPair{Field: f, Value: it.Value()}) 366 | } 367 | 368 | it.Close() 369 | 370 | return v, nil 371 | } 372 | 373 | func (db *DB) HKeys(key []byte) ([][]byte, error) { 374 | if err := checkKeySize(key); err != nil { 375 | return nil, err 376 | } 377 | 378 | start := db.hEncodeStartKey(key) 379 | stop := db.hEncodeStopKey(key) 380 | 381 | v := make([][]byte, 0, 16) 382 | 383 | it := db.bucket.RangeLimitIterator(start, stop, store.RangeROpen, 0, -1) 384 | for ; it.Valid(); it.Next() { 385 | _, f, err := db.hDecodeHashKey(it.Key()) 386 | if 
err != nil { 387 | return nil, err 388 | } 389 | v = append(v, f) 390 | } 391 | 392 | it.Close() 393 | 394 | return v, nil 395 | } 396 | 397 | func (db *DB) HValues(key []byte) ([][]byte, error) { 398 | if err := checkKeySize(key); err != nil { 399 | return nil, err 400 | } 401 | 402 | start := db.hEncodeStartKey(key) 403 | stop := db.hEncodeStopKey(key) 404 | 405 | v := make([][]byte, 0, 16) 406 | 407 | it := db.bucket.RangeLimitIterator(start, stop, store.RangeROpen, 0, -1) 408 | for ; it.Valid(); it.Next() { 409 | _, _, err := db.hDecodeHashKey(it.Key()) 410 | if err != nil { 411 | return nil, err 412 | } 413 | 414 | v = append(v, it.Value()) 415 | } 416 | 417 | it.Close() 418 | 419 | return v, nil 420 | } 421 | 422 | func (db *DB) HClear(key []byte) (int64, error) { 423 | if err := checkKeySize(key); err != nil { 424 | return 0, err 425 | } 426 | 427 | t := db.hashBatch 428 | t.Lock() 429 | defer t.Unlock() 430 | 431 | num := db.hDelete(t, key) 432 | db.rmExpire(t, HashType, key) 433 | 434 | err := t.Commit() 435 | return num, err 436 | } 437 | 438 | func (db *DB) HMclear(keys ...[]byte) (int64, error) { 439 | t := db.hashBatch 440 | t.Lock() 441 | defer t.Unlock() 442 | 443 | for _, key := range keys { 444 | if err := checkKeySize(key); err != nil { 445 | return 0, err 446 | } 447 | 448 | db.hDelete(t, key) 449 | db.rmExpire(t, HashType, key) 450 | } 451 | 452 | err := t.Commit() 453 | return int64(len(keys)), err 454 | } 455 | 456 | func (db *DB) hFlush() (drop int64, err error) { 457 | t := db.hashBatch 458 | 459 | t.Lock() 460 | defer t.Unlock() 461 | 462 | return db.flushType(t, HashType) 463 | } 464 | 465 | func (db *DB) HScan(key []byte, count int, inclusive bool, match string) ([][]byte, error) { 466 | return db.scan(HSizeType, key, count, inclusive, match) 467 | } 468 | 469 | func (db *DB) HExpire(key []byte, duration int64) (int64, error) { 470 | if duration <= 0 { 471 | return 0, errExpireValue 472 | } 473 | 474 | return db.hExpireAt(key, 
time.Now().Unix()+duration) 475 | } 476 | 477 | func (db *DB) HExpireAt(key []byte, when int64) (int64, error) { 478 | if when <= time.Now().Unix() { 479 | return 0, errExpireValue 480 | } 481 | 482 | return db.hExpireAt(key, when) 483 | } 484 | 485 | func (db *DB) HTTL(key []byte) (int64, error) { 486 | if err := checkKeySize(key); err != nil { 487 | return -1, err 488 | } 489 | 490 | return db.ttl(HashType, key) 491 | } 492 | 493 | func (db *DB) HPersist(key []byte) (int64, error) { 494 | if err := checkKeySize(key); err != nil { 495 | return 0, err 496 | } 497 | 498 | t := db.hashBatch 499 | t.Lock() 500 | defer t.Unlock() 501 | 502 | n, err := db.rmExpire(t, HashType, key) 503 | if err != nil { 504 | return 0, err 505 | } 506 | 507 | err = t.Commit() 508 | return n, err 509 | } 510 | -------------------------------------------------------------------------------- /t_set.go: -------------------------------------------------------------------------------- 1 | package nodb 2 | 3 | import ( 4 | "encoding/binary" 5 | "errors" 6 | "time" 7 | 8 | "github.com/lunny/nodb/store" 9 | ) 10 | 11 | var errSetKey = errors.New("invalid set key") 12 | var errSSizeKey = errors.New("invalid ssize key") 13 | 14 | const ( 15 | setStartSep byte = ':' 16 | setStopSep byte = setStartSep + 1 17 | UnionType byte = 51 18 | DiffType byte = 52 19 | InterType byte = 53 20 | ) 21 | 22 | func checkSetKMSize(key []byte, member []byte) error { 23 | if len(key) > MaxKeySize || len(key) == 0 { 24 | return errKeySize 25 | } else if len(member) > MaxSetMemberSize || len(member) == 0 { 26 | return errSetMemberSize 27 | } 28 | return nil 29 | } 30 | 31 | func (db *DB) sEncodeSizeKey(key []byte) []byte { 32 | buf := make([]byte, len(key)+2) 33 | 34 | buf[0] = db.index 35 | buf[1] = SSizeType 36 | 37 | copy(buf[2:], key) 38 | return buf 39 | } 40 | 41 | func (db *DB) sDecodeSizeKey(ek []byte) ([]byte, error) { 42 | if len(ek) < 2 || ek[0] != db.index || ek[1] != SSizeType { 43 | return nil, errSSizeKey 
44 | } 45 | 46 | return ek[2:], nil 47 | } 48 | 49 | func (db *DB) sEncodeSetKey(key []byte, member []byte) []byte { 50 | buf := make([]byte, len(key)+len(member)+1+1+2+1) 51 | 52 | pos := 0 53 | buf[pos] = db.index 54 | pos++ 55 | buf[pos] = SetType 56 | pos++ 57 | 58 | binary.BigEndian.PutUint16(buf[pos:], uint16(len(key))) 59 | pos += 2 60 | 61 | copy(buf[pos:], key) 62 | pos += len(key) 63 | 64 | buf[pos] = setStartSep 65 | pos++ 66 | copy(buf[pos:], member) 67 | 68 | return buf 69 | } 70 | 71 | func (db *DB) sDecodeSetKey(ek []byte) ([]byte, []byte, error) { 72 | if len(ek) < 5 || ek[0] != db.index || ek[1] != SetType { 73 | return nil, nil, errSetKey 74 | } 75 | 76 | pos := 2 77 | keyLen := int(binary.BigEndian.Uint16(ek[pos:])) 78 | pos += 2 79 | 80 | if keyLen+5 > len(ek) { 81 | return nil, nil, errSetKey 82 | } 83 | 84 | key := ek[pos : pos+keyLen] 85 | pos += keyLen 86 | 87 | if ek[pos] != hashStartSep { 88 | return nil, nil, errSetKey 89 | } 90 | 91 | pos++ 92 | member := ek[pos:] 93 | return key, member, nil 94 | } 95 | 96 | func (db *DB) sEncodeStartKey(key []byte) []byte { 97 | return db.sEncodeSetKey(key, nil) 98 | } 99 | 100 | func (db *DB) sEncodeStopKey(key []byte) []byte { 101 | k := db.sEncodeSetKey(key, nil) 102 | 103 | k[len(k)-1] = setStopSep 104 | 105 | return k 106 | } 107 | 108 | func (db *DB) sFlush() (drop int64, err error) { 109 | 110 | t := db.setBatch 111 | t.Lock() 112 | defer t.Unlock() 113 | 114 | return db.flushType(t, SetType) 115 | } 116 | 117 | func (db *DB) sDelete(t *batch, key []byte) int64 { 118 | sk := db.sEncodeSizeKey(key) 119 | start := db.sEncodeStartKey(key) 120 | stop := db.sEncodeStopKey(key) 121 | 122 | var num int64 = 0 123 | it := db.bucket.RangeLimitIterator(start, stop, store.RangeROpen, 0, -1) 124 | for ; it.Valid(); it.Next() { 125 | t.Delete(it.RawKey()) 126 | num++ 127 | } 128 | 129 | it.Close() 130 | 131 | t.Delete(sk) 132 | return num 133 | } 134 | 135 | func (db *DB) sIncrSize(key []byte, delta int64) 
(int64, error) { 136 | t := db.setBatch 137 | sk := db.sEncodeSizeKey(key) 138 | 139 | var err error 140 | var size int64 = 0 141 | if size, err = Int64(db.bucket.Get(sk)); err != nil { 142 | return 0, err 143 | } else { 144 | size += delta 145 | if size <= 0 { 146 | size = 0 147 | t.Delete(sk) 148 | db.rmExpire(t, SetType, key) 149 | } else { 150 | t.Put(sk, PutInt64(size)) 151 | } 152 | } 153 | 154 | return size, nil 155 | } 156 | 157 | func (db *DB) sExpireAt(key []byte, when int64) (int64, error) { 158 | t := db.setBatch 159 | t.Lock() 160 | defer t.Unlock() 161 | 162 | if scnt, err := db.SCard(key); err != nil || scnt == 0 { 163 | return 0, err 164 | } else { 165 | db.expireAt(t, SetType, key, when) 166 | if err := t.Commit(); err != nil { 167 | return 0, err 168 | } 169 | 170 | } 171 | 172 | return 1, nil 173 | } 174 | 175 | func (db *DB) sSetItem(key []byte, member []byte) (int64, error) { 176 | t := db.setBatch 177 | ek := db.sEncodeSetKey(key, member) 178 | 179 | var n int64 = 1 180 | if v, _ := db.bucket.Get(ek); v != nil { 181 | n = 0 182 | } else { 183 | if _, err := db.sIncrSize(key, 1); err != nil { 184 | return 0, err 185 | } 186 | } 187 | 188 | t.Put(ek, nil) 189 | return n, nil 190 | } 191 | 192 | func (db *DB) SAdd(key []byte, args ...[]byte) (int64, error) { 193 | t := db.setBatch 194 | t.Lock() 195 | defer t.Unlock() 196 | 197 | var err error 198 | var ek []byte 199 | var num int64 = 0 200 | for i := 0; i < len(args); i++ { 201 | if err := checkSetKMSize(key, args[i]); err != nil { 202 | return 0, err 203 | } 204 | 205 | ek = db.sEncodeSetKey(key, args[i]) 206 | 207 | if v, err := db.bucket.Get(ek); err != nil { 208 | return 0, err 209 | } else if v == nil { 210 | num++ 211 | } 212 | 213 | t.Put(ek, nil) 214 | } 215 | 216 | if _, err = db.sIncrSize(key, num); err != nil { 217 | return 0, err 218 | } 219 | 220 | err = t.Commit() 221 | return num, err 222 | 223 | } 224 | 225 | func (db *DB) SCard(key []byte) (int64, error) { 226 | if err := 
checkKeySize(key); err != nil { 227 | return 0, err 228 | } 229 | 230 | sk := db.sEncodeSizeKey(key) 231 | 232 | return Int64(db.bucket.Get(sk)) 233 | } 234 | 235 | func (db *DB) sDiffGeneric(keys ...[]byte) ([][]byte, error) { 236 | destMap := make(map[string]bool) 237 | 238 | members, err := db.SMembers(keys[0]) 239 | if err != nil { 240 | return nil, err 241 | } 242 | 243 | for _, m := range members { 244 | destMap[String(m)] = true 245 | } 246 | 247 | for _, k := range keys[1:] { 248 | members, err := db.SMembers(k) 249 | if err != nil { 250 | return nil, err 251 | } 252 | 253 | for _, m := range members { 254 | if _, ok := destMap[String(m)]; !ok { 255 | continue 256 | } else if ok { 257 | delete(destMap, String(m)) 258 | } 259 | } 260 | // O - A = O, O is zero set. 261 | if len(destMap) == 0 { 262 | return nil, nil 263 | } 264 | } 265 | 266 | slice := make([][]byte, len(destMap)) 267 | idx := 0 268 | for k, v := range destMap { 269 | if !v { 270 | continue 271 | } 272 | slice[idx] = []byte(k) 273 | idx++ 274 | } 275 | 276 | return slice, nil 277 | } 278 | 279 | func (db *DB) SDiff(keys ...[]byte) ([][]byte, error) { 280 | v, err := db.sDiffGeneric(keys...) 281 | return v, err 282 | } 283 | 284 | func (db *DB) SDiffStore(dstKey []byte, keys ...[]byte) (int64, error) { 285 | n, err := db.sStoreGeneric(dstKey, DiffType, keys...) 
286 | return n, err 287 | } 288 | 289 | func (db *DB) sInterGeneric(keys ...[]byte) ([][]byte, error) { 290 | destMap := make(map[string]bool) 291 | 292 | members, err := db.SMembers(keys[0]) 293 | if err != nil { 294 | return nil, err 295 | } 296 | 297 | for _, m := range members { 298 | destMap[String(m)] = true 299 | } 300 | 301 | for _, key := range keys[1:] { 302 | if err := checkKeySize(key); err != nil { 303 | return nil, err 304 | } 305 | 306 | members, err := db.SMembers(key) 307 | if err != nil { 308 | return nil, err 309 | } else if len(members) == 0 { 310 | return nil, err 311 | } 312 | 313 | tempMap := make(map[string]bool) 314 | for _, member := range members { 315 | if err := checkKeySize(member); err != nil { 316 | return nil, err 317 | } 318 | if _, ok := destMap[String(member)]; ok { 319 | tempMap[String(member)] = true //mark this item as selected 320 | } 321 | } 322 | destMap = tempMap //reduce the size of the result set 323 | if len(destMap) == 0 { 324 | return nil, nil 325 | } 326 | } 327 | 328 | slice := make([][]byte, len(destMap)) 329 | idx := 0 330 | for k, v := range destMap { 331 | if !v { 332 | continue 333 | } 334 | 335 | slice[idx] = []byte(k) 336 | idx++ 337 | } 338 | 339 | return slice, nil 340 | 341 | } 342 | 343 | func (db *DB) SInter(keys ...[]byte) ([][]byte, error) { 344 | v, err := db.sInterGeneric(keys...) 345 | return v, err 346 | 347 | } 348 | 349 | func (db *DB) SInterStore(dstKey []byte, keys ...[]byte) (int64, error) { 350 | n, err := db.sStoreGeneric(dstKey, InterType, keys...) 
351 | return n, err 352 | } 353 | 354 | func (db *DB) SIsMember(key []byte, member []byte) (int64, error) { 355 | ek := db.sEncodeSetKey(key, member) 356 | 357 | var n int64 = 1 358 | if v, err := db.bucket.Get(ek); err != nil { 359 | return 0, err 360 | } else if v == nil { 361 | n = 0 362 | } 363 | return n, nil 364 | } 365 | 366 | func (db *DB) SMembers(key []byte) ([][]byte, error) { 367 | if err := checkKeySize(key); err != nil { 368 | return nil, err 369 | } 370 | 371 | start := db.sEncodeStartKey(key) 372 | stop := db.sEncodeStopKey(key) 373 | 374 | v := make([][]byte, 0, 16) 375 | 376 | it := db.bucket.RangeLimitIterator(start, stop, store.RangeROpen, 0, -1) 377 | for ; it.Valid(); it.Next() { 378 | _, m, err := db.sDecodeSetKey(it.Key()) 379 | if err != nil { 380 | return nil, err 381 | } 382 | 383 | v = append(v, m) 384 | } 385 | 386 | it.Close() 387 | 388 | return v, nil 389 | } 390 | 391 | func (db *DB) SRem(key []byte, args ...[]byte) (int64, error) { 392 | t := db.setBatch 393 | t.Lock() 394 | defer t.Unlock() 395 | 396 | var ek []byte 397 | var v []byte 398 | var err error 399 | 400 | it := db.bucket.NewIterator() 401 | defer it.Close() 402 | 403 | var num int64 = 0 404 | for i := 0; i < len(args); i++ { 405 | if err := checkSetKMSize(key, args[i]); err != nil { 406 | return 0, err 407 | } 408 | 409 | ek = db.sEncodeSetKey(key, args[i]) 410 | 411 | v = it.RawFind(ek) 412 | if v == nil { 413 | continue 414 | } else { 415 | num++ 416 | t.Delete(ek) 417 | } 418 | } 419 | 420 | if _, err = db.sIncrSize(key, -num); err != nil { 421 | return 0, err 422 | } 423 | 424 | err = t.Commit() 425 | return num, err 426 | 427 | } 428 | 429 | func (db *DB) sUnionGeneric(keys ...[]byte) ([][]byte, error) { 430 | dstMap := make(map[string]bool) 431 | 432 | for _, key := range keys { 433 | if err := checkKeySize(key); err != nil { 434 | return nil, err 435 | } 436 | 437 | members, err := db.SMembers(key) 438 | if err != nil { 439 | return nil, err 440 | } 441 | 442 | 
for _, member := range members { 443 | dstMap[String(member)] = true 444 | } 445 | } 446 | 447 | slice := make([][]byte, len(dstMap)) 448 | idx := 0 449 | for k, v := range dstMap { 450 | if !v { 451 | continue 452 | } 453 | slice[idx] = []byte(k) 454 | idx++ 455 | } 456 | 457 | return slice, nil 458 | } 459 | 460 | func (db *DB) SUnion(keys ...[]byte) ([][]byte, error) { 461 | v, err := db.sUnionGeneric(keys...) 462 | return v, err 463 | } 464 | 465 | func (db *DB) SUnionStore(dstKey []byte, keys ...[]byte) (int64, error) { 466 | n, err := db.sStoreGeneric(dstKey, UnionType, keys...) 467 | return n, err 468 | } 469 | 470 | func (db *DB) sStoreGeneric(dstKey []byte, optType byte, keys ...[]byte) (int64, error) { 471 | if err := checkKeySize(dstKey); err != nil { 472 | return 0, err 473 | } 474 | 475 | t := db.setBatch 476 | t.Lock() 477 | defer t.Unlock() 478 | 479 | db.sDelete(t, dstKey) 480 | 481 | var err error 482 | var ek []byte 483 | var v [][]byte 484 | 485 | switch optType { 486 | case UnionType: 487 | v, err = db.sUnionGeneric(keys...) 488 | case DiffType: 489 | v, err = db.sDiffGeneric(keys...) 490 | case InterType: 491 | v, err = db.sInterGeneric(keys...) 
492 | } 493 | 494 | if err != nil { 495 | return 0, err 496 | } 497 | 498 | for _, m := range v { 499 | if err := checkSetKMSize(dstKey, m); err != nil { 500 | return 0, err 501 | } 502 | 503 | ek = db.sEncodeSetKey(dstKey, m) 504 | 505 | if _, err := db.bucket.Get(ek); err != nil { 506 | return 0, err 507 | } 508 | 509 | t.Put(ek, nil) 510 | } 511 | 512 | var num = int64(len(v)) 513 | sk := db.sEncodeSizeKey(dstKey) 514 | t.Put(sk, PutInt64(num)) 515 | 516 | if err = t.Commit(); err != nil { 517 | return 0, err 518 | } 519 | return num, nil 520 | } 521 | 522 | func (db *DB) SClear(key []byte) (int64, error) { 523 | if err := checkKeySize(key); err != nil { 524 | return 0, err 525 | } 526 | 527 | t := db.setBatch 528 | t.Lock() 529 | defer t.Unlock() 530 | 531 | num := db.sDelete(t, key) 532 | db.rmExpire(t, SetType, key) 533 | 534 | err := t.Commit() 535 | return num, err 536 | } 537 | 538 | func (db *DB) SMclear(keys ...[]byte) (int64, error) { 539 | t := db.setBatch 540 | t.Lock() 541 | defer t.Unlock() 542 | 543 | for _, key := range keys { 544 | if err := checkKeySize(key); err != nil { 545 | return 0, err 546 | } 547 | 548 | db.sDelete(t, key) 549 | db.rmExpire(t, SetType, key) 550 | } 551 | 552 | err := t.Commit() 553 | return int64(len(keys)), err 554 | } 555 | 556 | func (db *DB) SExpire(key []byte, duration int64) (int64, error) { 557 | if duration <= 0 { 558 | return 0, errExpireValue 559 | } 560 | 561 | return db.sExpireAt(key, time.Now().Unix()+duration) 562 | 563 | } 564 | 565 | func (db *DB) SExpireAt(key []byte, when int64) (int64, error) { 566 | if when <= time.Now().Unix() { 567 | return 0, errExpireValue 568 | } 569 | 570 | return db.sExpireAt(key, when) 571 | 572 | } 573 | 574 | func (db *DB) STTL(key []byte) (int64, error) { 575 | if err := checkKeySize(key); err != nil { 576 | return -1, err 577 | } 578 | 579 | return db.ttl(SetType, key) 580 | } 581 | 582 | func (db *DB) SPersist(key []byte) (int64, error) { 583 | if err := checkKeySize(key); 
err != nil { 584 | return 0, err 585 | } 586 | 587 | t := db.setBatch 588 | t.Lock() 589 | defer t.Unlock() 590 | 591 | n, err := db.rmExpire(t, SetType, key) 592 | if err != nil { 593 | return 0, err 594 | } 595 | err = t.Commit() 596 | return n, err 597 | } 598 | 599 | func (db *DB) SScan(key []byte, count int, inclusive bool, match string) ([][]byte, error) { 600 | return db.scan(SSizeType, key, count, inclusive, match) 601 | } 602 | --------------------------------------------------------------------------------