├── Cluster ├── clusteredBigCache.go ├── clusteredBigCache_test.go ├── config.go ├── remoteNode.go └── remoteNode_test.go ├── Gopkg.lock ├── Gopkg.toml ├── LICENSE ├── README.md ├── bigcache ├── bigcache.go ├── clock.go ├── config.go ├── encoding.go ├── entry_not_found_error.go ├── fnv.go ├── hash.go ├── iterator.go ├── logger.go ├── queue │ ├── bytes_queue.go │ ├── freeList.go │ └── freeList_test.go ├── shard.go ├── stats.go ├── ttl.go └── utils.go ├── comms └── connection.go ├── message ├── defs.go ├── deleteMessage.go ├── getReqMessage.go ├── getRspMessage.go ├── messages_test.go ├── pingMessage.go ├── pongMessage.go ├── putMessage.go ├── syncReqMessage.go ├── syncRspMessage.go ├── verifyMessge.go └── verifyOK.go ├── test └── cache_test.go └── utils ├── defs.go ├── loggerWrapper.go ├── sliceList.go ├── sliceList_test.go ├── testUtils.go └── utils.go /Cluster/clusteredBigCache.go: -------------------------------------------------------------------------------- 1 | package clusteredBigCache 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "net" 7 | "strconv" 8 | "sync" 9 | 10 | "github.com/oaStuff/clusteredBigCache/bigcache" 11 | "github.com/oaStuff/clusteredBigCache/comms" 12 | "github.com/oaStuff/clusteredBigCache/message" 13 | "github.com/oaStuff/clusteredBigCache/utils" 14 | "time" 15 | ) 16 | 17 | //Constants used to specify various conditions and state within the system 18 | const ( 19 | REPLICATION_MODE_FULL_REPLICATE byte = iota 20 | REPLICATION_MODE_SHARD 21 | 22 | clusterStateStarting byte = iota 23 | clusterStateStarted 24 | clusterStateEnded 25 | 26 | clusterModeACTIVE byte = iota 27 | clusterModePASSIVE 28 | ) 29 | 30 | //CHAN_SIZE for all defined channels 31 | const CHAN_SIZE = 1024 * 64 32 | 33 | //Global errors that can be returned to callers 34 | var ( 35 | ErrNotEnoughReplica = errors.New("not enough replica") 36 | ErrNotFound = errors.New("data not found") 37 | ErrTimedOut = errors.New("not found as a result of timing out") 38 | ErrNotStarted = errors.New("node not started, call Start()") 39 | ) 40 | 41 | //ClusteredBigCacheConfig is configuration for the cache 42 | type ClusteredBigCacheConfig struct { 43 | Id string `json:"id"` 44 | Join bool `json:"join"` 45 | JoinIp string `json:"join_ip"` 46 | LocalAddresses []string `json:"local_addresses"` 47 | LocalPort int `json:"local_port"` 48 | BindAll bool `json:"bind_all"` 49 | ConnectRetries int `json:"connect_retries"` 50 | TerminateOnListenerExit bool `json:"terminate_on_listener_exit"` 51 | ReplicationMode byte `json:"replication_mode"` 52 | ReplicationFactor int `json:"replication_factor"` 53 | WriteAck bool `json:"write_ack"` 54 | DebugMode bool `json:"debug_mode"` 55 | DebugPort int `json:"debug_port"` 56 | ReconnectOnDisconnect bool `json:"reconnect_on_disconnect"` 57 | PingFailureThreshHold int32 `json:"ping_failure_thresh_hold"` 58 | PingInterval int `json:"ping_interval"` 59 | PingTimeout int `json:"ping_timeout"` 60 | ShardSize int `json:"shard_size"` 61 | } 62 | 63 | //ClusteredBigCache definition 64 | type ClusteredBigCache struct { 65 | config *ClusteredBigCacheConfig 66 | cache *bigcache.BigCache 67 | remoteNodes *utils.SliceList 68 | logger utils.AppLogger 69 | serverEndpoint net.Listener 70 | joinQueue chan *message.ProposedPeer 71 | pendingConn sync.Map 72 | nodeIndex int 73 | getRequestChan chan *getRequestDataWrapper 74 | replicationChan chan *replicationMsg 75 | state byte 76 | mode byte 77 | } 78 | 79 | //New creates a new local node 80 | func New(config *ClusteredBigCacheConfig, logger utils.AppLogger) 
*ClusteredBigCache { 81 | 82 | cfg := bigcache.DefaultConfig() 83 | if config.ShardSize < 16 { 84 | cfg.Shards = 16 85 | } else { 86 | cfg.Shards = config.ShardSize 87 | } 88 | cache, err := bigcache.NewBigCache(cfg) 89 | if err != nil { 90 | panic(err) 91 | } 92 | 93 | return &ClusteredBigCache{ 94 | config: config, 95 | cache: cache, 96 | remoteNodes: utils.NewSliceList(), 97 | logger: logger, 98 | joinQueue: make(chan *message.ProposedPeer, 512), 99 | pendingConn: sync.Map{}, 100 | nodeIndex: 0, 101 | getRequestChan: make(chan *getRequestDataWrapper, CHAN_SIZE), 102 | replicationChan: make(chan *replicationMsg, CHAN_SIZE), 103 | state: clusterStateStarting, 104 | mode: clusterModeACTIVE, 105 | } 106 | } 107 | 108 | //NewPassiveClient creates a new local node that does not store any data locally 109 | func NewPassiveClient(id string, serverEndpoint string, localPort, pingInterval, pingTimeout int, pingFailureThreashold int32, logger utils.AppLogger) *ClusteredBigCache { 110 | 111 | config := DefaultClusterConfig() 112 | config.Id = id 113 | config.Join = true 114 | config.JoinIp = serverEndpoint 115 | config.ReconnectOnDisconnect = true 116 | config.LocalPort = localPort 117 | config.PingInterval = pingInterval 118 | config.PingTimeout = pingTimeout 119 | config.PingFailureThreshHold = pingFailureThreashold 120 | 121 | return &ClusteredBigCache{ 122 | config: config, 123 | cache: nil, 124 | remoteNodes: utils.NewSliceList(), 125 | logger: logger, 126 | joinQueue: make(chan *message.ProposedPeer, 512), 127 | pendingConn: sync.Map{}, 128 | nodeIndex: 0, 129 | getRequestChan: make(chan *getRequestDataWrapper, CHAN_SIZE), 130 | replicationChan: make(chan *replicationMsg, CHAN_SIZE), 131 | state: clusterStateStarting, 132 | mode: clusterModePASSIVE, 133 | } 134 | } 135 | 136 | //check configuration values 137 | func (node *ClusteredBigCache) checkConfig() { 138 | if node.config.LocalPort < 1 { 139 | panic("Local port can not be zero.") 140 | } 141 | 142 | if node.config.ConnectRetries < 1 { 143 | node.config.ConnectRetries = 5 144 | } 145 | 146 | node.config.ReplicationMode = REPLICATION_MODE_FULL_REPLICATE 147 | 148 | } 149 | 150 | func (node *ClusteredBigCache) setReplicationFactor(rf int) { 151 | if rf < 1 { 152 | rf = 1 153 | } 154 | 155 | node.config.ReplicationFactor = rf 156 | } 157 | 158 | //Start this Cluster running 159 | func (node *ClusteredBigCache) Start() error { 160 | 161 | for x := 0; x < 5; x++ { 162 | go node.requestSenderForGET() 163 | go node.replication() 164 | } 165 | 166 | node.checkConfig() 167 | if "" == node.config.Id { 168 | node.config.Id = utils.GenerateNodeId(32) 169 | } 170 | utils.Info(node.logger, "cluster node ID is "+node.config.Id) 171 | 172 | if err := node.bringNodeUp(); err != nil { 173 | return err 174 | } 175 | 176 | go node.connectToExistingNodes() 177 | if true == node.config.Join { //we are to join an existing cluster 178 | if err := node.joinCluster(); err != nil { 179 | return err 180 | } 181 | } 182 | 183 | node.state = clusterStateStarted 184 | time.Sleep(time.Millisecond * 200) //allow things to start up 185 | return nil 186 | } 187 | 188 | //ShutDown shuts this Cluster and all terminate all connections to remoteNodes 189 | func (node *ClusteredBigCache) ShutDown() { 190 | 191 | node.state = clusterStateEnded 192 | for _, v := range node.remoteNodes.Values() { 193 | rn := v.(*remoteNode) 194 | rn.config.ReconnectOnDisconnect = false 195 | rn.tearDown() 196 | } 197 | 198 | close(node.joinQueue) 199 | close(node.getRequestChan) 200 | 
close(node.replicationChan) 201 | 202 | if node.serverEndpoint != nil { 203 | node.serverEndpoint.Close() 204 | } 205 | } 206 | 207 | //join an existing cluster 208 | func (node *ClusteredBigCache) joinCluster() error { 209 | if "" == node.config.JoinIp { 210 | utils.Critical(node.logger, "the server's IP to join can not be empty.") 211 | return errors.New("the server's IP to join can not be empty since Join is true, there must be a JoinIP") 212 | } 213 | 214 | remoteNode := newRemoteNode(&remoteNodeConfig{IpAddress: node.config.JoinIp, 215 | ConnectRetries: node.config.ConnectRetries, 216 | Sync: true, ReconnectOnDisconnect: node.config.ReconnectOnDisconnect, 217 | PingInterval: node.config.PingInterval, 218 | PingTimeout: node.config.PingTimeout, 219 | PingFailureThreshHold: node.config.PingFailureThreshHold}, 220 | node, node.logger) 221 | remoteNode.join() 222 | return nil 223 | } 224 | 225 | //bring up this Cluster 226 | func (node *ClusteredBigCache) bringNodeUp() error { 227 | 228 | var err error 229 | utils.Info(node.logger, "bringing up node "+node.config.Id) 230 | node.serverEndpoint, err = net.Listen("tcp", ":"+strconv.Itoa(node.config.LocalPort)) 231 | if err != nil { 232 | utils.Error(node.logger, fmt.Sprintf("unable to Listen on port %d. [%s]", node.config.LocalPort, err.Error())) 233 | return err 234 | } 235 | 236 | go node.listen() 237 | return nil 238 | } 239 | 240 | //event function used by remoteNode to announce the disconnection of itself 241 | func (node *ClusteredBigCache) eventRemoteNodeDisconneced(r *remoteNode) { 242 | 243 | node.remoteNodes.Remove(r.config.Id) 244 | } 245 | 246 | //util function to return all know remoteNodes 247 | func (node *ClusteredBigCache) getRemoteNodes() []interface{} { 248 | 249 | return node.remoteNodes.Values() 250 | } 251 | 252 | //event function used by remoteNode to verify itself 253 | func (node *ClusteredBigCache) eventVerifyRemoteNode(remoteNode *remoteNode) bool { 254 | 255 | if node.remoteNodes.Contains(remoteNode.config.Id) { 256 | utils.Warn(node.logger, "clusterBigCache already contains "+remoteNode.config.Id) 257 | return false 258 | } 259 | 260 | node.remoteNodes.Add(remoteNode.config.Id, remoteNode) 261 | utils.Info(node.logger, fmt.Sprintf("added remote node '%s' into group", remoteNode.config.Id)) 262 | node.pendingConn.Delete(remoteNode.config.Id) 263 | 264 | return true 265 | } 266 | 267 | //event function used by remoteNode to notify this node of a connection that failed 268 | func (node *ClusteredBigCache) eventUnableToConnect(config *remoteNodeConfig) { 269 | node.pendingConn.Delete(config.Id) 270 | } 271 | 272 | //listen for new connections to this node 273 | func (node *ClusteredBigCache) listen() { 274 | 275 | utils.Info(node.logger, fmt.Sprintf("node '%s' is up and running", node.config.Id)) 276 | errCount := 0 277 | for { 278 | conn, err := node.serverEndpoint.Accept() 279 | if err != nil { 280 | utils.Error(node.logger, err.Error()) 281 | errCount++ 282 | if errCount >= 5 { 283 | break 284 | } 285 | continue 286 | } 287 | errCount = 0 288 | 289 | //build a new remoteNode from this new connection 290 | tcpConn := conn.(*net.TCPConn) 291 | remoteNode := newRemoteNode(&remoteNodeConfig{IpAddress: tcpConn.RemoteAddr().String(), 292 | ConnectRetries: node.config.ConnectRetries, 293 | Sync: false, ReconnectOnDisconnect: false, 294 | PingInterval: node.config.PingInterval, 295 | PingTimeout: node.config.PingTimeout, 296 | PingFailureThreshHold: node.config.PingFailureThreshHold}, 297 | node, node.logger) 298 | 
remoteNode.setState(nodeStateHandshake) 299 | remoteNode.setConnection(comms.WrapConnection(tcpConn)) 300 | utils.Info(node.logger, fmt.Sprintf("new connection from remote '%s'", tcpConn.RemoteAddr().String())) 301 | remoteNode.start() 302 | } 303 | utils.Critical(node.logger, "listening loop terminated unexpectedly due to too many errors") 304 | if node.config.TerminateOnListenerExit { 305 | panic("listening loop terminated unexpectedly due to too many errors") 306 | } 307 | } 308 | 309 | //this is a goroutine that takes details from a channel and connect to them if they are not known 310 | //when a remote system connects to this node or when this node connects to a remote system, it will query that system 311 | //for the list of its connected nodes and pushes that list into this channel so that this node can connect forming 312 | //a mesh network in the process 313 | func (node *ClusteredBigCache) connectToExistingNodes() { 314 | 315 | for value := range node.joinQueue { 316 | 317 | if node.state != clusterStateStarted { 318 | continue 319 | } 320 | 321 | if _, ok := node.pendingConn.Load(value.Id); ok { 322 | utils.Warn(node.logger, fmt.Sprintf("remote node '%s' already in connnection pending queue", value.Id)) 323 | continue 324 | } 325 | 326 | //if we are already connected to the remote node just continue 327 | keys := node.remoteNodes.Keys() 328 | if _, ok := keys.Load(value.Id); ok { 329 | continue 330 | } 331 | 332 | //we are here because we don't know this remote node 333 | remoteNode := newRemoteNode(&remoteNodeConfig{IpAddress: value.IpAddress, 334 | ConnectRetries: node.config.ConnectRetries, 335 | Id: value.Id, Sync: false, ReconnectOnDisconnect: node.config.ReconnectOnDisconnect}, node, node.logger) 336 | remoteNode.join() 337 | node.pendingConn.Store(value.Id, value.IpAddress) 338 | } 339 | } 340 | 341 | //Put adds data into the cluster 342 | func (node *ClusteredBigCache) Put(key string, data []byte, duration time.Duration) error { 343 | 344 | if node.state != clusterStateStarted { 345 | return ErrNotStarted 346 | } 347 | 348 | //store it locally first 349 | expiryTime := bigcache.NO_EXPIRY 350 | if node.mode == clusterModeACTIVE { 351 | var err error 352 | expiryTime, err = node.cache.Set(key, data, duration) 353 | if err != nil { 354 | return err 355 | } 356 | } else if node.mode == clusterModePASSIVE { 357 | expiryTime = uint64(time.Now().Unix()) 358 | if duration != time.Duration(bigcache.NO_EXPIRY) { 359 | expiryTime += uint64(duration.Seconds()) 360 | } else { 361 | expiryTime = bigcache.NO_EXPIRY 362 | } 363 | } 364 | 365 | //we are going to do full replication across the cluster 366 | peers := node.remoteNodes.Values() 367 | for x := 0; x < len(peers); x++ { //just replicate serially from left to right 368 | if peers[x].(*remoteNode).mode == clusterModePASSIVE { 369 | continue 370 | } 371 | node.replicationChan <- &replicationMsg{r: peers[x].(*remoteNode), 372 | m: &message.PutMessage{Key: key, Data: data, Expiry: expiryTime}} 373 | } 374 | 375 | return nil 376 | } 377 | 378 | //Get retrieves data from the cluster 379 | func (node *ClusteredBigCache) Get(key string, timeout time.Duration) ([]byte, error) { 380 | if node.state != clusterStateStarted { 381 | return nil, ErrNotStarted 382 | } 383 | 384 | //if present locally then send it 385 | if node.mode == clusterModeACTIVE { 386 | data, err := node.cache.Get(key) 387 | if err == nil { 388 | return data, nil 389 | } 390 | } 391 | 392 | //we did not get the data locally so lets check the cluster 393 | peers := 
node.getRemoteNodes() 394 | if len(peers) < 1 { 395 | return nil, ErrNotFound 396 | } 397 | replyC := make(chan *getReplyData) 398 | reqData := &getRequestData{key: key, randStr: utils.GenerateNodeId(8), 399 | replyChan: replyC, done: make(chan struct{})} 400 | 401 | for _, peer := range peers { 402 | if peer.(*remoteNode).mode == clusterModePASSIVE { 403 | continue 404 | } 405 | node.getRequestChan <- &getRequestDataWrapper{r: peer.(*remoteNode), g: reqData} 406 | } 407 | 408 | var replyData *getReplyData 409 | select { 410 | case replyData = <-replyC: 411 | case <-time.After(timeout): 412 | return nil, ErrTimedOut 413 | } 414 | 415 | close(reqData.done) 416 | return replyData.data, nil 417 | } 418 | 419 | //Delete removes a key from the cluster 420 | func (node *ClusteredBigCache) Delete(key string) error { 421 | 422 | if node.state != clusterStateStarted { 423 | return ErrNotStarted 424 | } 425 | 426 | //delete locally 427 | if node.mode == clusterModeACTIVE { 428 | node.cache.Delete(key) 429 | } 430 | 431 | peers := node.remoteNodes.Values() 432 | //just send the delete message to everyone 433 | for x := 0; x < len(peers); x++ { 434 | if peers[x].(*remoteNode).mode == clusterModePASSIVE { 435 | continue 436 | } 437 | node.replicationChan <- &replicationMsg{r: peers[x].(*remoteNode), m: &message.DeleteMessage{Key: key}} 438 | } 439 | 440 | return nil 441 | } 442 | 443 | //Statistics returns the cache stats 444 | func (node *ClusteredBigCache) Statistics() string { 445 | if node.mode == clusterModeACTIVE { 446 | stats := node.cache.Stats() 447 | return fmt.Sprintf("%q", stats) 448 | } 449 | 450 | return "No stats for passive mode" 451 | } 452 | 453 | //a goroutine to send get request to members of the cluster 454 | func (node *ClusteredBigCache) requestSenderForGET() { 455 | for value := range node.getRequestChan { 456 | value.r.getData(value.g) 457 | } 458 | } 459 | 460 | //a goroutine used to replicate messages across the cluster 461 | func (node *ClusteredBigCache) replication() { 462 | for msg := range node.replicationChan { 463 | msg.r.sendMessage(msg.m) 464 | } 465 | } 466 | -------------------------------------------------------------------------------- /Cluster/clusteredBigCache_test.go: -------------------------------------------------------------------------------- 1 | package clusteredBigCache 2 | 3 | import ( 4 | "github.com/oaStuff/clusteredBigCache/utils" 5 | "testing" 6 | "time" 7 | ) 8 | 9 | func TestNodeConnecting(t *testing.T) { 10 | 11 | s := utils.NewTestServer(9093, true) 12 | err := s.Start() 13 | if err != nil { 14 | panic(err) 15 | } 16 | defer s.Close() 17 | 18 | node := New(&ClusteredBigCacheConfig{Join: true, LocalPort: 9998, ConnectRetries: 2}, nil) 19 | if err := node.Start(); err == nil { 20 | t.Error("node should not be able to start when 'join' is true and there is no joinIp") 21 | return 22 | } 23 | 24 | node.ShutDown() 25 | node = New(&ClusteredBigCacheConfig{Join: true, LocalPort: 9998, ConnectRetries: 2}, nil) 26 | node.config.JoinIp = "localhost:9093" 27 | if err = node.Start(); err != nil { 28 | t.Error(err) 29 | return 30 | } 31 | 32 | s.SendVerifyMessage("server1") 33 | time.Sleep(time.Second * 3) 34 | if node.remoteNodes.Size() != 1 { 35 | t.Log(node.remoteNodes.Size()) 36 | t.Error("only one node ought to be connected") 37 | } 38 | 39 | if _, ok := node.pendingConn.Load("remote_1"); !ok { 40 | t.Error("there should be a remote_1 server we are trying to connect to") 41 | } 42 | 43 | } 44 | 45 | func TestVerifyRemoteNode(t *testing.T) { 46 | 47 | 
node := New(&ClusteredBigCacheConfig{Join: true, LocalPort: 9999, ConnectRetries: 0}, nil) 48 | rn := newRemoteNode(&remoteNodeConfig{IpAddress: "localhost:9092", Sync: false, 49 | PingFailureThreshHold: 1, PingInterval: 0}, node, nil) 50 | 51 | if !node.eventVerifyRemoteNode(rn) { 52 | t.Error("remoted node ought to be added") 53 | } 54 | 55 | if node.eventVerifyRemoteNode(rn) { 56 | t.Error("duplicated remote node(with same Id) should not be added twice") 57 | } 58 | 59 | node.eventRemoteNodeDisconneced(rn) 60 | 61 | if !node.eventVerifyRemoteNode(rn) { 62 | t.Error("remote node ought to be added after been removed") 63 | } 64 | 65 | node.ShutDown() 66 | } 67 | 68 | func TestBringingUpNode(t *testing.T) { 69 | 70 | node := New(&ClusteredBigCacheConfig{Join: false, LocalPort: 1799, ConnectRetries: 0}, nil) 71 | if err := node.Start(); err != nil { 72 | t.Log(err) 73 | t.Error("node could not be brougth up") 74 | } 75 | node.ShutDown() 76 | } 77 | 78 | func TestPutData(t *testing.T) { 79 | node1 := New(&ClusteredBigCacheConfig{Join: false, LocalPort: 1989, ConnectRetries: 0}, nil) 80 | node2 := New(&ClusteredBigCacheConfig{Join: true, LocalPort: 1998, JoinIp: "localhost:1989", ConnectRetries: 2}, nil) 81 | 82 | node1.Start() 83 | node2.Start() 84 | 85 | node1.Put("key_1", []byte("data_1"), time.Minute*1) 86 | time.Sleep(time.Millisecond * 200) 87 | result, err := node2.Get("key_1", time.Millisecond*200) 88 | if err != nil { 89 | t.Error(err) 90 | } 91 | 92 | if string(result) != "data_1" { 93 | t.Error("data placed in node1 not the same gotten from node2") 94 | } 95 | 96 | node2.Delete("key_1") 97 | time.Sleep(time.Millisecond * 200) 98 | _, err = node1.Get("key_1", time.Millisecond*200) 99 | if err == nil { 100 | t.Error("error ought to be not found because the key and its data has been deleted") 101 | } 102 | 103 | node1.ShutDown() 104 | node2.ShutDown() 105 | } 106 | 107 | func TestPutDataWithPassiveClient(t *testing.T) { 108 | node1 := New(&ClusteredBigCacheConfig{Join: false, LocalPort: 1979, ConnectRetries: 0}, nil) 109 | node2 := NewPassiveClient("testMachine", "localhost:1979", 1898, 5, 3, 10, nil) 110 | 111 | node1.Start() 112 | node2.Start() 113 | 114 | node1.Put("key_1", []byte("data_1"), time.Minute*1) 115 | time.Sleep(time.Millisecond * 200) 116 | result, err := node2.Get("key_1", time.Millisecond*200) 117 | 118 | if err != nil { 119 | t.Error(err) 120 | } 121 | 122 | if string(result) != "data_1" { 123 | t.Error("data placed in node1 not the same gotten from node2") 124 | } 125 | 126 | node2.Delete("key_1") 127 | time.Sleep(time.Millisecond * 200) 128 | result, err = node1.Get("key_1", time.Millisecond*200) 129 | if err == nil { 130 | t.Error("error ought to be found because the key and its data has been deleted") 131 | } 132 | 133 | node2.Put("key_2", []byte("data_2"), time.Minute*1) 134 | node2.Put("key_3", []byte("data_3"), time.Minute*1) 135 | node2.Put("key_4", []byte("data_4"), 0) 136 | node2.Put("key_45", []byte("data_5"), 0) 137 | time.Sleep(time.Millisecond * 200) 138 | result, err = node1.Get("key_2", time.Millisecond*200) 139 | 140 | if err != nil { 141 | t.Error(err) 142 | } 143 | 144 | if string(result) != "data_2" { 145 | t.Error("data placed in node2 not the same gotten from node1") 146 | } 147 | 148 | node1.ShutDown() 149 | node2.ShutDown() 150 | } 151 | 152 | func TestPassiveMode(t *testing.T) { 153 | 154 | node1 := New(&ClusteredBigCacheConfig{Join: false, LocalPort: 1959, ConnectRetries: 2}, nil) 155 | 156 | client1 := NewPassiveClient("testMachine_1", 
"localhost:1959", 1897, 5, 3, 10, nil) 157 | client2 := NewPassiveClient("testMachine_2", "localhost:1897", 1996, 5, 3, 10, nil) 158 | 159 | node1.Start() 160 | client1.Start() 161 | client2.Start() 162 | 163 | time.Sleep(time.Millisecond * 300) 164 | if (client1.remoteNodes.Size() != 1) || (client2.remoteNodes.Size() != 0) { 165 | t.Error("node with mode PASSIVE should not be able to connect to each other") 166 | } 167 | 168 | node1.ShutDown() 169 | client1.ShutDown() 170 | client2.ShutDown() 171 | } 172 | 173 | func TestBadShardConfig(t *testing.T) { 174 | defer func() { recover() }() 175 | 176 | node1 := New(&ClusteredBigCacheConfig{Join: false, LocalPort: 1659, ConnectRetries: 2, ShardSize: 19}, nil) 177 | err := node1.Start() 178 | if err == nil { 179 | t.Error("node ought to fail because of bad configuration") 180 | } 181 | 182 | } 183 | 184 | func TestBadPortConfig(t *testing.T) { 185 | defer func() { recover() }() 186 | 187 | node1 := New(&ClusteredBigCacheConfig{Join: false, LocalPort: 0, ConnectRetries: 0, ShardSize: 10}, nil) 188 | err := node1.Start() 189 | if err == nil { 190 | t.Error("node ought to fail because of bad configuration") 191 | } 192 | 193 | } 194 | 195 | func TestSamePortError(t *testing.T) { 196 | defer func() { recover() }() 197 | 198 | node1 := New(&ClusteredBigCacheConfig{Join: false, LocalPort: 2048, ConnectRetries: 0, ShardSize: 10}, nil) 199 | node2 := New(&ClusteredBigCacheConfig{Join: false, LocalPort: 2048, ConnectRetries: 0, ShardSize: 10}, nil) 200 | 201 | node1.Start() 202 | node2.Start() 203 | 204 | time.Sleep(time.Millisecond * 300) 205 | 206 | node1.ShutDown() 207 | node2.ShutDown() 208 | } 209 | 210 | func TestClusteredBigCache_Statistics(t *testing.T) { 211 | node1 := New(&ClusteredBigCacheConfig{Join: false, LocalPort: 1179, ConnectRetries: 0}, nil) 212 | node2 := NewPassiveClient("testMachine", "localhost:1979", 2898, 5, 3, 10, nil) 213 | 214 | node1.Start() 215 | node2.Start() 216 | 217 | time.Sleep(time.Millisecond * 200) 218 | 219 | t.Log(node1.Statistics()) 220 | if "No stats for passive mode" != node2.Statistics() { 221 | t.Error("passive client node does not have statistics info") 222 | } 223 | 224 | node1.ShutDown() 225 | node2.ShutDown() 226 | } 227 | -------------------------------------------------------------------------------- /Cluster/config.go: -------------------------------------------------------------------------------- 1 | package clusteredBigCache 2 | 3 | import "github.com/oaStuff/clusteredBigCache/message" 4 | 5 | type getReplyData struct { 6 | data []byte 7 | } 8 | 9 | type getRequestData struct { 10 | key string 11 | randStr string 12 | replyChan chan *getReplyData 13 | done chan struct{} 14 | } 15 | 16 | type getRequestDataWrapper struct { 17 | g *getRequestData 18 | r *remoteNode 19 | } 20 | 21 | type replicationMsg struct { 22 | r *remoteNode 23 | m message.NodeMessage 24 | } 25 | 26 | //DefaultClusterConfig creates a new default configuration 27 | func DefaultClusterConfig() *ClusteredBigCacheConfig { 28 | 29 | return &ClusteredBigCacheConfig{ 30 | Join: false, 31 | BindAll: true, 32 | LocalPort: 9911, 33 | ConnectRetries: 5, 34 | TerminateOnListenerExit: false, 35 | ReplicationFactor: 1, 36 | WriteAck: true, 37 | ReplicationMode: REPLICATION_MODE_FULL_REPLICATE, 38 | ReconnectOnDisconnect: false, 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /Cluster/remoteNode.go: -------------------------------------------------------------------------------- 1 | package 
clusteredBigCache 2 | 3 | import ( 4 | "fmt" 5 | "net" 6 | "strconv" 7 | "sync" 8 | "sync/atomic" 9 | "time" 10 | 11 | "encoding/binary" 12 | "errors" 13 | "github.com/oaStuff/clusteredBigCache/comms" 14 | "github.com/oaStuff/clusteredBigCache/message" 15 | "github.com/oaStuff/clusteredBigCache/utils" 16 | "github.com/oklog/run" 17 | ) 18 | 19 | const ( 20 | nodeStateConnecting = iota 21 | nodeStateConnected 22 | nodeStateDisconnected 23 | nodeStateHandshake 24 | ) 25 | 26 | type remoteNodeState uint8 27 | 28 | type nodeMetrics struct { 29 | pingSent uint64 30 | pingRecieved uint64 31 | pongSent uint64 32 | pongRecieved uint64 33 | dropedMsg uint64 34 | } 35 | 36 | // remote node configuration 37 | type remoteNodeConfig struct { 38 | Id string `json:"id"` 39 | IpAddress string `json:"ip_address"` 40 | PingFailureThreshHold int32 `json:"ping_failure_thresh_hold"` 41 | PingInterval int `json:"ping_interval"` 42 | PingTimeout int `json:"ping_timeout"` 43 | ConnectRetries int `json:"connect_retries"` 44 | ServicePort string `json:"service_port"` 45 | Sync bool `json:"sync"` 46 | ReconnectOnDisconnect bool `json:"reconnect_on_disconnect"` 47 | } 48 | 49 | // remote node definition 50 | type remoteNode struct { 51 | config *remoteNodeConfig 52 | metrics *nodeMetrics 53 | connection *comms.Connection 54 | parentNode *ClusteredBigCache 55 | inboundMsgQueue chan *message.NodeWireMessage 56 | outboundMsgQueue chan message.NodeMessage 57 | logger utils.AppLogger 58 | state remoteNodeState 59 | stateLock sync.Mutex 60 | done chan struct{} 61 | pingTimer *time.Ticker //used to send ping message to remote 62 | pingTimeout *time.Timer //used to monitor ping response 63 | pingFailure int32 //count the number of pings without response 64 | pendingGet *sync.Map 65 | mode byte 66 | wg *sync.WaitGroup 67 | } 68 | 69 | //check configurations for sensible defaults 70 | func checkConfig(logger utils.AppLogger, config *remoteNodeConfig) { 71 | 72 | if config.PingInterval < 1 { 73 | config.PingInterval = 5 74 | } 75 | 76 | if config.PingTimeout < 1 { 77 | config.PingTimeout = 3 78 | } 79 | 80 | if config.PingTimeout > config.PingInterval { 81 | utils.Warn(logger, "ping timeout is greater than ping interval, pings will NEVER timeout") 82 | } 83 | 84 | if config.PingFailureThreshHold == 0 { 85 | config.PingFailureThreshHold = 5 86 | } 87 | } 88 | 89 | //create a new remoteNode object 90 | func newRemoteNode(config *remoteNodeConfig, parent *ClusteredBigCache, logger utils.AppLogger) *remoteNode { 91 | checkConfig(logger, config) 92 | return &remoteNode{ 93 | config: config, 94 | inboundMsgQueue: make(chan *message.NodeWireMessage, CHAN_SIZE), 95 | outboundMsgQueue: make(chan message.NodeMessage, CHAN_SIZE), 96 | done: make(chan struct{}), 97 | state: nodeStateDisconnected, 98 | stateLock: sync.Mutex{}, 99 | parentNode: parent, 100 | logger: logger, 101 | metrics: &nodeMetrics{}, 102 | pendingGet: &sync.Map{}, 103 | wg: &sync.WaitGroup{}, 104 | } 105 | } 106 | 107 | //set the state of this remoteNode. 
always use this method because of the lock 108 | func (r *remoteNode) setState(state remoteNodeState) { 109 | r.stateLock.Lock() 110 | defer r.stateLock.Unlock() 111 | 112 | r.state = state 113 | } 114 | 115 | //just set the connection for this remoteNode 116 | func (r *remoteNode) setConnection(conn *comms.Connection) { 117 | r.connection = conn 118 | } 119 | 120 | //shut down this object 121 | func (r *remoteNode) shutDown() { 122 | defer func() { recover() }() 123 | 124 | select { 125 | case r.done <- struct{}{}: 126 | default: 127 | } 128 | } 129 | 130 | //startup this remoteNode 131 | func (r *remoteNode) start() { 132 | r.wg.Add(1) //temporary increment 133 | var g run.Group //uses run.Group 134 | { 135 | g.Add(func() error { //this is to terminate this remoteNode from its parent 136 | <-r.done 137 | return errors.New("terminating") 138 | }, func(err error) { 139 | close(r.done) 140 | }) 141 | } 142 | { 143 | done := make(chan struct{}) 144 | g.Add(func() error { //this is for the ping timer 145 | r.pingTimer = time.NewTicker(time.Second * time.Duration(r.config.PingInterval)) 146 | r.sendMessage(&message.PingMessage{}) //send the first ping message 147 | exit := false 148 | r.wg.Add(1) 149 | for { 150 | select { 151 | case <-r.pingTimer.C: 152 | atomic.AddUint64(&r.metrics.pingSent, 1) 153 | if !r.pingTimeout.Stop() { 154 | select { 155 | case <-r.pingTimeout.C: 156 | default: 157 | } 158 | } 159 | r.pingTimeout.Reset(time.Second * time.Duration(r.config.PingTimeout)) 160 | r.sendMessage(&message.PingMessage{}) 161 | case <-done: //we have this so that the goroutine would not linger after this node disconnects because 162 | exit = true //of the blocking channel in the above case statement 163 | break 164 | } 165 | 166 | if exit { 167 | break 168 | } 169 | } 170 | utils.Info(r.logger, fmt.Sprintf("shutting down ping timer goroutine for '%s'", r.config.Id)) 171 | r.wg.Done() 172 | return errors.New("terminating timer goroutine") 173 | }, func(err error) { 174 | close(done) 175 | }) 176 | } 177 | { 178 | done := make(chan struct{}) 179 | g.Add(func() error { //this is for the ping response (pong) timer 180 | r.pingTimeout = time.NewTimer(time.Second * time.Duration(r.config.PingTimeout)) 181 | fault := false 182 | exit := false 183 | r.wg.Add(1) 184 | for { 185 | select { 186 | case <-r.pingTimeout.C: 187 | if r.state != nodeStateHandshake { 188 | utils.Warn(r.logger, 189 | fmt.Sprintf("no ping response within configured time frame from remote node '%s'", r.config.Id)) 190 | } else { 191 | utils.Warn(r.logger, "remote node not verified, therefore ping failing") 192 | } 193 | atomic.AddInt32(&r.pingFailure, 1) 194 | if r.pingFailure >= r.config.PingFailureThreshHold { 195 | r.pingTimeout.Stop() 196 | fault = true 197 | break 198 | } 199 | case <-done: //we have this so that the goroutine would not linger after this node disconnects because 200 | exit = true //of the blocking channel in the above case statement 201 | break 202 | } 203 | 204 | if exit || fault { 205 | break 206 | } 207 | } 208 | 209 | if fault { 210 | //the remote node is assumed to be 'dead' since it has not responded to recent ping request 211 | utils.Warn(r.logger, fmt.Sprintf("shutting down connection to remote node '%s' due to no ping response", r.config.Id)) 212 | } 213 | utils.Info(r.logger, fmt.Sprintf("shutting down ping timeout goroutine for '%s'", r.config.Id)) 214 | r.wg.Done() 215 | return errors.New("terminating timeout goroutine") 216 | }, func(err error) { 217 | close(done) 218 | }) 219 | } 220 | { 221 | 
g.Add(func() error { //this is for the network consumer. ie reading data off the network 222 | r.wg.Add(1) 223 | r.networkConsumer() 224 | r.wg.Done() 225 | return errors.New("terminating networkConsumer") 226 | }, func(err error) { 227 | r.setState(nodeStateDisconnected) 228 | }) 229 | } 230 | { 231 | g.Add(func() error { //this is for handle and dispatching messages 232 | r.wg.Add(1) 233 | r.handleMessage() 234 | r.wg.Done() 235 | return errors.New("terminating handleMessage") 236 | }, func(err error) { 237 | close(r.inboundMsgQueue) 238 | r.setState(nodeStateDisconnected) 239 | }) 240 | } 241 | { 242 | g.Add(func() error { //this is for sending messages on the network 243 | r.wg.Add(1) 244 | r.networkSender() 245 | r.wg.Done() 246 | return errors.New("terminating networkSender") 247 | }, func(err error) { 248 | close(r.outboundMsgQueue) 249 | r.setState(nodeStateDisconnected) 250 | }) 251 | } 252 | { 253 | g.Add(func() error { //this is for gracefully bringing down this remoteNode 254 | r.wg.Wait() 255 | r.tearDown() 256 | return errors.New("terminating tearDown") 257 | }, func(err error) { 258 | r.connection.Close() 259 | r.setState(nodeStateDisconnected) 260 | r.wg.Done() //decrement the temporary increment 261 | }) 262 | } 263 | 264 | go func() { 265 | utils.Warn(r.logger, fmt.Sprintf("remoteNode '%s' shutting down, caused by: %q", r.config.Id, g.Run())) 266 | }() 267 | r.sendVerify() 268 | } 269 | 270 | //join a cluster. this will be called if 'join' in the config is set to true 271 | func (r *remoteNode) join() { 272 | utils.Info(r.logger, "joining remote node via "+r.config.IpAddress) 273 | 274 | go func() { //goroutine will try to connect to the cluster until it succeeds or max tries reached 275 | r.setState(nodeStateConnecting) 276 | var err error 277 | tries := 0 278 | for { 279 | if err = r.connect(); err == nil { 280 | break 281 | } 282 | utils.Error(r.logger, err.Error()) 283 | time.Sleep(time.Second * 3) 284 | if r.config.ConnectRetries > 0 { 285 | tries++ 286 | if tries >= r.config.ConnectRetries { 287 | utils.Warn(r.logger, fmt.Sprintf("unable to connect to remote node '%s' after max retires", r.config.IpAddress)) 288 | r.parentNode.eventUnableToConnect(r.config) 289 | return 290 | } 291 | } 292 | } 293 | utils.Info(r.logger, "connected to node via "+r.config.IpAddress) 294 | r.start() 295 | }() 296 | } 297 | 298 | //handles to low level connection to remote node 299 | func (r *remoteNode) connect() error { 300 | var err error 301 | utils.Info(r.logger, "connecting to "+r.config.IpAddress) 302 | r.connection, err = comms.NewConnection(r.config.IpAddress, time.Second*5) 303 | if err != nil { 304 | return err 305 | } 306 | 307 | r.setState(nodeStateHandshake) 308 | 309 | return nil 310 | } 311 | 312 | //this is a goroutine dedicated in reading data from the network 313 | //it does this by reading a 6bytes header which is 314 | // byte 1 - 4 == length of data 315 | // byte 5 & 6 == message code 316 | // the rest of the data based on length is the message body 317 | func (r *remoteNode) networkConsumer() { 318 | 319 | for (r.state == nodeStateConnected) || (r.state == nodeStateHandshake) { 320 | 321 | header, err := r.connection.ReadData(6, 0) //read 6 byte header 322 | if nil != err { 323 | utils.Critical(r.logger, fmt.Sprintf("remote node '%s' has disconnected", r.config.Id)) 324 | break 325 | } 326 | 327 | dataLength := binary.LittleEndian.Uint32(header) - 2 //subtracted 2 becos of message code 328 | msgCode := binary.LittleEndian.Uint16(header[4:]) 329 | var data []byte 
330 | if dataLength > 0 { 331 | data, err = r.connection.ReadData(uint(dataLength), 0) 332 | if nil != err { 333 | utils.Critical(r.logger, fmt.Sprintf("remote node '%s' has disconnected", r.config.Id)) 334 | break 335 | } 336 | } 337 | r.queueInboundMessage(&message.NodeWireMessage{Code: msgCode, Data: data}) //queue message to be processed 338 | } 339 | utils.Info(r.logger, fmt.Sprintf("network consumer loop terminated... %s", r.config.Id)) 340 | 341 | } 342 | 343 | //just queue the message in the outbound channel 344 | func (r *remoteNode) sendMessage(msg message.NodeMessage) { 345 | defer func() { recover() }() 346 | 347 | if r.state == nodeStateDisconnected { 348 | return 349 | } 350 | 351 | select { 352 | case <-r.done: 353 | case r.outboundMsgQueue <- msg: 354 | } 355 | } 356 | 357 | //this sends a message on the network 358 | //it builds the message using the following protocol 359 | // bytes 1 & 2 == total length of the data (including the 2 byte message code) 360 | // bytes 3 & 4 == message code 361 | // bytes 5 upwards == message content 362 | func (r *remoteNode) networkSender() { 363 | 364 | for m := range r.outboundMsgQueue { 365 | if r.state == nodeStateDisconnected { 366 | continue 367 | } 368 | msg := m.Serialize() 369 | data := make([]byte, 6+len(msg.Data)) // 6 ==> 4bytes for length of message, 2bytes for message code 370 | binary.LittleEndian.PutUint32(data, uint32(len(msg.Data)+2)) //the 2 is for the message code 371 | binary.LittleEndian.PutUint16(data[4:], msg.Code) 372 | copy(data[6:], msg.Data) 373 | if err := r.connection.SendData(data); err != nil { 374 | utils.Critical(r.logger, fmt.Sprintf("unexpected error while sending %s data [%s]", message.MsgCodeToString(msg.Code), err)) 375 | break 376 | } 377 | } 378 | utils.Info(r.logger, "terminated network sender for "+r.config.Id) 379 | } 380 | 381 | //bring down the remote node. 
should not be called from outside networkConsumer() 382 | func (r *remoteNode) tearDown() { 383 | 384 | r.parentNode.eventRemoteNodeDisconneced(r) 385 | jq := r.parentNode.joinQueue 386 | if r.config.ReconnectOnDisconnect { 387 | jq <- &message.ProposedPeer{Id: r.config.Id, IpAddress: r.config.IpAddress} 388 | } 389 | 390 | if r.pingTimeout != nil { 391 | r.pingTimeout.Stop() 392 | } 393 | if r.pingTimer != nil { 394 | r.pingTimer.Stop() 395 | } 396 | 397 | r.pendingGet = nil 398 | utils.Info(r.logger, fmt.Sprintf("remote node '%s' completely shutdown", r.config.Id)) 399 | } 400 | 401 | //just queue the message in a channel 402 | func (r *remoteNode) queueInboundMessage(msg *message.NodeWireMessage) { 403 | defer func() { recover() }() 404 | 405 | if r.state == nodeStateHandshake { //when in the handshake state only accept MsgVERIFY and MsgVERIFYOK messages 406 | code := msg.Code 407 | if (code != message.MsgVERIFY) && (code != message.MsgVERIFYOK) { 408 | r.metrics.dropedMsg++ 409 | return 410 | } 411 | } 412 | 413 | if r.state != nodeStateDisconnected { 414 | select { 415 | case <-r.done: 416 | case r.inboundMsgQueue <- msg: 417 | } 418 | } 419 | } 420 | 421 | //message handler 422 | func (r *remoteNode) handleMessage() { 423 | 424 | for msg := range r.inboundMsgQueue { 425 | if r.state == nodeStateDisconnected { 426 | continue 427 | } 428 | switch msg.Code { 429 | case message.MsgVERIFY: 430 | if !r.handleVerify(msg) { 431 | return 432 | } 433 | case message.MsgVERIFYOK: 434 | r.handleVerifyOK() 435 | case message.MsgPING: 436 | r.handlePing() 437 | case message.MsgPONG: 438 | r.handlePong() 439 | case message.MsgSyncRsp: 440 | r.handleSyncResponse(msg) 441 | case message.MsgSyncReq: 442 | r.handleSyncRequest(msg) 443 | case message.MsgGETReq: 444 | r.handleGetRequest(msg) 445 | case message.MsgGETRsp: 446 | r.handleGetResponse(msg) 447 | case message.MsgPUT: 448 | r.handlePut(msg) 449 | case message.MsgDEL: 450 | r.handleDelete(msg) 451 | } 452 | } 453 | 454 | utils.Info(r.logger, fmt.Sprintf("terminated message handler goroutine for '%s'", r.config.Id)) 455 | 456 | } 457 | 458 | //send a verify message. this is always the first message to be sent once a connection is established. 
459 | func (r *remoteNode) sendVerify() { 460 | verifyMsgRsp := message.VerifyMessage{Id: r.parentNode.config.Id, 461 | ServicePort: strconv.Itoa(r.parentNode.config.LocalPort), Mode: r.parentNode.mode} 462 | r.sendMessage(&verifyMsgRsp) 463 | } 464 | 465 | //use the verify message been sent by a remote node to configure the node in this system 466 | func (r *remoteNode) handleVerify(msg *message.NodeWireMessage) bool { 467 | 468 | verifyMsgRsp := message.VerifyMessage{} 469 | verifyMsgRsp.DeSerialize(msg) 470 | 471 | r.config.Id = verifyMsgRsp.Id 472 | r.config.ServicePort = verifyMsgRsp.ServicePort 473 | r.mode = verifyMsgRsp.Mode 474 | 475 | //check if connecting node and this node are both in passive mode 476 | if verifyMsgRsp.Mode == clusterModePASSIVE { 477 | if r.parentNode.mode == clusterModePASSIVE { //passive nodes are not allowed to connect to each other 478 | utils.Warn(r.logger, fmt.Sprintf("node '%s' and '%s' are both passive nodes, shuting down the connection", r.parentNode.config.Id, verifyMsgRsp.Id)) 479 | return false 480 | } 481 | } 482 | 483 | if !r.parentNode.eventVerifyRemoteNode(r) { //seek parent's node approval on this 484 | utils.Warn(r.logger, fmt.Sprintf("node already has remote node '%s' so shutdown new connection", r.config.Id)) 485 | return false 486 | } 487 | 488 | if verifyMsgRsp.Mode == clusterModePASSIVE { //if the node is a passive node dont reconnect on disconnect 489 | r.config.ReconnectOnDisconnect = false 490 | } 491 | 492 | r.setState(nodeStateConnected) 493 | r.sendMessage(&message.VerifyOKMessage{}) //must reply back with a verify OK message if all goes well 494 | 495 | return true 496 | } 497 | 498 | //handles verify OK from a remote node. this allows this system to sync with remote node 499 | func (r *remoteNode) handleVerifyOK() { 500 | go func() { 501 | count := 0 502 | for r.state == nodeStateHandshake { 503 | time.Sleep(time.Second * 1) 504 | count++ 505 | if count >= 5 { 506 | utils.Warn(r.logger, fmt.Sprintf("node '%s' state refused to change out of handshake", r.config.Id)) 507 | break 508 | } 509 | } 510 | if count < 5 { 511 | if r.config.Sync { //only sync if you are joining the cluster 512 | r.sendMessage(&message.SyncReqMessage{Mode: r.parentNode.mode}) 513 | } 514 | } 515 | }() 516 | } 517 | 518 | //handles ping message from a remote node 519 | func (r *remoteNode) handlePing() { 520 | atomic.AddUint64(&r.metrics.pingRecieved, 1) 521 | r.sendMessage(&message.PongMessage{}) 522 | atomic.AddUint64(&r.metrics.pongSent, 1) 523 | } 524 | 525 | //handle a pong message from the remote node, reset flags 526 | func (r *remoteNode) handlePong() { 527 | atomic.AddUint64(&r.metrics.pongRecieved, 1) 528 | if !r.pingTimeout.Stop() { //stop the timer since we got a response 529 | select { 530 | case <-r.pingTimeout.C: 531 | default: 532 | } 533 | } 534 | atomic.StoreInt32(&r.pingFailure, 0) //reset failure counter since we got a response 535 | } 536 | 537 | //build and send a sync message 538 | func (r *remoteNode) sendSyncResponse(msg *message.SyncReqMessage) { 539 | values := r.parentNode.getRemoteNodes() //call this because of the lock that needs to be held by parentNode 540 | nodeList := make([]message.ProposedPeer, 0) 541 | for _, v := range values { 542 | n := v.(*remoteNode) 543 | if n.config.Id == r.config.Id { 544 | continue 545 | } 546 | if (n.mode == clusterModePASSIVE) && (msg.Mode == clusterModePASSIVE) { 547 | continue 548 | } 549 | host, _, _ := net.SplitHostPort(n.config.IpAddress) 550 | nodeList = append(nodeList, 
message.ProposedPeer{Id: n.config.Id, IpAddress: net.JoinHostPort(host, n.config.ServicePort)}) 551 | } 552 | 553 | if len(nodeList) > 0 { 554 | r.sendMessage(&message.SyncRspMessage{List: nodeList, ReplicationFactor: r.parentNode.config.ReplicationFactor}) 555 | } 556 | } 557 | 558 | //handles sync request by just sending a sync response 559 | func (r *remoteNode) handleSyncRequest(msg *message.NodeWireMessage) { 560 | m := &message.SyncReqMessage{} 561 | m.DeSerialize(msg) 562 | r.sendSyncResponse(m) 563 | } 564 | 565 | //accept the sync response and send to parentNode for processing 566 | func (r *remoteNode) handleSyncResponse(msg *message.NodeWireMessage) { 567 | syncMsg := message.SyncRspMessage{} 568 | syncMsg.DeSerialize(msg) 569 | r.parentNode.setReplicationFactor(syncMsg.ReplicationFactor) 570 | length := len(syncMsg.List) 571 | for x := 0; x < length; x++ { 572 | r.parentNode.joinQueue <- &syncMsg.List[x] 573 | } 574 | } 575 | 576 | func (r *remoteNode) getData(reqData *getRequestData) { 577 | if r.state == nodeStateDisconnected { 578 | return 579 | } 580 | randStr := reqData.randStr 581 | r.pendingGet.Store(reqData.key+randStr, reqData) 582 | r.sendMessage(&message.GetReqMessage{Key: reqData.key, PendingKey: reqData.key + randStr}) 583 | } 584 | 585 | func (r *remoteNode) handleGetRequest(msg *message.NodeWireMessage) { 586 | reqMsg := message.GetReqMessage{} 587 | reqMsg.DeSerialize(msg) 588 | data, _ := r.parentNode.cache.Get(reqMsg.Key) 589 | r.sendMessage(&message.GetRspMessage{PendingKey: reqMsg.PendingKey, Data: data}) 590 | } 591 | 592 | func (r *remoteNode) handleGetResponse(msg *message.NodeWireMessage) { 593 | rspMsg := message.GetRspMessage{} 594 | rspMsg.DeSerialize(msg) 595 | origReq, ok := r.pendingGet.Load(rspMsg.PendingKey) 596 | if !ok { 597 | utils.Error(r.logger, "handling get response without finding the pending key") 598 | return 599 | } 600 | 601 | r.pendingGet.Delete(rspMsg.PendingKey) 602 | if len(rspMsg.Data) < 1 { 603 | return 604 | } 605 | 606 | //some other remote node might have sent the data so we do not want to block forever on the channel hence the select 607 | reqData := origReq.(*getRequestData) 608 | select { 609 | case <-reqData.done: 610 | case reqData.replyChan <- &getReplyData{data: rspMsg.Data}: 611 | default: 612 | } 613 | } 614 | 615 | func (r *remoteNode) handlePut(msg *message.NodeWireMessage) { 616 | 617 | putMsg := message.PutMessage{} 618 | putMsg.DeSerialize(msg) 619 | if putMsg.Expiry == 0 { 620 | r.parentNode.cache.Set(putMsg.Key, putMsg.Data, 0) 621 | } else { 622 | t1 := time.Unix(int64(putMsg.Expiry), 0) 623 | t2 := t1.Sub(time.Now()) 624 | r.parentNode.cache.Set(putMsg.Key, putMsg.Data, t2) 625 | } 626 | } 627 | 628 | func (r *remoteNode) handleDelete(msg *message.NodeWireMessage) { 629 | delMsg := message.DeleteMessage{} 630 | delMsg.DeSerialize(msg) 631 | r.parentNode.cache.Delete(delMsg.Key) 632 | } 633 | -------------------------------------------------------------------------------- /Cluster/remoteNode_test.go: -------------------------------------------------------------------------------- 1 | package clusteredBigCache 2 | 3 | import ( 4 | "github.com/oaStuff/clusteredBigCache/utils" 5 | "testing" 6 | "time" 7 | ) 8 | 9 | func TestCheckConfig(t *testing.T) { 10 | config := &remoteNodeConfig{} 11 | checkConfig(nil, config) 12 | 13 | if config.PingInterval != 5 { 14 | t.Error("config ping interval ought to be 5") 15 | } 16 | 17 | if config.PingTimeout != 3 { 18 | t.Error("config ping timeout ought to be 3") 19 | } 20 | 
21 | if config.PingFailureThreshHold != 5 { 22 | t.Error("config ping failure threshold ought to be 5") 23 | } 24 | } 25 | 26 | func TestRemoteNode(t *testing.T) { 27 | 28 | svr := utils.NewTestServer(9091, true) 29 | svr.Start() 30 | defer svr.Close() 31 | 32 | node := New(&ClusteredBigCacheConfig{LocalPort: 9999, ConnectRetries: 2}, nil) 33 | rn := newRemoteNode(&remoteNodeConfig{IpAddress: "localhost:9091", Sync: false}, node, nil) 34 | 35 | err := rn.connect() 36 | if err != nil { 37 | t.Error(err) 38 | return 39 | } 40 | 41 | if rn.state != nodeStateHandshake { 42 | t.Error("node ought to be in handshaking state") 43 | return 44 | } 45 | 46 | rn.start() 47 | defer rn.tearDown() 48 | 49 | svr.SendVerifyMessage("server1") 50 | time.Sleep(time.Millisecond * 300) 51 | if rn.state != nodeStateConnected { 52 | t.Error("node ought to be in connected state") 53 | } 54 | 55 | if node.remoteNodes.Size() != 1 { 56 | t.Error("only one node ought to be connected") 57 | } 58 | 59 | if node.remoteNodes.Values()[0].(*remoteNode).config.Id != "server1" { 60 | t.Error("unknown id in remoteNode data") 61 | } 62 | 63 | rn.shutDown() 64 | node.ShutDown() 65 | } 66 | 67 | func TestNoPingResponseDisconnt(t *testing.T) { 68 | svr := utils.NewTestServer(9092, false) 69 | err := svr.Start() 70 | if err != nil { 71 | panic(err) 72 | } 73 | defer svr.Close() 74 | 75 | node := New(&ClusteredBigCacheConfig{LocalPort: 9999, ConnectRetries: 2}, nil) 76 | rn := newRemoteNode(&remoteNodeConfig{IpAddress: "localhost:9092", Sync: false, 77 | PingFailureThreshHold: 1, PingInterval: 0}, node, nil) 78 | err = rn.connect() 79 | if err != nil { 80 | t.Error(err) 81 | return 82 | } 83 | 84 | if rn.state != nodeStateHandshake { 85 | t.Error("node ought to be in handshaking state") 86 | } 87 | 88 | rn.start() 89 | defer rn.tearDown() 90 | svr.SendVerifyMessage("server_1") 91 | 92 | time.Sleep(time.Second * 6) 93 | if rn.state != nodeStateDisconnected { 94 | t.Error("node ought to be in disconnected state") 95 | } 96 | 97 | if rn.metrics.pongRecieved != 0 { 98 | t.Error("pong ought not to have been received") 99 | } 100 | 101 | rn.shutDown() 102 | node.ShutDown() 103 | } 104 | 105 | func TestPinging(t *testing.T) { 106 | s := utils.NewTestServer(8999, true) 107 | err := s.Start() 108 | if err != nil { 109 | panic(err) 110 | } 111 | defer s.Close() 112 | 113 | node := New(&ClusteredBigCacheConfig{LocalPort: 8999, ConnectRetries: 2, PingTimeout: 1, PingInterval: 2}, nil) 114 | time.Sleep(time.Millisecond * 200) 115 | rn := newRemoteNode(&remoteNodeConfig{IpAddress: "localhost:8999", Sync: true, 116 | PingFailureThreshHold: 10, PingInterval: 2, PingTimeout: 1}, node, nil) 117 | err = rn.connect() 118 | if err != nil { 119 | t.Error(err) 120 | return 121 | } 122 | 123 | if rn.state != nodeStateHandshake { 124 | t.Error("node ought to be in handshaking state") 125 | } 126 | 127 | rn.start() 128 | defer rn.tearDown() 129 | s.SendVerifyMessage("server1") 130 | 131 | time.Sleep(time.Second * 3) 132 | 133 | if rn.metrics.pongRecieved < 1 { 134 | t.Error("pong ought to have been received") 135 | } 136 | 137 | if rn.metrics.pingRecieved < 1 { 138 | t.Error("pinging facility not working approprately") 139 | } 140 | 141 | rn.shutDown() 142 | node.ShutDown() 143 | } 144 | -------------------------------------------------------------------------------- /Gopkg.lock: -------------------------------------------------------------------------------- 1 | # This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
2 | 3 | 4 | [[projects]] 5 | name = "github.com/emirpasic/gods" 6 | packages = ["containers","sets","sets/hashset","trees","trees/avltree","utils"] 7 | revision = "f6c17b524822278a87e3b3bd809fec33b51f5b46" 8 | version = "v1.9.0" 9 | 10 | [[projects]] 11 | branch = "master" 12 | name = "github.com/gin-contrib/sse" 13 | packages = ["."] 14 | revision = "22d885f9ecc78bf4ee5d72b937e4bbcdc58e8cae" 15 | 16 | [[projects]] 17 | name = "github.com/gin-gonic/gin" 18 | packages = [".","binding","render"] 19 | revision = "d459835d2b077e44f7c9b453505ee29881d5d12d" 20 | version = "v1.2" 21 | 22 | [[projects]] 23 | branch = "master" 24 | name = "github.com/golang/protobuf" 25 | packages = ["proto"] 26 | revision = "1e59b77b52bf8e4b449a57e6f79f21226d571845" 27 | 28 | [[projects]] 29 | name = "github.com/mattn/go-isatty" 30 | packages = ["."] 31 | revision = "0360b2af4f38e8d38c7fce2a9f4e702702d73a39" 32 | version = "v0.0.3" 33 | 34 | [[projects]] 35 | name = "github.com/oklog/run" 36 | packages = ["."] 37 | revision = "4dadeb3030eda0273a12382bb2348ffc7c9d1a39" 38 | version = "v1.0.0" 39 | 40 | [[projects]] 41 | branch = "master" 42 | name = "github.com/ugorji/go" 43 | packages = ["codec"] 44 | revision = "9831f2c3ac1068a78f50999a30db84270f647af6" 45 | 46 | [[projects]] 47 | branch = "master" 48 | name = "golang.org/x/sys" 49 | packages = ["unix"] 50 | revision = "810d7000345868fc619eb81f46307107118f4ae1" 51 | 52 | [[projects]] 53 | name = "gopkg.in/go-playground/validator.v8" 54 | packages = ["."] 55 | revision = "5f1438d3fca68893a817e4a66806cea46a9e4ebf" 56 | version = "v8.18.2" 57 | 58 | [[projects]] 59 | branch = "v2" 60 | name = "gopkg.in/yaml.v2" 61 | packages = ["."] 62 | revision = "d670f9405373e636a5a2765eea47fac0c9bc91a4" 63 | 64 | [solve-meta] 65 | analyzer-name = "dep" 66 | analyzer-version = 1 67 | inputs-digest = "7685fac617f33357b38ad41bff7dc6c6d9b17b9ba868abf4064786f98a69f673" 68 | solver-name = "gps-cdcl" 69 | solver-version = 1 70 | -------------------------------------------------------------------------------- /Gopkg.toml: -------------------------------------------------------------------------------- 1 | 2 | # Gopkg.toml example 3 | # 4 | # Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md 5 | # for detailed Gopkg.toml documentation. 
6 | # 7 | # required = ["github.com/user/thing/cmd/thing"] 8 | # ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] 9 | # 10 | # [[constraint]] 11 | # name = "github.com/user/project" 12 | # version = "1.0.0" 13 | # 14 | # [[constraint]] 15 | # name = "github.com/user/project2" 16 | # branch = "dev" 17 | # source = "github.com/myfork/project2" 18 | # 19 | # [[override]] 20 | # name = "github.com/x/y" 21 | # version = "2.4.0" 22 | 23 | 24 | [[constraint]] 25 | name = "github.com/emirpasic/gods" 26 | version = "1.9.0" 27 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2017 aihui zhu 4 | 5 | Permission is hereby granted, free of charge, to any person 6 | obtaining a copy of this software and associated documentation 7 | files (the "Software"), to deal in the Software without 8 | restriction, including without limitation the rights to use, 9 | copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the 11 | Software is furnished to do so, subject to the following 12 | conditions: 13 | 14 | The above copyright notice and this permission notice shall be 15 | included in all copies or substantial portions of the Software. 16 | 17 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 18 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES 19 | OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 20 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT 21 | HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, 22 | WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 23 | FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 24 | OTHER DEALINGS IN THE SOFTWARE. 25 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | clusteredBigCache 2 | ================= 3 | 4 | [![Go Report Card](https://goreportcard.com/badge/github.com/oastuff/clusteredBigCache)](https://goreportcard.com/report/github.com/oastuff/clusteredBigCache) 5 | 6 | This is a library based on [bigcache](https://github.com/allegro/bigcache) with some modifications to support 7 | * clustering and 8 | * individual item expiration 9 | 10 | Bigcache is an excellent piece of software but the fact that items could only expire based on a predefined 11 | value was not just too appealing. Bigcache had to be modified to support individual expiration of items using 12 | a single timer. This happens by you specifying a time value as you add items to the cache. 13 | Running two or more instances of an application that would require some level of caching would normally 14 | default to memcache or redis which are external applications adding to the mix of services required for your 15 | application to run. 16 | 17 | With clusteredBigCache there is no requirement to run an external application to provide caching for multiple 18 | instances of your application. The library handles caching as well as clustering the caches between multiple 19 | instances of your application providing you with simple library APIs (by just calling functions) to store and 20 | get your values. 
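For a quick sense of those APIs before the full samples below, here is a minimal single-node sketch (no clustering, error handling trimmed) using only the calls documented in this README: `New`, `Start`, `Put`, `Get` and `ShutDown`. The key and value used are arbitrary illustrations.

```go
package main

import (
	"fmt"
	"time"

	"github.com/oaStuff/clusteredBigCache/Cluster"
)

func main() {
	// create a node with the default configuration and start it
	cache := clusteredBigCache.New(clusteredBigCache.DefaultClusterConfig(), nil)
	if err := cache.Start(); err != nil {
		panic(err)
	}
	defer cache.ShutDown()

	// store a value with a 10 minute time to live, then read it back,
	// waiting at most 100ms for an answer from the cluster
	cache.Put("greeting", []byte("hello"), time.Minute*10)
	if value, err := cache.Get("greeting", time.Millisecond*100); err == nil {
		fmt.Println(string(value))
	}
}
```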
21 | 22 | With clusteredBigCache, when you store a value in one instance of your application and every other instance 23 | or any other application for that matter that you configure to form/join your "cluster" will 24 | see that exact same value. 25 | 26 | ## Installing 27 | 28 | ### Using *go get* 29 | 30 | $ go get github.com/oaStuff/clusteredBigCache 31 | 32 | ## Samples 1 33 | 34 | This is the application responsible for storing data into the cache 35 | 36 | ```go 37 | package main 38 | 39 | import ( 40 | "fmt" 41 | "bufio" 42 | "os" 43 | "github.com/oaStuff/clusteredBigCache/Cluster" 44 | "strings" 45 | "time" 46 | ) 47 | 48 | // 49 | //main function 50 | // 51 | func main() { 52 | fmt.Println("starting...") 53 | cache := clusteredBigCache.New(clusteredBigCache.DefaultClusterConfig(), nil) 54 | count := 1 55 | cache.Start() 56 | 57 | reader := bufio.NewReader(os.Stdin) 58 | var data string 59 | for strings.ToLower(data) != "exit" { 60 | fmt.Print("enter data : ") 61 | data, _ = reader.ReadString('\n') 62 | data = strings.TrimSpace(data) 63 | err := cache.Put(fmt.Sprintf("key_%d", count), []byte(data), time.Minute * 60) 64 | if err != nil { 65 | panic(err) 66 | } 67 | fmt.Printf("'%s' stored under key 'key_%d'\n", data, count) 68 | count++ 69 | } 70 | } 71 | 72 | ``` 73 | 74 | ##### Explanation: 75 | 76 | The above application captures data from the keyboard and stores them inside clusteredBigCache 77 | starting with keys 'key_1', 'key_2'...'key_n'. As the user types and presses the enter key the data is stored in 78 | the cache. 79 | 80 | `cache := clusteredBigCache.New(clusteredBigCache.DefaultClusterConfig(), nil)` 81 | This statement will create the cache using the default configuration. This configuration has default 82 | values for *LocalPort = 9911*, *Join = false* amongst others. If you intend to use this library for applications 83 | that will run on the same machine, you will have to give unique values for *LocalPort* 84 | 85 | `cache.Start()` This **must** be called before using any other method on this cache. 86 | 87 | `err := cache.Put(fmt.Sprintf("key_%d", count), []byte(data), time.Minute * 60)`. You set values in the cache 88 | giving it a key, the data as a `[]byte` slice and the expiration or time to live (ttl) for that key/value within the cache. 89 | When the key/value pair reaches its expiration time, they are removed automatically. 90 | 91 | 92 | ## Samples 2 93 | 94 | This is the application responsible for reading data of the cache. This can be run on the same or different machine 95 | on the network. 
96 | 97 | ```go 98 | package main 99 | 100 | import ( 101 | "github.com/oaStuff/clusteredBigCache/Cluster" 102 | "bufio" 103 | "os" 104 | "strings" 105 | "fmt" 106 | "time" 107 | ) 108 | 109 | // 110 | // 111 | func main() { 112 | config := clusteredBigCache.DefaultClusterConfig() 113 | config.LocalPort = 8888 114 | config.Join = true 115 | config.JoinIp = "127.0.0.1:9911" 116 | cache := clusteredBigCache.New(config, nil) 117 | err := cache.Start() 118 | if err != nil { 119 | panic(err) 120 | } 121 | 122 | reader := bufio.NewReader(os.Stdin) 123 | var data string 124 | for strings.ToLower(data) != "exit" { 125 | fmt.Print("enter key : ") 126 | data, _ = reader.ReadString('\n') 127 | data = strings.TrimSpace(data) 128 | value, err := cache.Get(data, time.Millisecond * 160) 129 | if err != nil { 130 | fmt.Println(err) 131 | continue 132 | } 133 | fmt.Printf("you got '%s' from the cache\n", value) 134 | } 135 | } 136 | 137 | ``` 138 | 139 | ##### Explanation: 140 | 141 | The above application reads a string from the keyboard, which should represent a key for a value in clusteredBigCache. 142 | If a user enters one of the keys shown in Sample 1 above ('key_1', 'key_2'...'key_n'), the corresponding value 143 | will be returned. 144 | 145 | ```go 146 | config := clusteredBigCache.DefaultClusterConfig() 147 | config.LocalPort = 8888 148 | config.Join = true 149 | config.JoinIp = "127.0.0.1:9911" 150 | cache := clusteredBigCache.New(config, nil) 151 | err := cache.Start() 152 | ``` 153 | 154 | The above uses the default configuration to create a config and modifies only what it actually needs. 155 | `config.LocalPort = 8888` has to be changed since this application will run on the same machine as the Sample 1 156 | application. This is to avoid 'port already in use' errors. 157 | 158 | `config.Join = true`. For an application to join another 159 | application or applications using clusteredBigCache, it **must** set *config.Join* to *true* and set `config.JoinIp` to 160 | the address of one of the systems using clusteredBigCache, eg `config.JoinIp = "127.0.0.1:9911"`. This example says that this application 161 | wants to join another application using clusteredBigCache at IP address *127.0.0.1* and port number *9911*. 162 | 163 | `cache := clusteredBigCache.New(config, nil)` creates the cache and `cache.Start()` must be called to start everything running. 164 | 165 | __**NB**__ 166 | 167 | After `cache.Start()` is called the library tries to connect to the specified IP address using the specified port. 168 | When successfully connected, it creates a cluster of applications using clusteredBigCache as a single cache, i.e. all applications connected will see every value 169 | every application sets in the cache. 170 | 171 | #### Sample way to parse config in an app 172 | 173 | ```go 174 | join := flag.String("join", "", "ipAddr:port number of remote server") 175 | localPort := flag.Int("port", 6060, "local server port to bind to") 176 | 177 | 178 | flag.Parse() 179 | 180 | config := clusteredBigCache.DefaultClusterConfig() 181 | if *join != "" { 182 | config.JoinIp = *join 183 | config.Join = true 184 | } 185 | config.LocalPort = *localPort 186 | ``` 187 | 188 | Your application can pass in parameters in any form and make use of them to configure clusteredBigCache. The 189 | sample above only caters for `join` and `localport`.
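For completeness, the fragment above could be dropped into a runnable program along the lines of the sketch below (the actual cache usage is elided; the calls are the same ones used in the samples earlier in this README):

```go
package main

import (
	"flag"

	"github.com/oaStuff/clusteredBigCache/Cluster"
)

func main() {
	join := flag.String("join", "", "ipAddr:port number of remote server")
	localPort := flag.Int("port", 6060, "local server port to bind to")
	flag.Parse()

	// wire the parsed flags into the cluster configuration
	config := clusteredBigCache.DefaultClusterConfig()
	if *join != "" {
		config.JoinIp = *join
		config.Join = true
	}
	config.LocalPort = *localPort

	cache := clusteredBigCache.New(config, nil)
	if err := cache.Start(); err != nil {
		panic(err)
	}

	// ... use cache.Put / cache.Get here as in the samples above ...
}
```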
If you want network connections between machines to be reconnected 190 | in the event of a disconnection, you will have to set `config.ReconnectOnDisconnect = true`. 191 | 192 | #### Logging within the library 193 | 194 | clusteredBigCache takes a second parameter in its New() function for logging. 195 | This parameter expects an interface of 196 | ```go 197 | type AppLogger interface { 198 | Info(msg string) 199 | Warn(msg string) 200 | Critical(msg string) 201 | Error(msg string) 202 | } 203 | ``` 204 | 205 | You could easily wrap any logger within a `struct`, implement this interface on that struct and simply 206 | delegate calls to the underlying logger, or better still just wrap a logger function to provide the interface like the example below 207 | 208 | ```go 209 | type myLogger func(...interface{}) 210 | 211 | func (log myLogger) Info(msg string) { 212 | log(msg) 213 | } 214 | 215 | func (log myLogger) Warn(msg string) { 216 | log(msg) 217 | } 218 | 219 | func (log myLogger) Error(msg string) { 220 | log(msg) 221 | } 222 | 223 | func (log myLogger) Critical(msg string) { 224 | log(msg) 225 | } 226 | 227 | 228 | cache := clusteredBigCache.New(config, myLogger(log.Println)) 229 | 230 | ``` 231 | 232 | ### Using a passive client 233 | Passive clients are nodes in the clusteredBigCache network that do not store any data locally but otherwise function 234 | like every other node. To create a passive client you simply call `clusteredBigCache.NewPassiveClient("linux_box_100","localhost:9090", 8885, 0, 0, 0, nil)`. 235 | This will connect to an existing cluster at address *localhost:9090* and join it. The *linux_box_100* argument is the node's id. 236 | This can be an empty string if you want an auto-generated id. Every other function can be performed 237 | on the returned object. 238 | 239 | ##### credits 240 | Core cache system from [bigcache](https://github.com/allegro/bigcache) 241 | 242 | Data structures from [emirpasic](https://github.com/emirpasic/gods) 243 | 244 | ### LICENSE 245 | MIT. 246 | 247 | 248 | 249 | 250 | -------------------------------------------------------------------------------- /bigcache/bigcache.go: -------------------------------------------------------------------------------- 1 | package bigcache 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | ) 7 | 8 | const ( 9 | minimumEntriesInShard = 10 // Minimum number of entries in single shard 10 | ) 11 | 12 | // BigCache is a fast, concurrent, evicting cache created to keep a big number of entries without impacting performance. 13 | // It keeps entries on the heap but omits GC for them. To achieve that, operations on byte arrays take place, 14 | // therefore entries (de)serialization in front of the cache will be needed in most use cases.
15 | type BigCache struct { 16 | shards []*cacheShard 17 | lifeWindow uint64 18 | clock clock 19 | hash Hasher 20 | config Config 21 | shardMask uint64 22 | maxShardSize uint32 23 | } 24 | 25 | // NewBigCache initialize new instance of BigCache 26 | func NewBigCache(config Config) (*BigCache, error) { 27 | return newBigCache(config, &systemClock{}) 28 | } 29 | 30 | func newBigCache(config Config, clock clock) (*BigCache, error) { 31 | 32 | if !isPowerOfTwo(config.Shards) { 33 | return nil, fmt.Errorf("Shards number must be power of two") 34 | } 35 | 36 | if config.Hasher == nil { 37 | config.Hasher = newDefaultHasher() 38 | } 39 | 40 | cache := &BigCache{ 41 | shards: make([]*cacheShard, config.Shards), 42 | lifeWindow: uint64(config.LifeWindow.Seconds()), 43 | clock: clock, 44 | hash: config.Hasher, 45 | config: config, 46 | shardMask: uint64(config.Shards - 1), 47 | maxShardSize: uint32(config.maximumShardSize()), 48 | } 49 | 50 | var onRemove func(wrappedEntry []byte) 51 | if config.OnRemove == nil { 52 | onRemove = cache.notProvidedOnRemove 53 | } else { 54 | onRemove = cache.providedOnRemove 55 | } 56 | 57 | for i := 0; i < config.Shards; i++ { 58 | cache.shards[i] = initNewShard(config, onRemove, clock, uint64(i+1)) 59 | } 60 | 61 | return cache, nil 62 | } 63 | 64 | // Get reads entry for the key 65 | func (c *BigCache) Get(key string) ([]byte, error) { 66 | hashedKey := c.hash.Sum64(key) 67 | shard := c.getShard(hashedKey) 68 | return shard.get(key, hashedKey) 69 | } 70 | 71 | // Set saves entry under the key 72 | func (c *BigCache) Set(key string, entry []byte, duration time.Duration) (uint64, error) { 73 | hashedKey := c.hash.Sum64(key) 74 | shard := c.getShard(hashedKey) 75 | return shard.set(key, hashedKey, entry, duration) 76 | } 77 | 78 | // Delete removes the key 79 | func (c *BigCache) Delete(key string) error { 80 | hashedKey := c.hash.Sum64(key) 81 | shard := c.getShard(hashedKey) 82 | return shard.del(key, hashedKey) 83 | } 84 | 85 | // Reset empties all cache shards 86 | func (c *BigCache) Reset() error { 87 | for _, shard := range c.shards { 88 | shard.reset(c.config) 89 | } 90 | return nil 91 | } 92 | 93 | // Len computes number of entries in cache 94 | func (c *BigCache) Len() int { 95 | var len int 96 | for _, shard := range c.shards { 97 | len += shard.len() 98 | } 99 | return len 100 | } 101 | 102 | // Stats returns cache's statistics 103 | func (c *BigCache) Stats() Stats { 104 | var s Stats 105 | for _, shard := range c.shards { 106 | tmp := shard.getStats() 107 | s.Hits += tmp.Hits 108 | s.Misses += tmp.Misses 109 | s.DelHits += tmp.DelHits 110 | s.DelMisses += tmp.DelMisses 111 | s.Collisions += tmp.Collisions 112 | s.EvictCount += tmp.EvictCount 113 | } 114 | return s 115 | } 116 | 117 | // Iterator returns iterator function to iterate over EntryInfo's from whole cache. 
118 | func (c *BigCache) Iterator() *EntryInfoIterator { 119 | return newIterator(c) 120 | } 121 | 122 | func (c *BigCache) onEvict(oldestEntry []byte, currentTimestamp uint64, evict func() error) bool { 123 | oldestTimestamp := readTimestampFromEntry(oldestEntry) 124 | if currentTimestamp-oldestTimestamp > c.lifeWindow { 125 | evict() 126 | return true 127 | } 128 | return false 129 | } 130 | 131 | func (c *BigCache) getShard(hashedKey uint64) (shard *cacheShard) { 132 | return c.shards[hashedKey&c.shardMask] 133 | } 134 | 135 | func (c *BigCache) providedOnRemove(wrappedEntry []byte) { 136 | c.config.OnRemove(readKeyFromEntry(wrappedEntry), readEntry(wrappedEntry)) 137 | } 138 | 139 | func (c *BigCache) notProvidedOnRemove(wrappedEntry []byte) { 140 | } 141 | -------------------------------------------------------------------------------- /bigcache/clock.go: -------------------------------------------------------------------------------- 1 | package bigcache 2 | 3 | import "time" 4 | 5 | type clock interface { 6 | epoch() int64 7 | } 8 | 9 | type systemClock struct { 10 | } 11 | 12 | func (c systemClock) epoch() int64 { 13 | return time.Now().Unix() 14 | } 15 | -------------------------------------------------------------------------------- /bigcache/config.go: -------------------------------------------------------------------------------- 1 | package bigcache 2 | 3 | import "time" 4 | 5 | // Config for BigCache 6 | type Config struct { 7 | // Number of cache shards, value must be a power of two 8 | Shards int 9 | // Time after which entry can be evicted 10 | LifeWindow time.Duration 11 | // Interval between removing expired entries (clean up). 12 | // If set to <= 0 then no action is performed. Setting to < 1 second is counterproductive — bigcache has a one second resolution. 13 | CleanWindow time.Duration 14 | // Max number of entries in life window. Used only to calculate initial size for cache shards. 15 | // When proper value is set then additional memory allocation does not occur. 16 | MaxEntriesInWindow int 17 | // Max size of entry in bytes. Used only to calculate initial size for cache shards. 18 | MaxEntrySize int 19 | // Verbose mode prints information about new memory allocation 20 | Verbose bool 21 | // Hasher used to map between string keys and unsigned 64bit integers, by default fnv64 hashing is used. 22 | Hasher Hasher 23 | // HardMaxCacheSize is a limit for cache size in MB. Cache will not allocate more memory than this limit. 24 | // It can protect application from consuming all available memory on machine, therefore from running OOM Killer. 25 | // Default value is 0 which means unlimited size. When the limit is higher than 0 and reached then 26 | // the oldest entries are overridden for the new ones. 27 | HardMaxCacheSize int 28 | // OnRemove is a callback fired when the oldest entry is removed because of its expiration time or no space left 29 | // for the new entry. Default value is nil which means no callback and it prevents from unwrapping the oldest entry. 30 | OnRemove func(key string, entry []byte) 31 | 32 | // Logger is a logging interface and used in combination with `Verbose` 33 | // Defaults to `DefaultLogger()` 34 | Logger Logger 35 | } 36 | 37 | // DefaultConfig initializes config with default values. 38 | // When load for BigCache can be predicted in advance then it is better to use custom config. 
39 | func DefaultConfig() Config { 40 | return Config{ 41 | Shards: 16, 42 | LifeWindow: time.Hour * 72, //not actually used 43 | CleanWindow: 0, 44 | MaxEntriesInWindow: 1000 * 10 * 60, 45 | MaxEntrySize: 500, 46 | Verbose: true, 47 | Hasher: newDefaultHasher(), 48 | HardMaxCacheSize: 0, 49 | Logger: DefaultLogger(), 50 | } 51 | } 52 | 53 | // initialShardSize computes initial shard size 54 | func (c Config) initialShardSize() int { 55 | return max(c.MaxEntriesInWindow/c.Shards, minimumEntriesInShard) 56 | } 57 | 58 | // maximumShardSize computes maximum shard size 59 | func (c Config) maximumShardSize() int { 60 | maxShardSize := 0 61 | 62 | if c.HardMaxCacheSize > 0 { 63 | maxShardSize = convertMBToBytes(c.HardMaxCacheSize) / c.Shards 64 | } 65 | 66 | return maxShardSize 67 | } 68 | 69 | //SetShard sets the number of shards 70 | func (c *Config) SetShard(num int) { 71 | c.Shards = num 72 | } 73 | -------------------------------------------------------------------------------- /bigcache/encoding.go: -------------------------------------------------------------------------------- 1 | package bigcache 2 | 3 | import ( 4 | "encoding/binary" 5 | "reflect" 6 | "unsafe" 7 | ) 8 | 9 | const ( 10 | timestampSizeInBytes = 8 // Number of bytes used for timestamp 11 | hashSizeInBytes = 8 // Number of bytes used for hash 12 | keySizeInBytes = 2 // Number of bytes used for size of entry key 13 | headersSizeInBytes = timestampSizeInBytes + hashSizeInBytes + keySizeInBytes // Number of bytes used for all headers 14 | ) 15 | 16 | func wrapEntry(timestamp uint64, hash uint64, key string, entry []byte, buffer *[]byte) []byte { 17 | keyLength := len(key) 18 | blobLength := len(entry) + headersSizeInBytes + keyLength 19 | 20 | if blobLength > len(*buffer) { 21 | *buffer = make([]byte, blobLength) 22 | } 23 | blob := *buffer 24 | 25 | binary.LittleEndian.PutUint64(blob, timestamp) 26 | binary.LittleEndian.PutUint64(blob[timestampSizeInBytes:], hash) 27 | binary.LittleEndian.PutUint16(blob[timestampSizeInBytes+hashSizeInBytes:], uint16(keyLength)) 28 | copy(blob[headersSizeInBytes:], key) 29 | copy(blob[headersSizeInBytes+keyLength:], entry) 30 | 31 | return blob[:blobLength] 32 | } 33 | 34 | func readEntry(data []byte) []byte { 35 | length := binary.LittleEndian.Uint16(data[timestampSizeInBytes+hashSizeInBytes:]) 36 | 37 | // copy on read 38 | dst := make([]byte, len(data)-int(headersSizeInBytes+length)) 39 | copy(dst, data[headersSizeInBytes+length:]) 40 | 41 | return dst 42 | } 43 | 44 | func readTimestampFromEntry(data []byte) uint64 { 45 | return binary.LittleEndian.Uint64(data) 46 | } 47 | 48 | func readKeyFromEntry(data []byte) string { 49 | length := binary.LittleEndian.Uint16(data[timestampSizeInBytes+hashSizeInBytes:]) 50 | 51 | // copy on read 52 | dst := make([]byte, length) 53 | copy(dst, data[headersSizeInBytes:headersSizeInBytes+length]) 54 | 55 | return bytesToString(dst) 56 | } 57 | 58 | func bytesToString(b []byte) string { 59 | bytesHeader := (*reflect.SliceHeader)(unsafe.Pointer(&b)) 60 | strHeader := reflect.StringHeader{Data: bytesHeader.Data, Len: bytesHeader.Len} 61 | return *(*string)(unsafe.Pointer(&strHeader)) 62 | } 63 | 64 | func readHashFromEntry(data []byte) uint64 { 65 | return binary.LittleEndian.Uint64(data[timestampSizeInBytes:]) 66 | } 67 | 68 | func resetKeyFromEntry(data []byte) { 69 | binary.LittleEndian.PutUint64(data[timestampSizeInBytes:], 0) 70 | } 71 | -------------------------------------------------------------------------------- 
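As an aside, the wrapped-entry layout that encoding.go above writes and reads is easy to misread, so here is a small, self-contained sketch (not part of the repository; the constant names are local to the example) that mirrors the same header arithmetic: an 8-byte little-endian timestamp, an 8-byte hash, a 2-byte key length, then the key bytes followed by the value bytes.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	const (
		timestampSize = 8
		hashSize      = 8
		keyLenSize    = 2
		headerSize    = timestampSize + hashSize + keyLenSize // 18 bytes, as in encoding.go
	)

	key, value := "key_1", []byte("data_A")

	// wrap: headers first, then the key, then the value
	blob := make([]byte, headerSize+len(key)+len(value))
	binary.LittleEndian.PutUint64(blob, 1512345678)                                // expiry timestamp
	binary.LittleEndian.PutUint64(blob[timestampSize:], 0xCAFEBABE)                // key hash
	binary.LittleEndian.PutUint16(blob[timestampSize+hashSize:], uint16(len(key))) // key length
	copy(blob[headerSize:], key)
	copy(blob[headerSize+len(key):], value)

	// unwrap: the mirror of readKeyFromEntry and readEntry
	keyLen := int(binary.LittleEndian.Uint16(blob[timestampSize+hashSize:]))
	fmt.Println(string(blob[headerSize : headerSize+keyLen])) // key_1
	fmt.Println(string(blob[headerSize+keyLen:]))             // data_A
}
```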
/bigcache/entry_not_found_error.go: -------------------------------------------------------------------------------- 1 | package bigcache 2 | 3 | import "fmt" 4 | 5 | // EntryNotFoundError is an error type struct which is returned when entry was not found for provided key 6 | type EntryNotFoundError struct { 7 | message string 8 | } 9 | 10 | func notFound(key string) error { 11 | return &EntryNotFoundError{fmt.Sprintf("Entry %q not found", key)} 12 | } 13 | 14 | // Error returned when entry does not exist. 15 | func (e EntryNotFoundError) Error() string { 16 | return e.message 17 | } 18 | -------------------------------------------------------------------------------- /bigcache/fnv.go: -------------------------------------------------------------------------------- 1 | package bigcache 2 | 3 | // newDefaultHasher returns a new 64-bit FNV-1a Hasher which makes no memory allocations. 4 | // Its Sum64 method will lay the value out in big-endian byte order. 5 | // See https://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function 6 | func newDefaultHasher() Hasher { 7 | return fnv64a{} 8 | } 9 | 10 | type fnv64a struct{} 11 | 12 | const ( 13 | // offset64 FNVa offset basis. See https://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function#FNV-1a_hash 14 | offset64 = 14695981039346656037 15 | // prime64 FNVa prime value. See https://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function#FNV-1a_hash 16 | prime64 = 1099511628211 17 | ) 18 | 19 | // Sum64 gets the string and returns its uint64 hash value. 20 | func (f fnv64a) Sum64(key string) uint64 { 21 | var hash uint64 = offset64 22 | for i := 0; i < len(key); i++ { 23 | hash ^= uint64(key[i]) 24 | hash *= prime64 25 | } 26 | 27 | return hash 28 | } 29 | -------------------------------------------------------------------------------- /bigcache/hash.go: -------------------------------------------------------------------------------- 1 | package bigcache 2 | 3 | // Hasher is responsible for generating unsigned, 64 bit hash of provided string. Hasher should minimize collisions 4 | // (generating same hash for different strings) and while performance is also important fast functions are preferable (i.e. 5 | // you can use FarmHash family). 6 | type Hasher interface { 7 | Sum64(string) uint64 8 | } 9 | -------------------------------------------------------------------------------- /bigcache/iterator.go: -------------------------------------------------------------------------------- 1 | package bigcache 2 | 3 | import "sync" 4 | 5 | type iteratorError string 6 | 7 | func (e iteratorError) Error() string { 8 | return string(e) 9 | } 10 | 11 | // ErrInvalidIteratorState is reported when iterator is in invalid state 12 | const ErrInvalidIteratorState = iteratorError("Iterator is in invalid state. 
Use SetNext() to move to next position") 13 | 14 | // ErrCannotRetrieveEntry is reported when entry cannot be retrieved from underlying 15 | const ErrCannotRetrieveEntry = iteratorError("Could not retrieve entry from cache") 16 | 17 | var emptyEntryInfo = EntryInfo{} 18 | 19 | // EntryInfo holds informations about entry in the cache 20 | type EntryInfo struct { 21 | timestamp uint64 22 | hash uint64 23 | key string 24 | value []byte 25 | } 26 | 27 | // Key returns entry's underlying key 28 | func (e EntryInfo) Key() string { 29 | return e.key 30 | } 31 | 32 | // Hash returns entry's hash value 33 | func (e EntryInfo) Hash() uint64 { 34 | return e.hash 35 | } 36 | 37 | // Timestamp returns entry's timestamp (time of insertion) 38 | func (e EntryInfo) Timestamp() uint64 { 39 | return e.timestamp 40 | } 41 | 42 | // Value returns entry's underlying value 43 | func (e EntryInfo) Value() []byte { 44 | return e.value 45 | } 46 | 47 | // EntryInfoIterator allows to iterate over entries in the cache 48 | type EntryInfoIterator struct { 49 | mutex sync.Mutex 50 | cache *BigCache 51 | currentShard int 52 | currentIndex int 53 | elements []uint32 54 | elementsCount int 55 | valid bool 56 | } 57 | 58 | // SetNext moves to next element and returns true if it exists. 59 | func (it *EntryInfoIterator) SetNext() bool { 60 | it.mutex.Lock() 61 | 62 | it.valid = false 63 | it.currentIndex++ 64 | 65 | if it.elementsCount > it.currentIndex { 66 | it.valid = true 67 | it.mutex.Unlock() 68 | return true 69 | } 70 | 71 | for i := it.currentShard + 1; i < it.cache.config.Shards; i++ { 72 | it.elements, it.elementsCount = it.cache.shards[i].copyKeys() 73 | 74 | // Non empty shard - stick with it 75 | if it.elementsCount > 0 { 76 | it.currentIndex = 0 77 | it.currentShard = i 78 | it.valid = true 79 | it.mutex.Unlock() 80 | return true 81 | } 82 | } 83 | it.mutex.Unlock() 84 | return false 85 | } 86 | 87 | func newIterator(cache *BigCache) *EntryInfoIterator { 88 | elements, count := cache.shards[0].copyKeys() 89 | 90 | return &EntryInfoIterator{ 91 | cache: cache, 92 | currentShard: 0, 93 | currentIndex: -1, 94 | elements: elements, 95 | elementsCount: count, 96 | } 97 | } 98 | 99 | // Value returns current value from the iterator 100 | func (it *EntryInfoIterator) Value() (EntryInfo, error) { 101 | it.mutex.Lock() 102 | 103 | if !it.valid { 104 | it.mutex.Unlock() 105 | return emptyEntryInfo, ErrInvalidIteratorState 106 | } 107 | 108 | entry, err := it.cache.shards[it.currentShard].getEntry(int(it.elements[it.currentIndex])) 109 | 110 | if err != nil { 111 | it.mutex.Unlock() 112 | return emptyEntryInfo, ErrCannotRetrieveEntry 113 | } 114 | it.mutex.Unlock() 115 | 116 | return EntryInfo{ 117 | timestamp: readTimestampFromEntry(entry), 118 | hash: readHashFromEntry(entry), 119 | key: readKeyFromEntry(entry), 120 | value: readEntry(entry), 121 | }, nil 122 | } 123 | -------------------------------------------------------------------------------- /bigcache/logger.go: -------------------------------------------------------------------------------- 1 | package bigcache 2 | 3 | import ( 4 | "log" 5 | "os" 6 | ) 7 | 8 | // Logger is invoked when `Config.Verbose=true` 9 | type Logger interface { 10 | Printf(format string, v ...interface{}) 11 | } 12 | 13 | // this is a safeguard, breaking on compile time in case 14 | // `log.Logger` does not adhere to our `Logger` interface. 
15 | // see https://golang.org/doc/faq#guarantee_satisfies_interface 16 | var _ Logger = &log.Logger{} 17 | 18 | // DefaultLogger returns a `Logger` implementation 19 | // backed by stdlib's log 20 | func DefaultLogger() *log.Logger { 21 | return log.New(os.Stdout, "", log.LstdFlags) 22 | } 23 | 24 | func newLogger(custom Logger) Logger { 25 | if custom != nil { 26 | return custom 27 | } 28 | 29 | return DefaultLogger() 30 | } 31 | -------------------------------------------------------------------------------- /bigcache/queue/bytes_queue.go: -------------------------------------------------------------------------------- 1 | package queue 2 | 3 | import ( 4 | "encoding/binary" 5 | "log" 6 | "time" 7 | ) 8 | 9 | const ( 10 | // Number of bytes used to keep information about entry size 11 | headerEntrySize = 4 12 | // Bytes before left margin are not used. Zero index means element does not exist in queue, useful while reading slice from index 13 | leftMarginIndex = 1 14 | // Minimum empty blob size in bytes. Empty blob fills space between tail and head in additional memory allocation. 15 | // It keeps entries indexes unchanged 16 | minimumEmptyBlobSize = 32 + headerEntrySize 17 | ) 18 | 19 | // BytesQueue is a non-thread safe queue type of fifo based on bytes array. 20 | // For every push operation index of entry is returned. It can be used to read the entry later 21 | type BytesQueue struct { 22 | array []byte 23 | capacity int 24 | maxCapacity int 25 | head int 26 | tail int 27 | count int 28 | rightMargin int 29 | headerBuffer []byte 30 | verbose bool 31 | initialCapacity int 32 | freelist *freeList 33 | } 34 | 35 | type queueError struct { 36 | message string 37 | } 38 | 39 | // NewBytesQueue initialize new bytes queue. 40 | // Initial capacity is used in bytes array allocation 41 | // When verbose flag is set then information about memory allocation are printed 42 | func NewBytesQueue(initialCapacity int, maxCapacity int, verbose bool) *BytesQueue { 43 | return &BytesQueue{ 44 | array: make([]byte, initialCapacity), 45 | capacity: initialCapacity, 46 | maxCapacity: maxCapacity, 47 | headerBuffer: make([]byte, headerEntrySize), 48 | tail: leftMarginIndex, 49 | head: leftMarginIndex, 50 | rightMargin: leftMarginIndex, 51 | verbose: verbose, 52 | initialCapacity: initialCapacity, 53 | freelist: newFreeList(), 54 | } 55 | } 56 | 57 | // Reset removes all entries from queue 58 | func (q *BytesQueue) Reset() { 59 | // Just reset indexes 60 | q.tail = leftMarginIndex 61 | q.head = leftMarginIndex 62 | q.rightMargin = leftMarginIndex 63 | q.count = 0 64 | } 65 | 66 | // Push copies entry at the end of queue and moves tail pointer. Allocates more space if needed. 67 | // Returns index for pushed data or error if maximum size queue limit is reached. 68 | func (q *BytesQueue) Push(data []byte) (int, error) { 69 | dataLen := len(data) 70 | 71 | idx := q.freelist.find(dataLen + headerEntrySize) 72 | if -1 != idx { 73 | tmpTail := q.tail 74 | q.tail = idx 75 | binary.LittleEndian.PutUint32(q.headerBuffer, uint32(dataLen)) 76 | q.copy(q.headerBuffer, headerEntrySize) 77 | q.copy(data, dataLen) 78 | q.count++ 79 | q.tail = tmpTail 80 | return idx, nil 81 | } 82 | 83 | if q.availableSpaceAfterTail() < dataLen+headerEntrySize { 84 | if q.availableSpaceBeforeHead() >= dataLen+headerEntrySize { 85 | q.tail = leftMarginIndex 86 | } else if q.capacity+headerEntrySize+dataLen >= q.maxCapacity && q.maxCapacity > 0 { 87 | return -1, &queueError{"Full queue. 
Maximum size limit reached."} 88 | } else { 89 | q.allocateAdditionalMemory(dataLen + headerEntrySize) 90 | } 91 | } 92 | 93 | index := q.tail 94 | q.push(data, dataLen) 95 | 96 | return index, nil 97 | } 98 | 99 | func (q *BytesQueue) allocateAdditionalMemory(minimum int) { 100 | start := time.Now() 101 | if q.capacity < minimum { 102 | q.capacity += minimum 103 | } 104 | q.capacity = q.capacity * 2 105 | if q.capacity > q.maxCapacity && q.maxCapacity > 0 { 106 | q.capacity = q.maxCapacity 107 | } 108 | 109 | oldArray := q.array 110 | q.array = make([]byte, q.capacity) 111 | 112 | if leftMarginIndex != q.rightMargin { 113 | copy(q.array, oldArray[:q.rightMargin]) 114 | 115 | if q.tail < q.head { 116 | emptyBlobLen := q.head - q.tail - headerEntrySize 117 | q.push(make([]byte, emptyBlobLen), emptyBlobLen) 118 | q.head = leftMarginIndex 119 | q.tail = q.rightMargin 120 | } 121 | } 122 | 123 | if q.verbose { 124 | log.Printf("Allocated new queue in %s; Capacity: %d \n", time.Since(start), q.capacity) 125 | } 126 | } 127 | 128 | func (q *BytesQueue) push(data []byte, len int) { 129 | binary.LittleEndian.PutUint32(q.headerBuffer, uint32(len)) 130 | q.copy(q.headerBuffer, headerEntrySize) 131 | 132 | q.copy(data, len) 133 | 134 | if q.tail > q.head { 135 | q.rightMargin = q.tail 136 | } 137 | 138 | q.count++ 139 | } 140 | 141 | func (q *BytesQueue) copy(data []byte, len int) { 142 | q.tail += copy(q.array[q.tail:], data[:len]) 143 | } 144 | 145 | // Pop reads the oldest entry from queue and moves head pointer to the next one 146 | func (q *BytesQueue) Pop() ([]byte, error) { 147 | data, size, err := q.peek(q.head) 148 | if err != nil { 149 | return nil, err 150 | } 151 | 152 | q.head += headerEntrySize + size 153 | q.count-- 154 | 155 | if q.head == q.rightMargin { 156 | q.head = leftMarginIndex 157 | if q.tail == q.rightMargin { 158 | q.tail = leftMarginIndex 159 | } 160 | q.rightMargin = q.tail 161 | } 162 | 163 | return data, nil 164 | } 165 | 166 | // Peek reads the oldest entry from list without moving head pointer 167 | func (q *BytesQueue) Peek() ([]byte, error) { 168 | data, _, err := q.peek(q.head) 169 | return data, err 170 | } 171 | 172 | // Get reads entry from index 173 | func (q *BytesQueue) Get(index int) ([]byte, error) { 174 | data, _, err := q.peek(index) 175 | return data, err 176 | } 177 | 178 | // Capacity returns number of allocated bytes for queue 179 | func (q *BytesQueue) Capacity() int { 180 | return q.capacity 181 | } 182 | 183 | // Len returns number of entries kept in queue 184 | func (q *BytesQueue) Len() int { 185 | return q.count 186 | } 187 | 188 | // Error returns error message 189 | func (e *queueError) Error() string { 190 | return e.message 191 | } 192 | 193 | func (q *BytesQueue) peek(index int) ([]byte, int, error) { 194 | 195 | if q.count == 0 { 196 | return nil, 0, &queueError{"Empty queue"} 197 | } 198 | 199 | if index <= 0 { 200 | return nil, 0, &queueError{"Index must be grater than zero. 
Invalid index."} 201 | } 202 | 203 | if index+headerEntrySize >= len(q.array) { 204 | return nil, 0, &queueError{"Index out of range"} 205 | } 206 | 207 | blockSize := int(binary.LittleEndian.Uint32(q.array[index : index+headerEntrySize])) 208 | return q.array[index+headerEntrySize : index+headerEntrySize+blockSize], blockSize, nil 209 | } 210 | 211 | func (q *BytesQueue) availableSpaceAfterTail() int { 212 | if q.tail >= q.head { 213 | return q.capacity - q.tail 214 | } 215 | return q.head - q.tail - minimumEmptyBlobSize 216 | } 217 | 218 | func (q *BytesQueue) availableSpaceBeforeHead() int { 219 | if q.tail >= q.head { 220 | return q.head - leftMarginIndex - minimumEmptyBlobSize 221 | } 222 | return q.head - q.tail - minimumEmptyBlobSize 223 | } 224 | 225 | //Delete removes an index and associated values from the queue 226 | func (q *BytesQueue) Delete(index int) error { 227 | _, size, err := q.peek(index) 228 | if err != nil { 229 | return err 230 | } 231 | 232 | q.freelist.add(index, size+headerEntrySize) 233 | q.count-- 234 | return nil 235 | } 236 | -------------------------------------------------------------------------------- /bigcache/queue/freeList.go: -------------------------------------------------------------------------------- 1 | package queue 2 | 3 | import ( 4 | "github.com/emirpasic/gods/trees/avltree" 5 | "github.com/emirpasic/gods/utils" 6 | ) 7 | 8 | //item that will trace free space in the parent slice 9 | type itemPos struct { 10 | actualSize int 11 | parentIndex int 12 | } 13 | 14 | type posArray []*itemPos 15 | 16 | //freelist data structure 17 | type freeList struct { 18 | sizeList [8]posArray 19 | indexTree *avltree.Tree 20 | } 21 | 22 | //create a new freeList 23 | func newFreeList() *freeList { 24 | fl := &freeList{ 25 | indexTree: avltree.NewWith(utils.IntComparator), 26 | } 27 | 28 | for x := 0; x < 8; x++ { 29 | fl.sizeList[x] = make(posArray, 0, 256) 30 | } 31 | 32 | return fl 33 | } 34 | 35 | //add is to add an index of a parent slice along with its size into the freelist. 36 | //this is done by checking if previous items in the freelist is adjacent to the new one been added 37 | //if so combine them into a single entry 38 | func (list *freeList) add(index, size int) error { 39 | //NOTE: every added entry will create 2 entries in the indexTree 40 | //this is to help identify its start and end positions. 41 | 42 | item := &itemPos{parentIndex: index, actualSize: size} 43 | if d, found := list.indexTree.Get(index - 1); found { //check if there is an adjacent entry to the 'left' of index 44 | v := d.(*itemPos) 45 | list.indexTree.Remove(index - 1) 46 | list.indexTree.Remove(v.parentIndex) 47 | list.removeFromSizeList(v) 48 | 49 | item.parentIndex = v.parentIndex 50 | item.actualSize = v.actualSize + size 51 | } 52 | if d, found := list.indexTree.Get(index + size); found { //check if there is an adjacent entry to the 'right' of index 53 | v := d.(*itemPos) 54 | list.indexTree.Remove(index + size) 55 | list.indexTree.Remove(v.parentIndex + v.actualSize - 1) 56 | list.removeFromSizeList(v) 57 | 58 | item.parentIndex = index 59 | item.actualSize = size + v.actualSize 60 | } 61 | 62 | //store it into the indexTree. 
both its start and end position 63 | list.indexTree.Put(item.parentIndex, item) 64 | list.indexTree.Put(item.parentIndex+item.actualSize-1, item) 65 | list.putIntoSizeList(item) 66 | 67 | return nil 68 | } 69 | 70 | func (list *freeList) removeFromSizeList(v *itemPos) { 71 | var pos int 72 | switch { 73 | case v.actualSize < 64: 74 | pos = listPos(v, list.sizeList[0]) 75 | if pos != -1 { 76 | copy(list.sizeList[0][pos:], list.sizeList[0][(pos+1):]) 77 | list.sizeList[0] = list.sizeList[0][:len(list.sizeList[0])-1] 78 | } 79 | case v.actualSize < 128: 80 | pos = listPos(v, list.sizeList[1]) 81 | if pos != -1 { 82 | copy(list.sizeList[1][pos:], list.sizeList[1][(pos+1):]) 83 | list.sizeList[1] = list.sizeList[1][:len(list.sizeList[1])-1] 84 | } 85 | case v.actualSize < 256: 86 | pos = listPos(v, list.sizeList[2]) 87 | if pos != -1 { 88 | copy(list.sizeList[2][pos:], list.sizeList[2][(pos+1):]) 89 | list.sizeList[2] = list.sizeList[2][:len(list.sizeList[2])-1] 90 | } 91 | case v.actualSize < 512: 92 | pos = listPos(v, list.sizeList[3]) 93 | if pos != -1 { 94 | copy(list.sizeList[3][pos:], list.sizeList[3][(pos+1):]) 95 | list.sizeList[3] = list.sizeList[3][:len(list.sizeList[3])-1] 96 | } 97 | case v.actualSize < 1024: 98 | pos = listPos(v, list.sizeList[4]) 99 | if pos != -1 { 100 | copy(list.sizeList[4][pos:], list.sizeList[4][(pos+1):]) 101 | list.sizeList[4] = list.sizeList[4][:len(list.sizeList[4])-1] 102 | } 103 | case v.actualSize < 2048: 104 | pos = listPos(v, list.sizeList[5]) 105 | if pos != -1 { 106 | copy(list.sizeList[5][pos:], list.sizeList[5][(pos+1):]) 107 | list.sizeList[5] = list.sizeList[5][:len(list.sizeList[5])-1] 108 | } 109 | case v.actualSize < 4096: 110 | pos = listPos(v, list.sizeList[6]) 111 | if pos != -1 { 112 | copy(list.sizeList[6][pos:], list.sizeList[6][(pos+1):]) 113 | list.sizeList[6] = list.sizeList[6][:len(list.sizeList[6])-1] 114 | } 115 | case v.actualSize > 4096: 116 | pos = listPos(v, list.sizeList[7]) 117 | if pos != -1 { 118 | copy(list.sizeList[7][pos:], list.sizeList[7][(pos+1):]) 119 | list.sizeList[7] = list.sizeList[7][:len(list.sizeList[7])-1] 120 | } 121 | } 122 | } 123 | 124 | func (list *freeList) putIntoSizeList(item *itemPos) { 125 | switch { 126 | case item.actualSize < 64: 127 | list.sizeList[0] = append(list.sizeList[0], item) 128 | case item.actualSize < 128: 129 | list.sizeList[1] = append(list.sizeList[1], item) 130 | case item.actualSize < 256: 131 | list.sizeList[2] = append(list.sizeList[2], item) 132 | case item.actualSize < 512: 133 | list.sizeList[3] = append(list.sizeList[3], item) 134 | case item.actualSize < 1024: 135 | list.sizeList[4] = append(list.sizeList[4], item) 136 | case item.actualSize < 2048: 137 | list.sizeList[5] = append(list.sizeList[5], item) 138 | case item.actualSize < 4096: 139 | list.sizeList[6] = append(list.sizeList[6], item) 140 | case item.actualSize > 4096: 141 | list.sizeList[7] = append(list.sizeList[7], item) 142 | } 143 | } 144 | 145 | func listPos(v *itemPos, arr posArray) int { 146 | for x := 0; x < len(arr); x++ { 147 | if v == arr[x] { 148 | return x 149 | } 150 | } 151 | 152 | return -1 153 | } 154 | 155 | func (list *freeList) find(size int) int { 156 | 157 | index := 0 158 | buckSize := 0 159 | switch { 160 | case size < 64: 161 | index = 0 162 | buckSize = 64 163 | case size < 128: 164 | index = 1 165 | buckSize = 128 166 | case size < 256: 167 | index = 2 168 | buckSize = 256 169 | case size < 512: 170 | index = 3 171 | buckSize = 512 172 | case size < 1024: 173 | index = 4 174 | 
buckSize = 1024 175 | case size < 2048: 176 | index = 5 177 | buckSize = 2048 178 | case size < 4096: 179 | index = 6 180 | buckSize = 4096 181 | case size > 4096: 182 | index = 7 183 | buckSize = 4097 184 | } 185 | 186 | for idx := index; idx < 8; idx++ { 187 | if v, found := list.findInSizeList(idx, size, buckSize); found { 188 | return v 189 | } 190 | if idx < 6 { 191 | buckSize *= 2 192 | } else { 193 | buckSize = 4097 194 | } 195 | 196 | } 197 | 198 | return -1 199 | } 200 | 201 | func (list *freeList) findInSizeList(idx int, size int, buckSize int) (int, bool) { 202 | for x := 0; x < len(list.sizeList[idx]); x++ { 203 | tmp := list.sizeList[idx][x] 204 | if size <= tmp.actualSize { 205 | list.indexTree.Remove(tmp.parentIndex) 206 | if size == tmp.actualSize { 207 | copy(list.sizeList[idx][x:], list.sizeList[idx][(x+1):]) 208 | list.sizeList[idx] = list.sizeList[idx][:len(list.sizeList[idx])-1] 209 | list.indexTree.Remove(tmp.actualSize + tmp.parentIndex - 1) 210 | return tmp.parentIndex, true 211 | } 212 | 213 | if (tmp.actualSize - size) >= buckSize { 214 | ret := tmp.parentIndex 215 | tmp.parentIndex += size 216 | tmp.actualSize -= size 217 | list.indexTree.Put(tmp.parentIndex, tmp) 218 | return ret, true 219 | } 220 | 221 | { 222 | list.indexTree.Remove(tmp.parentIndex + tmp.actualSize - 1) 223 | copy(list.sizeList[idx][x:], list.sizeList[idx][(x+1):]) 224 | list.sizeList[idx] = list.sizeList[idx][:len(list.sizeList[idx])-1] 225 | list.add(tmp.parentIndex+size, tmp.actualSize-size) 226 | return tmp.parentIndex, true 227 | } 228 | } 229 | } 230 | 231 | return -1, false 232 | } 233 | -------------------------------------------------------------------------------- /bigcache/queue/freeList_test.go: -------------------------------------------------------------------------------- 1 | package queue 2 | 3 | import ( 4 | "math/rand" 5 | "testing" 6 | ) 7 | 8 | func TestFreeList_add(t *testing.T) { 9 | list := newFreeList() 10 | list.add(0, 63) 11 | if len(list.sizeList[0]) != 1 { 12 | t.Error("sizeList index 0 should be 1 in length") 13 | } 14 | 15 | list.add(1024, 600) 16 | if len(list.sizeList[4]) != 1 { 17 | t.Error("sizeList index 4 should be 1 in length") 18 | } 19 | 20 | list.add(63, 100) 21 | if len(list.sizeList[0]) > 0 { 22 | t.Error("sizeList index 0 should not be 1 in length") 23 | } 24 | 25 | } 26 | 27 | func TestFreeList_find(t *testing.T) { 28 | list := newFreeList() 29 | list.add(0, 63) 30 | if len(list.sizeList[0]) != 1 { 31 | t.Error("sizeList index 0 should be 1 in length") 32 | } 33 | 34 | list.add(1024, 600) 35 | if len(list.sizeList[4]) != 1 { 36 | t.Error("sizeList index 4 should be 1 in length") 37 | } 38 | 39 | list.add(63, 100) 40 | if len(list.sizeList[0]) > 0 { 41 | t.Error("sizeList index 0 should not be 1 in length") 42 | } 43 | 44 | idx := list.find(1024 * 1024) 45 | if idx != -1 { 46 | t.Error("there should not be big enough room for this request") 47 | } 48 | 49 | t.Log(list.sizeList) 50 | idx = list.find(500) 51 | t.Log(idx) 52 | t.Log(list.sizeList) 53 | t.Log(list.sizeList[1][0]) 54 | } 55 | 56 | var gList *freeList 57 | 58 | func init() { 59 | gList = newFreeList() 60 | for x := 0; x < 1024; x++ { 61 | gList.add(x, rand.Intn(1024*1024*1024)) 62 | } 63 | } 64 | 65 | func BenchmarkFreelist(b *testing.B) { 66 | 67 | for x := 0; x < b.N; x++ { 68 | gList.find(rand.Intn(1024 * 1024)) 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /bigcache/shard.go: 
-------------------------------------------------------------------------------- 1 | package bigcache 2 | 3 | import ( 4 | "github.com/emirpasic/gods/sets/hashset" 5 | "github.com/oaStuff/clusteredBigCache/bigcache/queue" 6 | "sync" 7 | "sync/atomic" 8 | "time" 9 | ) 10 | 11 | //NO_EXPIRY means data placed into a shard will never expire 12 | const NO_EXPIRY uint64 = 0 13 | 14 | type cacheShard struct { 15 | sharedNum uint64 16 | hashmap map[uint64]uint32 17 | entries queue.BytesQueue 18 | lock sync.RWMutex 19 | entryBuffer []byte 20 | onRemove func(wrappedEntry []byte) 21 | 22 | isVerbose bool 23 | logger Logger 24 | clock clock 25 | lifeWindow uint64 26 | 27 | stats Stats 28 | ttlTable *ttlManager 29 | } 30 | 31 | type onRemoveCallback func(wrappedEntry []byte) 32 | 33 | func (s *cacheShard) get(key string, hashedKey uint64) ([]byte, error) { 34 | s.lock.RLock() 35 | itemIndex := s.hashmap[hashedKey] 36 | 37 | if itemIndex == 0 { 38 | s.lock.RUnlock() 39 | s.miss() 40 | return nil, notFound(key) 41 | } 42 | 43 | wrappedEntry, err := s.entries.Get(int(itemIndex)) 44 | if err != nil { 45 | s.lock.RUnlock() 46 | s.miss() 47 | return nil, err 48 | } 49 | if entryKey := readKeyFromEntry(wrappedEntry); key != entryKey { 50 | if s.isVerbose { 51 | s.logger.Printf("Collision detected. Both %q and %q have the same hash %x", key, entryKey, hashedKey) 52 | } 53 | s.lock.RUnlock() 54 | s.collision() 55 | return nil, notFound(key) 56 | } 57 | s.lock.RUnlock() 58 | s.hit() 59 | return readEntry(wrappedEntry), nil 60 | } 61 | 62 | func (s *cacheShard) set(key string, hashedKey uint64, entry []byte, duration time.Duration) (uint64, error) { 63 | expiryTimestamp := uint64(s.clock.epoch()) 64 | if duration != time.Duration(NO_EXPIRY) { 65 | expiryTimestamp += uint64(duration.Seconds()) 66 | } else { 67 | expiryTimestamp = NO_EXPIRY 68 | } 69 | 70 | s.lock.Lock() 71 | 72 | if previousIndex := s.hashmap[hashedKey]; previousIndex != 0 { 73 | if previousEntry, err := s.entries.Get(int(previousIndex)); err == nil { 74 | timestamp := readTimestampFromEntry(previousEntry) 75 | s.ttlTable.remove(timestamp, key) 76 | resetKeyFromEntry(previousEntry) 77 | s.delete(previousIndex) 78 | } 79 | } 80 | 81 | w := wrapEntry(expiryTimestamp, hashedKey, key, entry, &s.entryBuffer) 82 | 83 | var err error 84 | if index, err := s.entries.Push(w); err == nil { 85 | s.hashmap[hashedKey] = uint32(index) 86 | s.lock.Unlock() 87 | if duration != time.Duration(NO_EXPIRY) { 88 | s.ttlTable.put(expiryTimestamp, key) 89 | } 90 | return expiryTimestamp, nil 91 | } 92 | 93 | return 0, err 94 | } 95 | 96 | func (s *cacheShard) evictDel(timeStamp uint64, set *hashset.Set) error { 97 | s.lock.Lock() 98 | 99 | for _, v := range set.Values() { // remove all data in the hashset for this eviction 100 | s.stats.EvictCount++ 101 | keyHash := s.ttlTable.ShardHasher.Sum64(v.(string)) 102 | itemIndex := s.hashmap[keyHash] 103 | 104 | if itemIndex == 0 { 105 | s.delmiss() 106 | continue 107 | } 108 | 109 | wrappedEntry, err := s.entries.Get(int(itemIndex)) 110 | if err != nil { 111 | s.delmiss() 112 | continue 113 | } 114 | 115 | storedTimestamp := readTimestampFromEntry(wrappedEntry) 116 | if storedTimestamp == timeStamp { 117 | delete(s.hashmap, keyHash) 118 | s.onRemove(wrappedEntry) 119 | resetKeyFromEntry(wrappedEntry) 120 | s.delete(itemIndex) 121 | s.delhit() 122 | } 123 | 124 | } 125 | 126 | s.lock.Unlock() 127 | 128 | return nil 129 | } 130 | 131 | func (s *cacheShard) del(key string, hashedKey uint64) error { 132 | s.lock.Lock() 133 | 
itemIndex := s.hashmap[hashedKey] 134 | 135 | if itemIndex == 0 { 136 | s.lock.Unlock() 137 | s.delmiss() 138 | return notFound(key) 139 | } 140 | 141 | wrappedEntry, err := s.entries.Get(int(itemIndex)) 142 | if err != nil { 143 | s.lock.Unlock() 144 | s.delmiss() 145 | return err 146 | } 147 | 148 | delete(s.hashmap, hashedKey) 149 | s.onRemove(wrappedEntry) 150 | resetKeyFromEntry(wrappedEntry) 151 | s.delete(itemIndex) 152 | s.lock.Unlock() 153 | 154 | timestamp := readTimestampFromEntry(wrappedEntry) 155 | s.ttlTable.remove(timestamp, key) 156 | 157 | s.delhit() 158 | return nil 159 | } 160 | 161 | func (s *cacheShard) onEvict(oldestEntry []byte, currentTimestamp uint64, evict func() error) bool { 162 | oldestTimestamp := readTimestampFromEntry(oldestEntry) 163 | if currentTimestamp-oldestTimestamp > s.lifeWindow { 164 | evict() 165 | return true 166 | } 167 | return false 168 | } 169 | 170 | //func (s *cacheShard) cleanUp(currentTimestamp uint64) { 171 | // s.lock.Lock() 172 | // for { 173 | // if oldestEntry, err := s.entries.Peek(); err != nil { 174 | // break 175 | // } else if evicted := s.onEvict(oldestEntry, currentTimestamp, s.removeOldestEntry); !evicted { 176 | // break 177 | // } 178 | // } 179 | // s.lock.Unlock() 180 | //} 181 | 182 | func (s *cacheShard) getOldestEntry() ([]byte, error) { 183 | return s.entries.Peek() 184 | } 185 | 186 | func (s *cacheShard) getEntry(index int) ([]byte, error) { 187 | return s.entries.Get(index) 188 | } 189 | 190 | func (s *cacheShard) copyKeys() (keys []uint32, next int) { 191 | keys = make([]uint32, len(s.hashmap)) 192 | 193 | s.lock.RLock() 194 | 195 | for _, index := range s.hashmap { 196 | keys[next] = index 197 | next++ 198 | } 199 | 200 | s.lock.RUnlock() 201 | return keys, next 202 | } 203 | 204 | func (s *cacheShard) removeOldestEntry() error { 205 | oldest, err := s.entries.Pop() 206 | if err == nil { 207 | hash := readHashFromEntry(oldest) 208 | delete(s.hashmap, hash) 209 | s.onRemove(oldest) 210 | return nil 211 | } 212 | return err 213 | } 214 | 215 | func (s *cacheShard) reset(config Config) { 216 | s.lock.Lock() 217 | s.hashmap = make(map[uint64]uint32, config.initialShardSize()) 218 | s.entryBuffer = make([]byte, config.MaxEntrySize+headersSizeInBytes) 219 | s.entries.Reset() 220 | s.ttlTable.reset() 221 | s.lock.Unlock() 222 | } 223 | 224 | func (s *cacheShard) len() int { 225 | s.lock.RLock() 226 | res := len(s.hashmap) 227 | s.lock.RUnlock() 228 | return res 229 | } 230 | 231 | func (s *cacheShard) getStats() Stats { 232 | return s.stats 233 | } 234 | 235 | func (s *cacheShard) hit() { 236 | atomic.AddInt64(&s.stats.Hits, 1) 237 | } 238 | 239 | func (s *cacheShard) miss() { 240 | atomic.AddInt64(&s.stats.Misses, 1) 241 | } 242 | 243 | func (s *cacheShard) delhit() { 244 | atomic.AddInt64(&s.stats.DelHits, 1) 245 | } 246 | 247 | func (s *cacheShard) delmiss() { 248 | atomic.AddInt64(&s.stats.DelMisses, 1) 249 | } 250 | 251 | func (s *cacheShard) collision() { 252 | atomic.AddInt64(&s.stats.Collisions, 1) 253 | } 254 | 255 | func (s *cacheShard) delete(index uint32) { 256 | s.entries.Delete(int(index)) 257 | } 258 | 259 | //func (s *cacheShard) Delete(index uint32) { 260 | // diff, _ := s.entries.Delete(int(index)) 261 | // 262 | // for i,_ := range s.hashmap { 263 | // idx := s.hashmap[i] 264 | // if idx > index { 265 | // s.hashmap[i] = idx - uint32(diff) 266 | // } 267 | // } 268 | // 269 | // s.fList.adjustIndexes(index, diff) 270 | //} 271 | //func (s *cacheShard) doCompact() { 272 | // for range 
time.NewTicker(time.Second * 10).C { 273 | // s.lock.Lock() 274 | // for x := 0; x < 512; x++ { 275 | // index, err := s.fList.Pop() 276 | // if err != nil { 277 | // break 278 | // } 279 | // s.Delete(uint32(index)) 280 | // } 281 | // s.lock.Unlock() 282 | // } 283 | //} 284 | 285 | func initNewShard(config Config, callback onRemoveCallback, clock clock, num uint64) *cacheShard { 286 | shard := &cacheShard{ 287 | hashmap: make(map[uint64]uint32, config.initialShardSize()), 288 | entries: *queue.NewBytesQueue(config.initialShardSize()*config.MaxEntrySize, config.maximumShardSize(), config.Verbose), 289 | entryBuffer: make([]byte, config.MaxEntrySize+headersSizeInBytes), 290 | onRemove: callback, 291 | 292 | isVerbose: config.Verbose, 293 | logger: newLogger(config.Logger), 294 | clock: clock, 295 | lifeWindow: uint64(config.LifeWindow.Seconds()), 296 | sharedNum: num, 297 | } 298 | 299 | shard.ttlTable = newTtlManager(shard, config.Hasher) 300 | 301 | //go shard.doCompact() 302 | 303 | return shard 304 | } 305 | -------------------------------------------------------------------------------- /bigcache/stats.go: -------------------------------------------------------------------------------- 1 | package bigcache 2 | 3 | // Stats stores cache statistics 4 | type Stats struct { 5 | // Hits is a number of successfully found keys 6 | Hits int64 7 | // Misses is a number of not found keys 8 | Misses int64 9 | // DelHits is a number of successfully deleted keys 10 | DelHits int64 11 | // DelMisses is a number of not deleted keys 12 | DelMisses int64 13 | // Collisions is a number of happened key-collisions 14 | Collisions int64 15 | 16 | EvictCount int64 17 | } 18 | -------------------------------------------------------------------------------- /bigcache/ttl.go: -------------------------------------------------------------------------------- 1 | package bigcache 2 | 3 | import ( 4 | "github.com/emirpasic/gods/sets/hashset" 5 | "github.com/emirpasic/gods/trees/avltree" 6 | "github.com/emirpasic/gods/utils" 7 | "sync" 8 | "time" 9 | ) 10 | 11 | type ttlManager struct { 12 | shard *cacheShard 13 | timeTree *avltree.Tree 14 | treeLock sync.Mutex 15 | timer *time.Timer 16 | ShardHasher Hasher 17 | } 18 | 19 | func newTtlManager(shard *cacheShard, hasher Hasher) *ttlManager { 20 | ttl := &ttlManager{ 21 | shard: shard, 22 | timeTree: avltree.NewWith(utils.UInt64Comparator), 23 | treeLock: sync.Mutex{}, 24 | timer: time.NewTimer(time.Hour * 10), //fire the time on time. 
just for initialization 25 | ShardHasher: hasher, 26 | } 27 | 28 | go ttl.eviction() 29 | 30 | return ttl 31 | } 32 | 33 | // Put item in the tree with key = timestamp 34 | // Manage collision with hashSet 35 | func (ttl *ttlManager) put(timestamp uint64, cacheKey string) { 36 | 37 | ttl.treeLock.Lock() 38 | defer ttl.treeLock.Unlock() 39 | 40 | if data, found := ttl.timeTree.Get(timestamp); found { //a hashset already exist, add to it 41 | data.(*hashset.Set).Add(cacheKey) 42 | return 43 | 44 | } 45 | 46 | set := hashset.New() //new value, create a hashset and use it 47 | set.Add(cacheKey) 48 | ttl.timeTree.Put(timestamp, set) 49 | 50 | if ttl.timeTree.Size() == 1 { 51 | ttl.resetTimer(timestamp) 52 | return 53 | } 54 | 55 | //is this timestamp at the head, if so then it displaced the smallest one so reset timer 56 | if key, _ := ttl.peek(); key == timestamp { 57 | ttl.resetTimer(timestamp) 58 | } 59 | 60 | } 61 | 62 | //Remove a timestamp and key pair from the tree 63 | func (ttl *ttlManager) remove(timestamp uint64, key string) { 64 | 65 | ttl.treeLock.Lock() 66 | defer ttl.treeLock.Unlock() 67 | 68 | if data, found := ttl.timeTree.Get(timestamp); found { // data is in hashset, only remove that one 69 | set := data.(*hashset.Set) 70 | set.Remove(key) 71 | if set.Empty() { 72 | if k, _ := ttl.peek(); k == timestamp { 73 | ttl.timeTree.Remove(timestamp) 74 | if k, _ = ttl.peek(); k != nil { 75 | ttl.resetTimer(k.(uint64)) 76 | } 77 | return 78 | } 79 | ttl.timeTree.Remove(timestamp) //remove everything if empty 80 | } 81 | } 82 | 83 | } 84 | 85 | //reset everything to default 86 | func (ttl *ttlManager) reset() { 87 | ttl.treeLock.Lock() 88 | ttl.timeTree.Clear() 89 | ttl.stopTimer() 90 | ttl.treeLock.Unlock() 91 | } 92 | 93 | //goroutine that handles eviction 94 | func (ttl *ttlManager) eviction() { 95 | 96 | for range ttl.timer.C { 97 | 98 | ttl.treeLock.Lock() //acquire tree lock 99 | if ttl.timeTree.Size() < 1 { //if tree is empty move on 100 | ttl.treeLock.Unlock() 101 | continue 102 | } 103 | 104 | key, value := ttl.peek() //peek the value at the head(the tree keys are ordered) 105 | ttl.timeTree.Remove(key) 106 | 107 | ttl.treeLock.Unlock() 108 | ttl.evict(key.(uint64), value) 109 | 110 | for { //loop through to ensure all expired keys are removed in this single step 111 | 112 | ttl.treeLock.Lock() 113 | if ttl.timeTree.Size() < 1 { 114 | break 115 | } 116 | 117 | key, value = ttl.peek() 118 | nextExpiryTime := key.(uint64) 119 | interval := nextExpiryTime - uint64(time.Now().Unix()) 120 | if 0 == interval { 121 | ttl.timeTree.Remove(key) 122 | ttl.treeLock.Unlock() 123 | ttl.evict(key.(uint64), value) 124 | } else { 125 | ttl.resetTimer(nextExpiryTime) //TODO: should this not just call Reset() directly? 
seen that the timer just fired 126 | break // TODO: and would be in the expired state 127 | } 128 | } 129 | 130 | ttl.treeLock.Unlock() 131 | } 132 | } 133 | 134 | //Reset the eviction timer 135 | func (ttl *ttlManager) resetTimer(timeStamp uint64) { 136 | 137 | interval := timeStamp - uint64(time.Now().Unix()) 138 | if !ttl.timer.Stop() { 139 | select { 140 | case <-ttl.timer.C: 141 | default: 142 | } 143 | } 144 | ttl.timer.Reset(time.Second * time.Duration(interval)) 145 | } 146 | 147 | //Stop the eviction timer 148 | func (ttl *ttlManager) stopTimer() { 149 | if !ttl.timer.Stop() { 150 | select { 151 | case <-ttl.timer.C: 152 | default: 153 | } 154 | } 155 | } 156 | 157 | //Grab the data at the beginning of the iterator 158 | func (ttl *ttlManager) peek() (interface{}, interface{}) { 159 | 160 | it := ttl.timeTree.Iterator() 161 | it.Next() 162 | return it.Key(), it.Value() 163 | } 164 | 165 | //Do the eviction from the tree and parent shard 166 | func (ttl *ttlManager) evict(timeStamp uint64, value interface{}) { 167 | 168 | set := value.(*hashset.Set) 169 | ttl.shard.evictDel(timeStamp, set) 170 | } 171 | -------------------------------------------------------------------------------- /bigcache/utils.go: -------------------------------------------------------------------------------- 1 | package bigcache 2 | 3 | func max(a, b int) int { 4 | if a > b { 5 | return a 6 | } 7 | return b 8 | } 9 | 10 | func convertMBToBytes(value int) int { 11 | return value * 1024 * 1024 12 | } 13 | 14 | func isPowerOfTwo(number int) bool { 15 | return (number & (number - 1)) == 0 16 | } 17 | -------------------------------------------------------------------------------- /comms/connection.go: -------------------------------------------------------------------------------- 1 | package comms 2 | 3 | import ( 4 | "bufio" 5 | "errors" 6 | "io" 7 | "net" 8 | "strings" 9 | "sync" 10 | "time" 11 | ) 12 | 13 | var ( 14 | errConnectionUnusable = errors.New("connection is not usable") 15 | errTimeout = errors.New("i/o timeout") 16 | ) 17 | 18 | //Connection defines a connection to a remote peer 19 | type Connection struct { 20 | Remote string 21 | Uid string 22 | conn *net.TCPConn 23 | buffReader *bufio.Reader 24 | readTimeout time.Duration 25 | Usable bool 26 | writeLock sync.Mutex 27 | } 28 | 29 | //NewConnection Create a new tcp connection and connects to the remote entity 30 | func NewConnection(endpoint string, connectionTimeout time.Duration) (*Connection, error) { 31 | 32 | c := &Connection{} 33 | conn, err := net.DialTimeout("tcp", endpoint, connectionTimeout) 34 | if err != nil { 35 | return nil, err 36 | } 37 | 38 | c.conn = conn.(*net.TCPConn) 39 | 40 | c.Uid = c.conn.LocalAddr().String() 41 | c.Remote = endpoint 42 | c.conn.SetKeepAlive(true) 43 | //c.conn.SetReadBuffer(1024 * 1024) 44 | //c.conn.SetWriteBuffer(1024 * 1024) 45 | c.buffReader = bufio.NewReader(c) 46 | c.Usable = true 47 | c.writeLock = sync.Mutex{} 48 | 49 | return c, nil 50 | } 51 | 52 | //WrapConnection wraps a tcp conn pointer into this struct 53 | func WrapConnection(conn *net.TCPConn) *Connection { 54 | 55 | c := &Connection{} 56 | c.conn = conn 57 | c.Uid = c.conn.LocalAddr().String() 58 | c.Remote = conn.RemoteAddr().String() 59 | c.conn.SetKeepAlive(true) 60 | //c.conn.SetReadBuffer(1024 * 1024) 61 | //c.conn.SetWriteBuffer(1024 * 1024) 62 | c.buffReader = bufio.NewReader(c) 63 | c.Usable = true 64 | c.writeLock = sync.Mutex{} 65 | 66 | return c 67 | } 68 | 69 | //SetReadTimeout set the connection read timeout 70 | func (c 
*Connection) SetReadTimeout(timeout time.Duration) { 71 | c.readTimeout = timeout 72 | } 73 | 74 | func (c *Connection) Read(p []byte) (int, error) { 75 | if !c.Usable { 76 | return 0, errConnectionUnusable 77 | } 78 | 79 | if c.readTimeout != 0 { 80 | c.conn.SetReadDeadline(time.Now().Add(c.readTimeout)) 81 | } 82 | 83 | n, err := c.conn.Read(p) 84 | if err != nil { 85 | if err == io.EOF { 86 | c.Usable = false 87 | } else { 88 | if strings.Contains(err.Error(), "timeout") { 89 | err = errors.New("i/o timeout") 90 | } 91 | } 92 | } 93 | 94 | return n, err 95 | } 96 | 97 | func (c *Connection) Write(data []byte) (int, error) { 98 | if !c.Usable { 99 | return 0, errConnectionUnusable 100 | } 101 | 102 | c.writeLock.Lock() 103 | defer c.writeLock.Unlock() 104 | 105 | count := 0 106 | size := len(data) 107 | for count < size { 108 | n, err := c.conn.Write(data[count:]) 109 | if err != nil { 110 | if err == io.EOF { 111 | c.Usable = false 112 | } 113 | return count, err 114 | } 115 | 116 | count += n 117 | } 118 | 119 | return count, nil 120 | } 121 | 122 | //SendData sends a []byte over the network 123 | func (c *Connection) SendData(data []byte) error { 124 | 125 | if !c.Usable { 126 | return errConnectionUnusable 127 | } 128 | 129 | c.writeLock.Lock() 130 | defer c.writeLock.Unlock() 131 | 132 | count := 0 133 | size := len(data) 134 | for count < size { 135 | n, err := c.conn.Write(data[count:]) 136 | if err != nil { 137 | if err == io.EOF { 138 | c.Usable = false 139 | } 140 | return err 141 | } 142 | 143 | count += n 144 | } 145 | 146 | return nil 147 | } 148 | 149 | //ReadData reads size byte of data and return is to the caller 150 | func (c *Connection) ReadData(size uint, timeout time.Duration) ([]byte, error) { 151 | 152 | ret := make([]byte, size) 153 | var err error 154 | 155 | tmp := c.readTimeout 156 | c.SetReadTimeout(timeout) 157 | defer c.SetReadTimeout(tmp) 158 | 159 | if 0 != timeout { 160 | done := make(chan bool) 161 | defer close(done) 162 | 163 | go func() { 164 | _, err = io.ReadFull(c.buffReader, ret) 165 | done <- true 166 | }() 167 | 168 | select { 169 | case <-done: 170 | return ret, err 171 | case <-time.After(timeout): 172 | return nil, errTimeout 173 | } 174 | } else { 175 | _, err = io.ReadFull(c.buffReader, ret) 176 | return ret, err 177 | } 178 | } 179 | 180 | //Close calls shutdown on this struct 181 | func (c *Connection) Close() { 182 | c.Shutdown() 183 | } 184 | 185 | //Shutdown closes the network connection 186 | func (c *Connection) Shutdown() { 187 | c.conn.Close() 188 | c.buffReader = nil 189 | c.Usable = false 190 | } 191 | -------------------------------------------------------------------------------- /message/defs.go: -------------------------------------------------------------------------------- 1 | package message 2 | 3 | //Constants for message codes 4 | const ( 5 | MsgVERIFY = iota + 10 6 | MsgVERIFYOK 7 | MsgPING 8 | MsgPONG 9 | MsgPUT 10 | MsgGETReq 11 | MsgGETRsp 12 | MsgDEL 13 | MsgSyncReq 14 | MsgSyncRsp 15 | ) 16 | 17 | //NodeWireMessage defines the struct that carries message on the wire 18 | type NodeWireMessage struct { 19 | Code uint16 20 | Data []byte 21 | } 22 | 23 | //NodeMessage defines the interface for all message that can be sent and received 24 | type NodeMessage interface { 25 | Serialize() *NodeWireMessage 26 | DeSerialize(msg *NodeWireMessage) 27 | } 28 | 29 | //ProposedPeer are used to represent peers that needs to be connected to 30 | type ProposedPeer struct { 31 | Id string 32 | IpAddress string 33 | } 34 | 35 | 
//MsgCodeToString maps message code to strings 36 | func MsgCodeToString(code uint16) string { 37 | switch code { 38 | case MsgDEL: 39 | return "msgDelete" 40 | case MsgSyncRsp: 41 | return "msgSyncRsp" 42 | case MsgSyncReq: 43 | return "msgSyncReq" 44 | case MsgPUT: 45 | return "msgPUT" 46 | case MsgPONG: 47 | return "msgPONG" 48 | case MsgPING: 49 | return "mgsPING" 50 | case MsgVERIFYOK: 51 | return "msgVerifyOK" 52 | case MsgVERIFY: 53 | return "msgVerify" 54 | case MsgGETReq: 55 | return "msgGETReq" 56 | case MsgGETRsp: 57 | return "msgGETRsp" 58 | } 59 | 60 | return "unknown" 61 | } 62 | -------------------------------------------------------------------------------- /message/deleteMessage.go: -------------------------------------------------------------------------------- 1 | package message 2 | 3 | import "encoding/json" 4 | 5 | //DeleteMessage is the struct message that defines which data to delete 6 | type DeleteMessage struct { 7 | Code uint16 `json:"code"` 8 | Key string `json:"key"` 9 | } 10 | 11 | //Serialize delete message to node wire message 12 | func (dm *DeleteMessage) Serialize() *NodeWireMessage { 13 | dm.Code = MsgDEL 14 | data, _ := json.Marshal(dm) 15 | return &NodeWireMessage{Code: MsgDEL, Data: data} 16 | } 17 | 18 | //DeSerialize node wire message into delete message 19 | func (dm *DeleteMessage) DeSerialize(msg *NodeWireMessage) { 20 | json.Unmarshal(msg.Data, dm) 21 | } 22 | -------------------------------------------------------------------------------- /message/getReqMessage.go: -------------------------------------------------------------------------------- 1 | package message 2 | 3 | import "encoding/json" 4 | 5 | //GetReqMessage is message struct for getting request from remoteNode 6 | type GetReqMessage struct { 7 | Code uint16 `json:"code"` 8 | Key string `json:"key"` 9 | PendingKey string `json:"pending_key"` 10 | } 11 | 12 | //Serialize get request message to node wire message 13 | func (gm *GetReqMessage) Serialize() *NodeWireMessage { 14 | gm.Code = MsgGETReq 15 | data, _ := json.Marshal(gm) 16 | return &NodeWireMessage{Code: MsgGETReq, Data: data} 17 | } 18 | 19 | //DeSerialize node wire message into get request message 20 | func (gm *GetReqMessage) DeSerialize(msg *NodeWireMessage) { 21 | json.Unmarshal(msg.Data, gm) 22 | } 23 | -------------------------------------------------------------------------------- /message/getRspMessage.go: -------------------------------------------------------------------------------- 1 | package message 2 | 3 | import ( 4 | "encoding/binary" 5 | ) 6 | 7 | //GetRspMessage is the struct for the response message for a remote get 8 | type GetRspMessage struct { 9 | Code uint16 `json:"code"` 10 | PendingKey string `json:"pending_key"` 11 | Data []byte `json:"data"` 12 | } 13 | 14 | //Serialize get response message to node wire message 15 | func (gm *GetRspMessage) Serialize() *NodeWireMessage { 16 | msg := &NodeWireMessage{Code: MsgGETRsp} 17 | bKey := []byte(gm.PendingKey) 18 | keyLen := len(bKey) 19 | msg.Data = make([]byte, keyLen+len(gm.Data)+2) //2 is needed for the size of the key 20 | binary.LittleEndian.PutUint16(msg.Data, uint16(keyLen)) 21 | copy(msg.Data[2:], bKey) 22 | copy(msg.Data[(2+keyLen):], gm.Data) 23 | 24 | return msg 25 | } 26 | 27 | //DeSerialize node wire message into get response message 28 | func (gm *GetRspMessage) DeSerialize(msg *NodeWireMessage) { 29 | gm.Code = MsgGETRsp 30 | keyLen := binary.LittleEndian.Uint16(msg.Data) 31 | gm.PendingKey = string(msg.Data[2:(2 + keyLen)]) 32 | gm.Data = 
msg.Data[(2 + keyLen):] 33 | } 34 | -------------------------------------------------------------------------------- /message/messages_test.go: -------------------------------------------------------------------------------- 1 | package message 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | "time" 7 | ) 8 | 9 | func TestPutMessage(t *testing.T) { 10 | msg := PutMessage{Code: MsgPUT, Expiry: uint64(time.Now().Unix()), Key: "key_1", Data: []byte("data_A")} 11 | newMsg := PutMessage{} 12 | newMsg.DeSerialize(msg.Serialize()) 13 | if !reflect.DeepEqual(msg, newMsg) { 14 | t.Error("PutMessage serialization and deserialization not working properly") 15 | } 16 | } 17 | 18 | func TestSyncReqMessage(t *testing.T) { 19 | msg := SyncReqMessage{Code: MsgSyncReq, Mode: 20} 20 | newMsg := SyncReqMessage{} 21 | newMsg.DeSerialize(msg.Serialize()) 22 | if !reflect.DeepEqual(msg, newMsg) { 23 | t.Error("SyncReqMessage serialization and deserialization not working properly") 24 | } 25 | } 26 | 27 | func TestSyncRspMessage(t *testing.T) { 28 | msg := SyncRspMessage{Code: MsgSyncRsp, List: []ProposedPeer{ 29 | {Id: "id_1", IpAddress: "192.168.56.1"}, 30 | {Id: "id_2", IpAddress: "192.168.56.2"}, 31 | {Id: "id_3", IpAddress: "192.168.56.3"}, 32 | {Id: "id_4", IpAddress: "192.168.56.4"}, 33 | {Id: "id_5", IpAddress: "192.168.56.5"}, 34 | }} 35 | 36 | newMsg := SyncRspMessage{} 37 | newMsg.DeSerialize(msg.Serialize()) 38 | if !reflect.DeepEqual(msg, newMsg) { 39 | t.Error("SyncRspMessage serialization and deserialization not working properly") 40 | } 41 | } 42 | 43 | func TestVerifyMessage(t *testing.T) { 44 | msg := VerifyMessage{Id: "id_node", Version: "1.02", ServicePort: "9090"} 45 | newMsg := VerifyMessage{} 46 | newMsg.DeSerialize(msg.Serialize()) 47 | if !reflect.DeepEqual(msg, newMsg) { 48 | t.Error("VerifyMessage serialization and deserialization not working properly") 49 | } 50 | } 51 | 52 | func TestDeleteMessage(t *testing.T) { 53 | msg := DeleteMessage{Key: "key_ab"} 54 | newMsg := DeleteMessage{} 55 | newMsg.DeSerialize(msg.Serialize()) 56 | if !reflect.DeepEqual(msg, newMsg) { 57 | t.Error("DeleteMessage serialization and deserialization not working properly") 58 | } 59 | } 60 | 61 | func TestGetReqMessage(t *testing.T) { 62 | msg := GetReqMessage{Key: "key_ab", PendingKey: "pending_key_1"} 63 | newMsg := GetReqMessage{} 64 | newMsg.DeSerialize(msg.Serialize()) 65 | if !reflect.DeepEqual(msg, newMsg) { 66 | t.Error("GetReqMessage serialization and deserialization not working properly") 67 | } 68 | } 69 | 70 | func TestGetRspMessage(t *testing.T) { 71 | msg := GetRspMessage{Code: MsgGETRsp, PendingKey: "pending_key_5", Data: []byte("data_5")} 72 | newMsg := GetRspMessage{} 73 | newMsg.DeSerialize(msg.Serialize()) 74 | if !reflect.DeepEqual(msg, newMsg) { 75 | t.Error("GetRspMessage serialization and deserialization not working properly") 76 | } 77 | 78 | if newMsg.Data == nil { 79 | t.Log("data is nil") 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /message/pingMessage.go: -------------------------------------------------------------------------------- 1 | package message 2 | 3 | //PingMessage is the message struct for sending ping messages 4 | type PingMessage struct { 5 | code uint16 6 | } 7 | 8 | //Serialize ping message to node wire message 9 | func (pm *PingMessage) Serialize() *NodeWireMessage { 10 | return &NodeWireMessage{Code: MsgPING} 11 | } 12 | 13 | //DeSerialize node wire message into ping message 14 | func (pm *PingMessage) 
DeSerialize(msg *NodeWireMessage) { 15 | pm.code = msg.Code 16 | } 17 | -------------------------------------------------------------------------------- /message/pongMessage.go: -------------------------------------------------------------------------------- 1 | package message 2 | 3 | //PongMessage is the struct for a ping response (pong) 4 | type PongMessage struct { 5 | code uint16 6 | } 7 | 8 | //Serialize pong message to node wire message 9 | func (pm *PongMessage) Serialize() *NodeWireMessage { 10 | return &NodeWireMessage{Code: MsgPONG} 11 | } 12 | 13 | //DeSerialize node wire message into pong message 14 | func (pm *PongMessage) DeSerialize(msg *NodeWireMessage) { 15 | pm.code = msg.Code 16 | } 17 | -------------------------------------------------------------------------------- /message/putMessage.go: -------------------------------------------------------------------------------- 1 | package message 2 | 3 | import ( 4 | "encoding/binary" 5 | ) 6 | 7 | //PutMessage is message struct for sending data across to a remoteNode 8 | type PutMessage struct { 9 | Code uint16 `json:"code"` 10 | Key string `json:"key"` 11 | Expiry uint64 `json:"expiry"` 12 | Data []byte `json:"data"` 13 | } 14 | 15 | //Serialize put message to node wire message 16 | func (pm *PutMessage) Serialize() *NodeWireMessage { 17 | msg := &NodeWireMessage{Code: MsgPUT} 18 | bKey := []byte(pm.Key) 19 | keyLen := len(bKey) 20 | msg.Data = make([]byte, keyLen+len(pm.Data)+2+8) //2 is needed for the size of the key while 8 is for expiry 21 | binary.LittleEndian.PutUint64(msg.Data, pm.Expiry) 22 | binary.LittleEndian.PutUint16(msg.Data[8:], uint16(keyLen)) 23 | copy(msg.Data[(8+2):], bKey) 24 | copy(msg.Data[(8+2+keyLen):], pm.Data) 25 | 26 | return msg 27 | } 28 | 29 | //DeSerialize node wire message into put message 30 | func (pm *PutMessage) DeSerialize(msg *NodeWireMessage) { 31 | pm.Code = MsgPUT 32 | pm.Expiry = binary.LittleEndian.Uint64(msg.Data) 33 | keyLen := binary.LittleEndian.Uint16(msg.Data[8:]) 34 | pm.Key = string(msg.Data[(8 + 2):(8 + 2 + keyLen)]) 35 | pm.Data = msg.Data[(8 + 2 + keyLen):] 36 | } 37 | -------------------------------------------------------------------------------- /message/syncReqMessage.go: -------------------------------------------------------------------------------- 1 | package message 2 | 3 | import "encoding/json" 4 | 5 | //SyncReqMessage is message struct to request synchronization from a remoteNode 6 | type SyncReqMessage struct { 7 | Code uint16 `json:"code"` 8 | Mode byte `json:"mode"` 9 | } 10 | 11 | //Serialize sync message to node wire message 12 | func (sc *SyncReqMessage) Serialize() *NodeWireMessage { 13 | data, _ := json.Marshal(sc) 14 | return &NodeWireMessage{Code: MsgSyncReq, Data: data} 15 | } 16 | 17 | //DeSerialize node wire message into sync message 18 | func (sc *SyncReqMessage) DeSerialize(msg *NodeWireMessage) { 19 | json.Unmarshal(msg.Data, sc) 20 | } 21 | -------------------------------------------------------------------------------- /message/syncRspMessage.go: -------------------------------------------------------------------------------- 1 | package message 2 | 3 | import "encoding/json" 4 | 5 | //SyncRspMessage is the struct that represents the response to a sync message 6 | type SyncRspMessage struct { 7 | Code uint64 `json:"code"` 8 | ReplicationFactor int `json:"replication_factor"` 9 | List []ProposedPeer `json:"list"` 10 | } 11 | 12 | //Serialize sync response message to node wire message 13 | func (sc *SyncRspMessage) Serialize() *NodeWireMessage 
{ 14 | sc.Code = MsgSyncRsp 15 | data, _ := json.Marshal(sc) 16 | return &NodeWireMessage{Code: MsgSyncRsp, Data: data} 17 | } 18 | 19 | //DeSerialize node wire message into sync response message 20 | func (sc *SyncRspMessage) DeSerialize(msg *NodeWireMessage) { 21 | json.Unmarshal(msg.Data, sc) 22 | } 23 | -------------------------------------------------------------------------------- /message/verifyMessge.go: -------------------------------------------------------------------------------- 1 | package message 2 | 3 | import "encoding/json" 4 | 5 | //VerifyMessage is message struct for node verification 6 | type VerifyMessage struct { 7 | Id string `json:"id"` 8 | Version string `json:"version"` 9 | ServicePort string `json:"service_port"` 10 | Mode byte `json:"mode"` 11 | } 12 | 13 | //Serialize verify message to node wire message 14 | func (vm *VerifyMessage) Serialize() *NodeWireMessage { 15 | data, _ := json.Marshal(vm) 16 | return &NodeWireMessage{Code: MsgVERIFY, Data: data} 17 | } 18 | 19 | //DeSerialize node wire message into verify message 20 | func (vm *VerifyMessage) DeSerialize(msg *NodeWireMessage) { 21 | json.Unmarshal(msg.Data, vm) 22 | } 23 | -------------------------------------------------------------------------------- /message/verifyOK.go: -------------------------------------------------------------------------------- 1 | package message 2 | 3 | //VerifyOKMessage is message for verification ok 4 | type VerifyOKMessage struct { 5 | } 6 | 7 | //Serialize verify ok message to node wire message 8 | func (vm *VerifyOKMessage) Serialize() *NodeWireMessage { 9 | return &NodeWireMessage{Code: MsgVERIFYOK, Data: nil} 10 | } 11 | 12 | //DeSerialize node wire message into verify ok message 13 | func (vm *VerifyOKMessage) DeSerialize(msg *NodeWireMessage) { 14 | } 15 | -------------------------------------------------------------------------------- /test/cache_test.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "bytes" 5 | "github.com/oaStuff/clusteredBigCache/bigcache" 6 | "testing" 7 | "time" 8 | ) 9 | 10 | func TestReplacingSameKey(t *testing.T) { 11 | bc, _ := bigcache.NewBigCache(bigcache.DefaultConfig()) 12 | bc.Set("one", []byte("one"), time.Second*5) 13 | bc.Set("one", []byte("three"), time.Second*5) 14 | val, _ := bc.Get("one") 15 | if !bytes.Equal(val, []byte("three")) { 16 | t.Error("returned value ought to be equal ot 'three'") 17 | } 18 | } 19 | 20 | func TestReplacingSameKeyWithDiffExpiry(t *testing.T) { 21 | bc, _ := bigcache.NewBigCache(bigcache.DefaultConfig()) 22 | bc.Set("one", []byte("one"), time.Second*5) 23 | bc.Set("one", []byte("three"), time.Second*3) 24 | val, _ := bc.Get("one") 25 | if !bytes.Equal(val, []byte("three")) { 26 | t.Error("returned value ought to be equal ot 'three'") 27 | } 28 | time.Sleep(time.Second * 4) 29 | val, _ = bc.Get("one") 30 | if nil != val { 31 | t.Error("value ought to have expired after 3 seconds") 32 | } 33 | } 34 | 35 | func TestRemove(t *testing.T) { 36 | bc, _ := bigcache.NewBigCache(bigcache.DefaultConfig()) 37 | bc.Set("one", []byte("one"), time.Second*5) 38 | bc.Delete("one") 39 | val, _ := bc.Get("one") 40 | if nil != val { 41 | t.Error("value ought not to still be in cache after been deleted") 42 | } 43 | } 44 | 45 | func TestExpiry(t *testing.T) { 46 | bc, _ := bigcache.NewBigCache(bigcache.DefaultConfig()) 47 | bc.Set("one", []byte("one"), time.Second*5) 48 | time.Sleep(time.Second * 3) 49 | val, _ := bc.Get("one") 50 | if 
!bytes.Equal(val, []byte("one")) { 51 | t.Error("returned value ought to be equal to 'one'") 52 | } 53 | time.Sleep(time.Second * 3) 54 | val, _ = bc.Get("one") 55 | if nil != val { 56 | t.Error("value ought to have expired") 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /utils/defs.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | //AppLogger is an interface defining logging for clusteredBigCache 4 | type AppLogger interface { 5 | Info(msg string) 6 | Warn(msg string) 7 | Critical(msg string) 8 | Error(msg string) 9 | } 10 | -------------------------------------------------------------------------------- /utils/loggerWrapper.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | //Info uses logger to print info messages 4 | func Info(logger AppLogger, msg string) { 5 | if nil != logger { 6 | logger.Info(msg) 7 | } 8 | } 9 | 10 | //Error uses logger to print error messages 11 | func Error(logger AppLogger, msg string) { 12 | if nil != logger { 13 | logger.Error(msg) 14 | } 15 | } 16 | 17 | //Warn uses logger to print warning messages 18 | func Warn(logger AppLogger, msg string) { 19 | if nil != logger { 20 | logger.Warn(msg) 21 | } 22 | } 23 | 24 | //Critical uses logger to print critical messages 25 | func Critical(logger AppLogger, msg string) { 26 | if nil != logger { 27 | logger.Critical(msg) 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /utils/sliceList.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "sync" 5 | "sync/atomic" 6 | ) 7 | 8 | //SliceList is a concurrent key/value collection backed by a sync.Map 9 | type SliceList struct { 10 | items *sync.Map 11 | size int32 12 | } 13 | 14 | //NewSliceList creates a new list 15 | func NewSliceList() *SliceList { 16 | return &SliceList{size: 0, items: &sync.Map{}} 17 | } 18 | 19 | // Add stores a value in the list under the supplied key 20 | func (list *SliceList) Add(key, value interface{}) { 21 | if nil == value { 22 | return 23 | } 24 | atomic.AddInt32(&list.size, 1) 25 | list.items.Store(key, value) 26 | } 27 | 28 | //Contains verifies if a key exists within the list 29 | func (list *SliceList) Contains(key interface{}) bool { 30 | _, ok := list.items.Load(key) 31 | return ok 32 | } 33 | 34 | //Size returns the size of the list 35 | func (list *SliceList) Size() int32 { 36 | return list.size 37 | } 38 | 39 | // Remove removes the element with the supplied key from the list.
40 | func (list *SliceList) Remove(key interface{}) { 41 | if _, ok := list.items.Load(key); ok { 42 | list.items.Delete(key) 43 | atomic.AddInt32(&list.size, -1) 44 | } 45 | } 46 | 47 | //Get returns the corresponding values of a key and true if it exists within the list 48 | func (list *SliceList) Get(key interface{}) (interface{}, bool) { 49 | return list.items.Load(key) 50 | } 51 | 52 | //Values returns the values of every key in the list 53 | func (list *SliceList) Values() []interface{} { 54 | newElements := make([]interface{}, 0, list.size) 55 | list.items.Range(func(key, value interface{}) bool { 56 | if value != nil { 57 | newElements = append(newElements, value) 58 | } 59 | return true 60 | }) 61 | return newElements 62 | } 63 | 64 | //Keys returns the keys within the list 65 | func (list *SliceList) Keys() *sync.Map { 66 | return list.items 67 | } 68 | -------------------------------------------------------------------------------- /utils/sliceList_test.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | func TestSliceList_Add(t *testing.T) { 8 | sl := NewSliceList() 9 | sl.Add(12, "value") 10 | if sl.Contains(121) { 11 | t.Error("value should not be in list") 12 | } 13 | 14 | if !sl.Contains(12) { 15 | t.Error("value should be in the list") 16 | } 17 | } 18 | 19 | func TestSlice_Index(t *testing.T) { 20 | sl := NewSliceList() 21 | sl.Add(0, "value") 22 | if data, ok := sl.Get(0); !ok && "value" != data { 23 | t.Error("indexing not working properly") 24 | } 25 | } 26 | 27 | func TestSliceList_Remove(t *testing.T) { 28 | sl := NewSliceList() 29 | sl.Add(12, 12) 30 | sl.Add(44, 44) 31 | sl.Add(123, 123) 32 | 33 | sl.Remove(12) 34 | if sl.Contains(12) { 35 | t.Error("value ought to have been deleted") 36 | } 37 | } 38 | 39 | func TestSliceList_Size(t *testing.T) { 40 | sl := NewSliceList() 41 | sl.Add(12, 12) 42 | if sl.size != 1 { 43 | t.Error("size of list should be one") 44 | } 45 | 46 | sl.Add(123, 123) 47 | sl.Add(909, 909) 48 | sl.Add(834, 834) 49 | sl.Add(76123, 76123) 50 | 51 | if sl.size != 5 { 52 | t.Error("size should be equal to five") 53 | } 54 | 55 | sl.Remove(9091) 56 | if sl.size != 5 { 57 | t.Error("size should be equal to five") 58 | } 59 | 60 | sl.Remove(909) 61 | if sl.size != 4 { 62 | t.Error("size should be equal to four") 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /utils/testUtils.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "encoding/binary" 5 | "github.com/oaStuff/clusteredBigCache/message" 6 | "net" 7 | "strconv" 8 | 9 | "time" 10 | ) 11 | 12 | //TestClient represents a client endpoint in a network connection 13 | type TestClient struct { 14 | conn *net.TCPConn 15 | respondToPings bool 16 | } 17 | 18 | //TestServer represents a server endpoint in a network connection 19 | type TestServer struct { 20 | TestClient 21 | port int 22 | started chan struct{} 23 | } 24 | 25 | //NewTestClient creates a new client connection 26 | func NewTestClient() *TestClient { 27 | return &TestClient{} 28 | } 29 | 30 | //NewTestServer creates a server connection 31 | func NewTestServer(port int, respondToPings bool) *TestServer { 32 | return &TestServer{port: port, 33 | TestClient: TestClient{respondToPings: respondToPings}, 34 | started: make(chan struct{})} 35 | } 36 | 37 | //Start starts up the network 38 | func (s *TestServer) Start() error { 39 
| l, err := net.Listen("tcp", net.JoinHostPort("", strconv.Itoa(s.port))) 40 | if err != nil { 41 | return err 42 | } 43 | go func() { 44 | conn, _ := l.Accept() 45 | s.conn = conn.(*net.TCPConn) 46 | go s.readNetwork() 47 | close(s.started) 48 | l.Close() 49 | }() 50 | 51 | return nil 52 | } 53 | 54 | //Close terminates the network connection 55 | func (c *TestClient) Close() { 56 | if c.conn != nil { 57 | c.conn.Close() 58 | } 59 | } 60 | 61 | //SendVerifyMessage send a verify message to remote peer 62 | func (s *TestServer) SendVerifyMessage(id string) { 63 | <-s.started 64 | verifyMsg := message.VerifyMessage{Id: id, ServicePort: "1111"} 65 | wMsg := verifyMsg.Serialize() 66 | d := make([]byte, 6+len(wMsg.Data)) 67 | binary.LittleEndian.PutUint32(d, uint32(len(wMsg.Data)+2)) //the 2 is for the message code 68 | binary.LittleEndian.PutUint16(d[4:], wMsg.Code) 69 | copy(d[6:], wMsg.Data) 70 | s.conn.Write(d) 71 | } 72 | 73 | func (c *TestClient) readNetwork() { 74 | 75 | go func() { 76 | for range time.NewTicker(time.Second * 1).C { 77 | msg := &message.PingMessage{} 78 | wireMsg := msg.Serialize() 79 | data := make([]byte, 6+len(wireMsg.Data)) 80 | binary.LittleEndian.PutUint32(data, uint32(len(wireMsg.Data)+2)) //the 2 is for the message code 81 | binary.LittleEndian.PutUint16(data[4:], wireMsg.Code) 82 | copy(data[6:], wireMsg.Data) 83 | c.conn.Write(data) 84 | } 85 | }() 86 | 87 | header := make([]byte, 6) 88 | for { 89 | _, err := c.conn.Read(header) 90 | if err != nil { 91 | break 92 | } 93 | 94 | length := binary.LittleEndian.Uint32(header) 95 | code := binary.LittleEndian.Uint16(header[4:]) 96 | 97 | var data []byte 98 | if (length - 2) > 0 { 99 | data = make([]byte, length-2) 100 | _, err = c.conn.Read(data) 101 | if err != nil { 102 | break 103 | } 104 | } 105 | 106 | if code == message.MsgVERIFY { 107 | msg := &message.VerifyOKMessage{} 108 | wireMsg := msg.Serialize() 109 | data := make([]byte, 6+len(wireMsg.Data)) 110 | binary.LittleEndian.PutUint32(data, uint32(len(wireMsg.Data)+2)) //the 2 is for the message code 111 | binary.LittleEndian.PutUint16(data[4:], wireMsg.Code) 112 | copy(data[6:], wireMsg.Data) 113 | c.conn.Write(data) 114 | 115 | } else if code == message.MsgPING { 116 | if c.respondToPings { 117 | msg := &message.PongMessage{} 118 | wireMsg := msg.Serialize() 119 | data := make([]byte, 6+len(wireMsg.Data)) 120 | binary.LittleEndian.PutUint32(data, uint32(len(wireMsg.Data)+2)) //the 2 is for the message code 121 | binary.LittleEndian.PutUint16(data[4:], wireMsg.Code) 122 | copy(data[6:], wireMsg.Data) 123 | c.conn.Write(data) 124 | } 125 | 126 | } else if code == message.MsgVERIFYOK { 127 | msg := &message.SyncReqMessage{} 128 | wireMsg := msg.Serialize() 129 | data := make([]byte, 6+len(wireMsg.Data)) 130 | binary.LittleEndian.PutUint32(data, uint32(len(wireMsg.Data)+2)) //the 2 is for the message code 131 | binary.LittleEndian.PutUint16(data[4:], wireMsg.Code) 132 | copy(data[6:], wireMsg.Data) 133 | c.conn.Write(data) 134 | 135 | } else if code == message.MsgPONG { 136 | 137 | } else if code == message.MsgSyncReq { 138 | listMsg := message.SyncRspMessage{} 139 | listMsg.List = []message.ProposedPeer{{Id: "remote_1", IpAddress: "192.168.56.21:9990"}, 140 | {Id: "remote_2", IpAddress: "172.16.111.89:9991"}, 141 | {Id: "remote_3", IpAddress: "10.10.0.1:9090"}} 142 | 143 | wMsg := listMsg.Serialize() 144 | d := make([]byte, 6+len(wMsg.Data)) 145 | binary.LittleEndian.PutUint32(d, uint32(len(wMsg.Data)+2)) //the 2 is for the message code 146 | 
binary.LittleEndian.PutUint16(d[4:], wMsg.Code) 147 | copy(d[6:], wMsg.Data) 148 | c.conn.Write(d) 149 | } 150 | } 151 | } 152 | -------------------------------------------------------------------------------- /utils/utils.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "math/rand" 5 | "sync" 6 | "time" 7 | ) 8 | 9 | const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" 10 | const ( 11 | letterIdxBits = 6 // 6 bits to represent a letter index 12 | letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits 13 | letterIdxMax = 63 / letterIdxBits // # of letter indices fitting in 63 bits 14 | ) 15 | 16 | var src = rand.NewSource(time.Now().UnixNano()) 17 | var srcLock sync.Mutex 18 | 19 | //GenerateNodeId generates a random string of length size, used as a node id 20 | func GenerateNodeId(size int) string { 21 | srcLock.Lock() 22 | defer srcLock.Unlock() 23 | b := make([]byte, size) 24 | 25 | //src.Int63() generates 63 random bits, enough for letterIdxMax letters 26 | for i, cache, remain := size-1, src.Int63(), letterIdxMax; i >= 0; { 27 | if remain == 0 { 28 | cache, remain = src.Int63(), letterIdxMax 29 | } 30 | if idx := int(cache & letterIdxMask); idx < len(letterBytes) { 31 | b[i] = letterBytes[idx] 32 | i-- 33 | } 34 | cache >>= letterIdxBits 35 | remain-- 36 | } 37 | 38 | return string(b) 39 | } 40 | --------------------------------------------------------------------------------
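A minimal sketch (not part of this repository) of the wire framing that utils/testUtils.go applies inline before every write: a 4-byte little-endian length covering the 2-byte message code plus the payload, then the code, then the payload. The helper name frameMessage is illustrative only:

package main

import (
    "encoding/binary"

    "github.com/oaStuff/clusteredBigCache/message"
)

//frameMessage packs a NodeWireMessage into the layout used above:
//[4-byte LE length = len(data)+2][2-byte LE code][data]
func frameMessage(msg *message.NodeWireMessage) []byte {
    buf := make([]byte, 6+len(msg.Data))
    binary.LittleEndian.PutUint32(buf, uint32(len(msg.Data)+2)) //the 2 is for the message code
    binary.LittleEndian.PutUint16(buf[4:], msg.Code)
    copy(buf[6:], msg.Data)
    return buf
}

func main() {
    ping := (&message.PingMessage{}).Serialize()
    _ = frameMessage(ping) //6 bytes total: length=2, code=MsgPING, no payload
}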
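A hypothetical client-side use of comms.Connection; the endpoint address and payload are illustrative, and in the real protocol the bytes written would be a framed NodeWireMessage as sketched above:

package main

import (
    "log"
    "time"

    "github.com/oaStuff/clusteredBigCache/comms"
)

func main() {
    //dial the remote node with a 5 second connect timeout
    conn, err := comms.NewConnection("127.0.0.1:9911", time.Second*5)
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()

    //SendData keeps writing until the whole buffer has been sent
    if err := conn.SendData([]byte("hello")); err != nil {
        log.Fatal(err)
    }

    //ReadData blocks for exactly 6 bytes (one frame header) or fails after the timeout
    header, err := conn.ReadData(6, time.Second*2)
    if err != nil {
        log.Fatal(err)
    }
    _ = header
}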