├── .idea
│   ├── four-seasons.iml
│   ├── misc.xml
│   ├── modules.xml
│   ├── thriftCompiler.xml
│   ├── vcs.xml
│   └── workspace.xml
├── README.md
├── algorithm
│   ├── doubly_list.go
│   ├── doubly_list_test.go
│   ├── linkedlist.go
│   ├── priority_queue.go
│   └── priority_queue_test.go
├── cache-goV1
│   ├── Dispatcher.go
│   ├── cache.go
│   ├── cache_test.go
│   ├── commons.go
│   ├── commons_test.go
│   ├── defs.go
│   ├── dispatcher_test.go
│   ├── easy_cache.go
│   ├── element.go
│   ├── lfu.go
│   ├── lfu_doubly_list.go
│   ├── liquidator.go
│   ├── liquidator_test.go
│   ├── lru.go
│   ├── lru_doubly_list.go
│   ├── lru_test.go
│   ├── manager.go
│   ├── manager_test.go
│   └── signal_cache.go
└── log
    └── log.go

/README.md:
--------------------------------------------------------------------------------
# channel-cache (Golang)

[toc]

![img](https://timgsa.baidu.com/timg?image&quality=80&size=b9999_10000&sec=1575746010175&di=fe88bcf4903519ce8eacd51bae5379ef&imgtype=0&src=http%3A%2F%2Fpic2.zhimg.com%2Fv2-e23145800bbd3d684aef85ad51145eee_1200x500.jpg)

## Important [Updated 2020-11-27]

This project was written early on and has some gaps, including gaps in my own knowledge at the time. I have since joined a company (one of the top internet companies in China) and learned a great deal from experienced engineers there.
It is still a good project to learn from, even though the single-executor design creates a performance bottleneck.
Many of the ideas in it remain worth studying, including the eviction strategies.
I am currently writing a toolkit that is worth using in real projects, including a cache component (much faster than this one), a logging component (which I find very handy), and more...
Link: https://github.com/xy63237777/go-lib-utils
It is still under active development.
That cache is also worth studying: https://github.com/xy63237777/go-lib-utils/tree/master/cache

## Introduction

- This is a high-performance cache framework for Go. It is not perfect, but it is thread-safe without using any locks: it is built on channel commands, supports both synchronous and asynchronous reads, provides LRU and LFU eviction strategies, and lets you set an expiration time per entry. (A minimal sketch of the channel-command idea is shown at the end of this section.)

- Below is a quick start that shows how to use it. If you want to study the source code of this cache to sharpen your skills, or if you can help me push its performance further, my blog walks through my complete thinking at the time and where the remaining problems are.

  Blog post (in Chinese): https://blog.csdn.net/qq_42011541/article/details/103410959 <- click for the detailed write-up

- **If you want to learn Go and are looking for a solid project to level up with, this project is a great choice.**
- You can reach me by email: 63237777@qq.com
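
To make the "channel commands, no locks" idea concrete, here is a minimal, self-contained sketch of the pattern. It is a simplified illustration rather than the project's actual code (the real implementation lives in cache-goV1/Dispatcher.go and cache-goV1/cache.go), but the shape is the same: every operation is wrapped as a command, sent down a channel, and executed by a single goroutine that owns the data, so no mutex is ever needed.

```go
package main

import "fmt"

// command describes one operation for the executor; reply carries a result back.
type command struct {
	action string
	key    string
	val    interface{}
	reply  chan interface{}
}

// startExecutor launches the single executor goroutine. Because only this
// goroutine ever touches the map, no locking is required anywhere.
func startExecutor() chan<- command {
	queue := make(chan command, 64)
	go func() {
		data := make(map[string]interface{})
		for cmd := range queue {
			switch cmd.action {
			case "set":
				data[cmd.key] = cmd.val
			case "get":
				cmd.reply <- data[cmd.key]
			case "del":
				delete(data, cmd.key)
			}
		}
	}()
	return queue
}

func main() {
	queue := startExecutor()
	queue <- command{action: "set", key: "hello", val: "world"}

	reply := make(chan interface{}, 1)
	queue <- command{action: "get", key: "hello", reply: reply}
	fmt.Println(<-reply) // world
}
```
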
## Quick Start

A single command is enough to pull down the released version of the project:

```shell
go get -v github.com/xy63237777/channel-cache
```

If you want to read the source, or contribute your ideas and code to the project, switch to the dev branch:

dev : https://github.com/xy63237777/channel-cache/tree/channel-dev

Then obtain a Cache instance in your code:

```go
package main

import (
	gocache "github.com/xy63237777/channel-cache/cache-goV1"
)

func main() {
	cache := gocache.NewSignalCacheForDefault()
	_ = cache // ready to use
}
```

You can also choose the policy yourself.

The first parameter selects the eviction policy, here CacheForLRU (least recently used); the default is LFU (least frequently used). The second parameter is the capacity of the cache.

The default capacity is 2048.

Here is a simple example to start with:

```go
cache := gocache.NewSignalCache(gocache.CacheForLRU, gocache.DefaultCapacity)
```

Get returns the value itself together with a bool. The bool tells you whether the object exists: you normally get either the value you want plus true, or nil plus false.

```go
func main() {
	cache := gocache.NewSignalCache(gocache.CacheForLRU, gocache.DefaultCapacity)
	cache.Set("hello", "world")
	fmt.Println(cache.Get("hello"))
}
```

You can also give an entry an expiration time when you Set it. If you do not specify one, it never expires by default.

In the code below the entry expires after 3 hours:

```go
func main() {
	cache := gocache.NewSignalCache(gocache.CacheForLRU, gocache.DefaultCapacity)
	cache.SetForExpiration("hello", "world", time.Hour * 3)
	fmt.Println(cache.Get("hello"))
}
```

The code below returns your result asynchronously.

What you get back first is a channel, and you read the value from that channel. In other words, if you are not in a hurry for the value, you can take the channel first, let the work finish asynchronously on the client side, and only read from the channel when you actually need the value.

```go
func main() {
	cache := gocache.NewSignalCache(gocache.CacheForLRU, gocache.DefaultCapacity)
	cache.SetForExpiration("hello", "world", time.Hour * 3)
	async := cache.GetAsync("hello")
	val := <- async
	fmt.Println(val)
}
```

And of course you can delete a cached entry:

```go
func main() {
	cache := gocache.NewSignalCache(gocache.CacheForLRU, gocache.DefaultCapacity)
	cache.SetForExpiration("hello", "world", time.Hour * 3)
	async := cache.GetAsync("hello")
	val := <- async
	fmt.Println(val)
	cache.Delete("hello")
}
```

## More Advanced Usage

You can create a cache manager. The manager is a singleton, so feel free to use it anywhere.

If you want to run a standalone cache server, say a Go server and client with some load balancing in front, then you should use the manager. If you only need a simple cache, NewSignalCache should be your first choice.

```go
func main() {
	manager := gocache.NewCacheManager()
	manager.CreateCacheForDefault("cache1")
}
```

Get the cache from the manager and use it exactly as before:

```go
func main() {
	manager := gocache.NewCacheManager()
	manager.CreateCacheForDefault("cache1")
	cache := manager.GetCache("cache1")
	cache.Set("hello", "world")
	cache.Get("hello")
}
```

You can stop and later resume a cache.

Once a cache is stopped, none of your operations are accepted, and a warning is printed.

```go
func main() {
	manager := gocache.NewCacheManager()
	manager.CreateCacheForDefault("cache1")
	cache := manager.GetCache("cache1")
	err := manager.CacheStop("cache1")
	if err != nil {
		panic(err)
	}
	cache.Set("hello", "world")
	fmt.Println(cache.Get("hello"))
	err = manager.CacheRun("cache1")
	if err != nil {
		panic(err)
	}
	cache.Set("hello", "world")
	fmt.Println(cache.Get("hello"))
}
```

This is what it prints on my machine:

```
[GOPM] 12-08 00:21:23 [ WARN] Warning... Using a stopped cache
[GOPM] 12-08 00:21:23 [ WARN] Warning... Using a stopped cache
false
world true

```

You can also add a cache instance to the manager, or remove one from it.

If you want a custom cache and do not want to use LRU or LFU, all you need to do is implement the interface below (defined in cache-goV1/defs.go):

```go
type CacheInter interface {
	Get(string) interface{}
	Put(string, interface{})
	Capacity() int
	Delete(string)
}
```

```go
package main

import gocache "four-seasons/cache-goV1"

func main() {
	// pass your own CacheInter implementation in as the argument
	cache := gocache.NewSignalCacheCustom(yourCacheImpl)
	_ = cache
}
```

You can of course also manage such a cache through the Manager (see CreateCacheCustom). A fuller sketch of a custom backend follows below.
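
As a fuller illustration, here is a minimal sketch of a custom backend: a naive, unbounded map-based cache that satisfies CacheInter and is handed to NewSignalCacheCustom. The type name naiveCache is only illustrative, and the import path follows the four-seasons example above; adjust it to however you import the package.

```go
package main

import (
	"fmt"

	gocache "four-seasons/cache-goV1"
)

// naiveCache is a deliberately simple CacheInter implementation. It never
// evicts anything and only reports a nominal capacity. All calls are funnelled
// through the cache's single executor goroutine, so it needs no locking.
type naiveCache struct {
	capacity int
	elements map[string]interface{}
}

func (n *naiveCache) Get(key string) interface{}      { return n.elements[key] }
func (n *naiveCache) Put(key string, val interface{}) { n.elements[key] = val }
func (n *naiveCache) Delete(key string)               { delete(n.elements, key) }
func (n *naiveCache) Capacity() int                   { return n.capacity }

func main() {
	impl := &naiveCache{capacity: 1024, elements: make(map[string]interface{})}
	cache := gocache.NewSignalCacheCustom(impl)
	cache.Set("hello", "world")
	fmt.Println(cache.Get("hello")) // world true
}
```
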

## Shortcomings and Summary

Probably because reading and writing a channel is relatively expensive, this design actually costs more than plain locking and unlocking would. My original plan was indeed to use locks, but locking without tuning the lock granularity has no soul, and once the LFU algorithm was involved the granularity got messy; it was very easy to mangle the code beyond recognition. That led me to the Redis approach: there is only one executor. I imitated that model here: a single goroutine listens on the channel and executes whatever arrives, so no locking is needed at all, and the code is pleasant to write. The price is that performance roughly halves, but so far I think the trade-off is worth it.

Compared with another Go cache framework on GitHub, this one is one to two times slower, partly because I issue too many commands. What you get in exchange is richer functionality: a capacity limit, eviction algorithms, and later even your own custom data structures, all without ever taking a lock. Isn't that exactly the Go philosophy? The simplest way is the best.

I also hope you will give the project a follow and pool your ideas to help me optimize it. I actually had many more ideas, but I suspected they would not be worth the cost, so I never tried them. All of my thinking is written up in the blog post.
--------------------------------------------------------------------------------
/algorithm/doubly_list.go:
--------------------------------------------------------------------------------
1 | package algorithm
2 |
3 | type DoublyListNode struct {
4 | 	prev *DoublyListNode
5 | 	next *DoublyListNode
6 | 	Data interface{}
7 | }
8 |
9 |
10 | type DoublyLinkedList struct {
11 | 	size int
12 | 	head *DoublyListNode
13 | 	tail *DoublyListNode
14 | }
15 |
16 | func New() *DoublyLinkedList {
17 | 	head := &DoublyListNode{}
18 | 	tail := &DoublyListNode{}
19 | 	head.next = tail
20 | 	tail.prev = head
21 | 	return &DoublyLinkedList{
22 | 		head:head,
23 | 		tail:tail,
24 | 		size:0,
25 | 	}
26 | }
27 |
28 | func (dln *DoublyListNode) LeaveChain() {
29 | 	dln.next.prev = dln.prev
30 | 	dln.prev.next = dln.next
31 | 	dln.next = nil
32 | 	dln.prev = nil
33 | }
34 |
35 | func (dll *DoublyLinkedList) Size() int {
36 | 	return dll.size
37 | }
38 |
39 | func (dll *DoublyLinkedList) GetHead() interface{} {
40 | 	if dll.Size() == 0 {
41 | 		return nil
42 | 	}
43 | 	return dll.head.next.Data
44 | }
45 |
46 | func (dll *DoublyLinkedList) GetHeadForNode() *DoublyListNode {
47 | 	if dll.Size() == 0 {
48 | 		return nil
49 | 	}
50 | 	return dll.head.next
51 | }
52 |
53 | func (dll *DoublyLinkedList) IsEmpty() bool {
54 | 	return dll.head.next == dll.tail
55 | }
56 |
57 | func (dll *DoublyLinkedList) GetTailForNode() *DoublyListNode {
58 | 	if dll.Size() == 0 {
59 | 		return nil
60 | 	}
61 | 	return dll.tail.prev // the last real node sits just before the sentinel tail
62 | }
63 |
64 | func (dll *DoublyLinkedList) AddFirst(data interface{}) {
65 | 	node := &DoublyListNode{Data:data, next:dll.head.next, prev:dll.head}
66 | 	dll.head.next.prev = node
67 | 	dll.head.next = node
68 | 	dll.size++
69 | }
70 |
71 | func (dll *DoublyLinkedList) RemoveHead() (interface{}, bool) {
72 | 	if dll.Size() == 0 {
73 | 		return nil, false
74 | 	}
75 | 	node := dll.head.next
76 | 	node.next.prev = dll.head
77 | 	dll.head.next = node.next
78 | 	node.next = nil
79 | 	node.prev = nil
80 | 	return node.Data, true
81 | }
82 |
83 | func (dll *DoublyLinkedList) RemoveHeadForNode() (*DoublyListNode, bool) {
84 | 	if dll.Size() == 0 {
85 | 		return nil, false
86 | 	}
87 | 	node := dll.head.next
88 | 	node.next.prev = dll.head
89 | 	dll.head.next = node.next
90 | 	node.next = nil
91 | 	node.prev = nil
92 | 	return node, true
93 | }
94 |
95 | func (dll *DoublyLinkedList) AddTail(data interface{}) {
96 | 	node :=
&DoublyListNode{Data:data, next:dll.tail, prev:dll.tail.prev} 97 | dll.tail.prev.next = node 98 | dll.tail.prev = node 99 | dll.size++ 100 | } 101 | 102 | func (dll *DoublyLinkedList) RemoveTail() (interface{}, bool) { 103 | if dll.Size() == 0 { 104 | return nil, false 105 | } 106 | node := dll.tail.prev 107 | node.prev.next = dll.tail 108 | dll.tail.prev = node.prev 109 | node.prev = nil 110 | node.next = nil 111 | return node.Data, true 112 | } 113 | 114 | func (dll *DoublyLinkedList) RemoveTailForNode() (*DoublyListNode, bool) { 115 | if dll.Size() == 0 { 116 | return nil, false 117 | } 118 | node := dll.tail.prev 119 | node.prev.next = dll.tail 120 | dll.tail.prev = node.prev 121 | node.prev = nil 122 | node.next = nil 123 | return node, true 124 | } 125 | 126 | -------------------------------------------------------------------------------- /algorithm/doubly_list_test.go: -------------------------------------------------------------------------------- 1 | package algorithm 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | func TestDoublyLinkedList(t *testing.T) { 8 | list := New() 9 | str := "hello" 10 | list.AddFirst(str) 11 | list.AddTail("abc") 12 | list.AddFirst("kkkk") 13 | show(list) 14 | list.RemoveTail() 15 | show(list) 16 | list.RemoveHead() 17 | show(list) 18 | } 19 | 20 | func show(list *DoublyLinkedList) { 21 | for temp := list.head.next; temp != nil; temp = temp.next { 22 | //fmt.Println(temp.data) 23 | } 24 | } -------------------------------------------------------------------------------- /algorithm/linkedlist.go: -------------------------------------------------------------------------------- 1 | package algorithm 2 | 3 | type LinkedList struct { 4 | 5 | } 6 | -------------------------------------------------------------------------------- /algorithm/priority_queue.go: -------------------------------------------------------------------------------- 1 | package algorithm 2 | 3 | import "container/heap" 4 | 5 | type Comparable interface { 6 | CompareTo(other Comparable) int 7 | } 8 | 9 | type PriorityQueue struct { 10 | base *priorityQueue 11 | } 12 | 13 | func NewPriorityQueue() *PriorityQueue { 14 | priorityQueue := PriorityQueue{base:new(priorityQueue)} 15 | heap.Init(priorityQueue.base) 16 | return &priorityQueue 17 | } 18 | 19 | 20 | type priorityQueue []Comparable 21 | 22 | func (pq priorityQueue) Less(i, j int) bool { 23 | //return true 24 | return pq[i].CompareTo(pq[j]) < 0 25 | } 26 | 27 | func (pq *priorityQueue) Push(x interface{}) { 28 | *pq = append(*pq, x.(Comparable)) 29 | } 30 | 31 | func (pq *priorityQueue) Pop() interface{} { 32 | n := len(*pq) 33 | if n <= 0 { 34 | return nil 35 | } 36 | x := (*pq)[n - 1] 37 | *pq = (*pq)[0 : n-1] 38 | return x 39 | } 40 | 41 | 42 | func (pq *priorityQueue) Swap(i, j int) { 43 | (*pq)[i],(*pq)[j] = (*pq)[j],(*pq)[i] 44 | } 45 | 46 | func (pq priorityQueue) Len() int { 47 | return len(pq) 48 | } 49 | 50 | 51 | func (pq *PriorityQueue) Push(x Comparable) { 52 | heap.Push(pq.base, x) 53 | } 54 | 55 | func (pq *PriorityQueue) Pop() Comparable { 56 | return heap.Pop(pq.base).(Comparable) 57 | } 58 | 59 | func (pq PriorityQueue) Top() Comparable { 60 | 61 | return (*pq.base)[0].(Comparable) 62 | } 63 | 64 | 65 | func (pq PriorityQueue) Length() int { 66 | return pq.base.Len() 67 | } 68 | 69 | -------------------------------------------------------------------------------- /algorithm/priority_queue_test.go: -------------------------------------------------------------------------------- 1 | package algorithm 2 | 3 | import ( 4 | 
"fmt" 5 | "testing" 6 | ) 7 | 8 | type myInt int 9 | 10 | func (m myInt) CompareTo(other Comparable) int { 11 | return int(m - other.(myInt)) 12 | } 13 | 14 | func TestNewPriorityQueue(t *testing.T) { 15 | var a myInt = 5 16 | var b myInt = 9 17 | var c myInt = 7 18 | queue := NewPriorityQueue() 19 | queue.Push(a) 20 | queue.Push(b) 21 | queue.Push(c) 22 | fmt.Println("top ", queue.Top()) 23 | fmt.Println("top ", queue.Top()) 24 | fmt.Println(queue.Pop()) 25 | fmt.Println("top ", queue.Top()) 26 | fmt.Println(queue.Pop()) 27 | fmt.Println("top ", queue.Top()) 28 | fmt.Println(queue.Pop()) 29 | } 30 | -------------------------------------------------------------------------------- /cache-goV1/Dispatcher.go: -------------------------------------------------------------------------------- 1 | package gocache 2 | 3 | import ( 4 | "time" 5 | ) 6 | 7 | const ( 8 | CLOSE = "close" 9 | ) 10 | 11 | func newDefaultDispatcher() dispatcher { 12 | return dispatcher{queue:make(chan *commons,DefaultCommonsChannelSize), 13 | stateCh:make(chan string, 1)} 14 | } 15 | 16 | func (d *dispatcher) start(li *Liquidator) { 17 | go d.run(li) 18 | } 19 | 20 | func (d *dispatcher) doClose() { 21 | close(d.queue) 22 | for commons := range d.queue { 23 | (*commons.fn)(commons.data) 24 | } 25 | } 26 | 27 | func (d *dispatcher) run(li *Liquidator) { 28 | ticker := time.NewTicker(DefaultCleatStep) 29 | for { 30 | select { 31 | case commons := <- d.queue: (*commons.fn)(commons.data) 32 | case state := <- d.stateCh: 33 | if state == CLOSE { 34 | d.doClose() 35 | ticker.Stop() 36 | return 37 | } 38 | case <- ticker.C: 39 | li.clearFunc() 40 | } 41 | } 42 | } 43 | 44 | 45 | 46 | -------------------------------------------------------------------------------- /cache-goV1/cache.go: -------------------------------------------------------------------------------- 1 | package gocache 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "four-seasons/log" 7 | "time" 8 | ) 9 | 10 | func newCacheForCustom(ca CacheInter) *Cache { 11 | cache := newCache("", 0) 12 | cache.cache = ca 13 | return cache 14 | } 15 | 16 | func newCache(typeCache CacheType, capacity int) *Cache { 17 | var cache *Cache 18 | if capacity <= 0 { 19 | capacity = DefaultCapacity 20 | } 21 | cache = &Cache{cache:nil, 22 | dp:newDefaultDispatcher(), 23 | expiration:DefaultExpiration, 24 | liquidator:newLiquidator(), 25 | onceOceChannel:make(chan interface{}, 1), 26 | } 27 | if typeCache == CacheForLRU { 28 | cache.cache = newLRUCache(capacity, cache) 29 | } else if typeCache == CacheForEasy { 30 | cache.cache = newEasyCache(capacity) 31 | } else if typeCache == "" { 32 | //nothing 33 | } else { 34 | cache.cache = newLFUCache(capacity, cache) 35 | } 36 | 37 | cache.dp.start(cache.liquidator) 38 | return cache 39 | } 40 | 41 | 42 | func newCacheForDefault() *Cache { 43 | return newCache(CacheForLFU, DefaultCapacity) 44 | } 45 | 46 | 47 | /* 48 | ----------------------------------------------------------------------------- 49 | 50 | */ 51 | 52 | func (c *Cache) IsClose() bool { 53 | return c.isClose 54 | } 55 | 56 | 57 | func (c *Cache) close(name string) error { 58 | if c.isClose { 59 | log.Error("Cache name : %s is Closed", name) 60 | return errors.New("Cache Name : " + name +" is Closed") 61 | } 62 | log.Info("Closing cache Name is %s ...",name) 63 | c.isClose = true 64 | for i := 0; i < len(c.dp.queue); i++ { 65 | c.dp.stateCh <- CLOSE 66 | } 67 | return nil 68 | } 69 | 70 | func (c *Cache) run(name string) error { 71 | if !c.isClose { 72 | log.Error("Cache name : %s is 
Running", name) 73 | return errors.New("Cache Name : " + name +" is Running") 74 | } 75 | log.Info("Start cache Name is %s ...",name) 76 | c.isClose = false 77 | c.dp.start(c.liquidator) 78 | return nil 79 | } 80 | 81 | /* 82 | ----------------------------------------------------------------------------- 83 | 84 | */ 85 | func (c Cache) GetDefaultExpiration() time.Duration { 86 | return c.expiration 87 | } 88 | 89 | func (c *Cache) SetDefaultExpiration(newExpiration time.Duration) { 90 | c.expiration = newExpiration 91 | } 92 | 93 | /* 94 | ----------------------------------------------------------------------------- 95 | */ 96 | 97 | 98 | func (c *Cache) Set(key string, val interface{}) { 99 | if checkCloseGentle(c) { 100 | return 101 | } 102 | c.setAsync(key, newItem(val, NoExpiration)) 103 | } 104 | 105 | func (c *Cache) SetForDefaultExpiration(key string, val interface{}) { 106 | if checkCloseGentle(c) { 107 | return 108 | } 109 | doPush(c, newClearNode(c, key, c.expiration)) 110 | c.setAsync(key, newItem(val, c.expiration)) 111 | } 112 | 113 | func doPush(c *Cache, node *needClearNode) { 114 | c.liquidator.push(node) 115 | if c.liquidator.elements.Length() >= c.cache.Capacity() { 116 | c.liquidator.clearFunc() 117 | } 118 | 119 | } 120 | 121 | func (c *Cache) SetForExpiration(key string, val interface{}, d time.Duration) { 122 | if checkCloseGentle(c) { 123 | return 124 | } 125 | doPush(c, newClearNode(c, key, d)) 126 | c.setAsync(key, newItem(val, d)) 127 | } 128 | 129 | 130 | /* 131 | ----------------------------------------------------------------------------- 132 | 133 | */ 134 | 135 | func (c *Cache) setAsync(key string, val *item) { 136 | setFunc(key, val, c) 137 | } 138 | 139 | 140 | 141 | var onceSetFunc = func(data *commonsData) { 142 | data.master.cache.Put(*data.key, data.val) 143 | } 144 | 145 | func setFunc(key string, val *item ,c *Cache) { 146 | nc :=newCommons(&onceSetFunc, newCommonsData(key, val, c, nil)) 147 | c.dp.queue <- nc 148 | } 149 | 150 | /* 151 | ----------------------------------------------------------------------------- 152 | 153 | */ 154 | 155 | 156 | 157 | func (c *Cache) Get(key string) (interface{}, bool) { 158 | if checkCloseGentle(c) { 159 | return nil, false 160 | } 161 | 162 | return c.get(key) 163 | } 164 | 165 | 166 | func (c *Cache) GetAsync(key string) chan interface{} { 167 | if checkCloseGentle(c) { 168 | return nil 169 | } 170 | return c.getAsync(key) 171 | } 172 | 173 | var onceGetFunc = func(data *commonsData) { 174 | obj := data.master.cache.Get(*data.key) 175 | if obj == nil { 176 | data.out <- nil 177 | } else { 178 | item := obj.(*item) 179 | if item.IsExpired() { 180 | data.out <- nil 181 | } else { 182 | data.out <- item.obj 183 | } 184 | } 185 | } 186 | 187 | 188 | 189 | func getHash(key *string, size int) byte { 190 | hash := (*key)[0]%uint8(size) 191 | fmt.Println(hash, *key) 192 | return hash 193 | } 194 | 195 | func getFunc(key string, c *Cache, ch chan interface{}) { 196 | c.dp.queue <- newCommons(&onceGetFunc, newCommonsData(key, nil, c,ch)) 197 | } 198 | 199 | 200 | 201 | 202 | func (c *Cache) get(key string) (interface{} ,bool) { 203 | getFunc(key, c, c.onceOceChannel) 204 | obj := <- c.onceOceChannel 205 | if obj == nil { 206 | return obj, false 207 | } 208 | return obj, true 209 | } 210 | 211 | func (c *Cache) getAsync(key string) chan interface{} { 212 | ch := make(chan interface{}, 1) 213 | getFunc(key, c, ch) 214 | return ch 215 | } 216 | 217 | 218 | 219 | /* 220 | 
----------------------------------------------------------------------------- 221 | */ 222 | 223 | func (c *Cache) Delete(key string) { 224 | if checkCloseGentle(c) { 225 | return 226 | } 227 | c.delAsync(key) 228 | } 229 | 230 | //func (c *Cache) Delete(key string) interface{} { 231 | // if checkCloseGentle(c) { 232 | // return nil 233 | // } 234 | // return c.del(key) 235 | //} 236 | 237 | //func (c *Cache) DeleteAsync(key string) interface{} { 238 | // if checkCloseGentle(c) { 239 | // return nil 240 | // } 241 | // return c.delAsync(key) 242 | //} 243 | 244 | //func (c *Cache) del(key string) interface{} { 245 | // ch := make(chan interface{}, 1) 246 | // delFunc(key, c, ch) 247 | // return <- ch 248 | //} 249 | 250 | 251 | func (c *Cache) delAsync(key string) { 252 | delFunc(key, c) 253 | } 254 | 255 | //func (c *Cache) delAsync(key string) chan interface{} { 256 | // ch := make(chan interface{}, 1) 257 | // delFunc(key, c, ch) 258 | // return ch 259 | //} 260 | 261 | var onceDelFunc = func(data *commonsData) { 262 | data.master.cache.Delete(*data.key) 263 | } 264 | 265 | func delFunc(key string ,c *Cache) { 266 | c.dp.queue <- newCommons(&onceDelFunc, newCommonsData(key, nil, c, nil)) 267 | } 268 | 269 | /* 270 | ----------------------------------------------------------------------------- 271 | */ 272 | 273 | func checkClose(c *Cache) { 274 | if c.isClose { 275 | panic("Cannot use a stopped Cache\n" + 276 | "It is not recommended that you use stop to stop\n" + 277 | "A better way is to use signal") 278 | } 279 | } 280 | 281 | func checkCloseGentle(c *Cache) bool { 282 | if c.isClose { 283 | log.Warn("Warning... Using a stopped cache") 284 | return true 285 | } 286 | return false 287 | } 288 | 289 | 290 | -------------------------------------------------------------------------------- /cache-goV1/cache_test.go: -------------------------------------------------------------------------------- 1 | package gocache 2 | 3 | import ( 4 | "fmt" 5 | "strconv" 6 | "testing" 7 | "time" 8 | ) 9 | 10 | func TestCache_GetAsync(t *testing.T) { 11 | cache := NewSignalCacheForDefault() 12 | cache.Set("hello", "world") 13 | fmt.Println(cache.Get("hello")) 14 | async := cache.GetAsync("hello") 15 | fmt.Println(<- async) 16 | cache.SetForExpiration("kkm", "world",time.Second) 17 | time.Sleep(time.Second) 18 | fmt.Println(cache.Get("kkm")) 19 | fmt.Println(<- cache.GetAsync("kkm")) 20 | //fmt.Println(cache.SetForExpiration("hello","pppp",time.Second)) 21 | fmt.Println(cache.Get("hello")) 22 | fmt.Println(<- cache.GetAsync("hello")) 23 | time.Sleep(time.Second) 24 | fmt.Println(cache.Get("hello")) 25 | fmt.Println(<- cache.GetAsync("hello")) 26 | 27 | } 28 | 29 | 30 | func TestCache_Get(t *testing.T) { 31 | n := 500000 32 | cache := NewSignalCacheForDefault() 33 | 34 | start := time.Now().UnixNano() 35 | for i:=0; i < n; i++ { 36 | k := i % 4096 37 | cache.Set(strconv.Itoa(k), "hello") 38 | cache.GetAsync(strconv.Itoa(k)) 39 | } 40 | fmt.Println((time.Now().UnixNano() - start)/int64(time.Millisecond)) 41 | start = time.Now().UnixNano() 42 | for i:=0; i < n; i++ { 43 | //cache.SetFor(strconv.Itoa(i), "hello") 44 | //cache.GetFor(strconv.Itoa(i)) 45 | } 46 | fmt.Println((time.Now().UnixNano() - start) / int64(time.Millisecond)) 47 | } 48 | 49 | func hello() { 50 | 51 | } 52 | 53 | func TestFunc(t *testing.T) { 54 | cache := NewSignalCacheForDefault() 55 | cache.Set("hello", "world") 56 | cache.Set("hello", "world") 57 | cache.Set("hello", "world") 58 | 59 | fn := hello 60 | p := &fn 61 | ch := make(chan 
interface{}, 1) 62 | fmt.Printf("%p\n", p) 63 | ch <- p 64 | pp := <-ch 65 | km := pp.(*func()) 66 | (*km)() 67 | fmt.Printf("%p\n", pp) 68 | fmt.Printf("%p\n", km) 69 | } -------------------------------------------------------------------------------- /cache-goV1/commons.go: -------------------------------------------------------------------------------- 1 | package gocache 2 | 3 | 4 | func newCommons(fn *func(*commonsData), data *commonsData) *commons { 5 | return &commons{ 6 | fn: fn, 7 | data: data, 8 | } 9 | } 10 | 11 | func newCommonsData(key string, val interface{}, c *Cache, out chan interface{}) *commonsData { 12 | return &commonsData{ 13 | key: &key, 14 | val: val, 15 | master: c, 16 | out: out, 17 | } 18 | } 19 | 20 | -------------------------------------------------------------------------------- /cache-goV1/commons_test.go: -------------------------------------------------------------------------------- 1 | package gocache 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "testing" 7 | ) 8 | 9 | func str(ctx context.Context) { 10 | 11 | fmt.Println("hello") 12 | } 13 | 14 | func TestCache(t *testing.T) { 15 | //executor := &CacheExecutor{fn: (*func())(unsafe.Pointer(&str))} 16 | //executor.fn() 17 | } -------------------------------------------------------------------------------- /cache-goV1/defs.go: -------------------------------------------------------------------------------- 1 | package gocache 2 | 3 | import ( 4 | alg "four-seasons/algorithm" 5 | "sync" 6 | "time" 7 | ) 8 | 9 | type item struct { 10 | obj interface{} 11 | expiration int64 12 | } 13 | 14 | var ( 15 | NoExpiration = time.Second * 0 16 | DefaultExpiration = time.Hour * 1 17 | DefaultCapacity = 2048 18 | DefaultCommonsChannelSize = 4096 19 | DefaultCleatStep = time.Second * 1 20 | ) 21 | 22 | 23 | type CacheType string 24 | 25 | var CacheForLFU CacheType = "Type_LFU" 26 | var CacheForLRU CacheType = "Type_LRU" 27 | var CacheForEasy CacheType = "Type_EASY" 28 | 29 | 30 | type CacheManager struct { 31 | mu sync.RWMutex 32 | m map[string]*Cache 33 | } 34 | 35 | type SignalCache Cache 36 | 37 | type Cache struct { 38 | cache CacheInter 39 | dp dispatcher 40 | expiration time.Duration 41 | liquidator *Liquidator 42 | isClose bool 43 | onceOceChannel chan interface{} 44 | } 45 | 46 | type Liquidator struct { 47 | elements *alg.PriorityQueue 48 | mu sync.RWMutex 49 | } 50 | 51 | type commons struct { 52 | fn *func(*commonsData) 53 | data *commonsData 54 | } 55 | 56 | type commonsData struct { 57 | key *string 58 | val interface{} 59 | master *Cache 60 | out chan interface{} 61 | } 62 | 63 | 64 | 65 | type dispatcher struct { 66 | queue chan *commons 67 | stateCh chan string 68 | } 69 | 70 | 71 | type CacheInter interface { 72 | Get(string) interface{} 73 | Put(string, interface{}) 74 | Capacity() int 75 | Delete(string) 76 | } 77 | 78 | type strategy interface { 79 | update(*item) 80 | isClear(*item) bool 81 | } 82 | 83 | 84 | /************************************************************* 85 | LFU 86 | *************************************************************/ 87 | type lfuNode struct { 88 | prev *lfuNode 89 | next *lfuNode 90 | parent *lfuNodeChain 91 | key string 92 | data interface{} 93 | freq int 94 | } 95 | 96 | type lfuNodeChain struct { 97 | prev *lfuNodeChain 98 | next *lfuNodeChain 99 | head *lfuNode 100 | tail *lfuNode 101 | freq int 102 | } 103 | 104 | type lfuCache struct { 105 | capacity int 106 | size int 107 | elements map[string]*lfuNode 108 | manager lfuChainManager 109 | master *Cache 110 | } 111 
| type lfuChainManager struct { 112 | firstLinkedList *lfuNodeChain 113 | lastLinkedList *lfuNodeChain 114 | } 115 | 116 | 117 | type needClearNode struct { 118 | masterCache *Cache 119 | key string 120 | expiration int64 121 | } 122 | 123 | type lruNode struct { 124 | key string 125 | data interface{} 126 | next *lruNode 127 | prev *lruNode 128 | } 129 | 130 | type lruNodeChain struct { 131 | head *lruNode 132 | tail *lruNode 133 | } 134 | 135 | type lruCache struct { 136 | capacity int 137 | size int 138 | elements map[string]*lruNode 139 | manager *lruNodeChain 140 | master *Cache 141 | } 142 | 143 | 144 | type easyCache struct { 145 | elements map[string]interface{} 146 | } 147 | 148 | 149 | 150 | -------------------------------------------------------------------------------- /cache-goV1/dispatcher_test.go: -------------------------------------------------------------------------------- 1 | package gocache 2 | 3 | import ( 4 | "four-seasons/log" 5 | "testing" 6 | ) 7 | 8 | func TestCache_IsClose(t *testing.T) { 9 | ch := make(chan interface{},5) 10 | ch <- "hello" 11 | ch <- "world" 12 | ch <- "common" 13 | close(ch) 14 | for lb := range ch { 15 | log.Warn("hello %s", lb) 16 | } 17 | } -------------------------------------------------------------------------------- /cache-goV1/easy_cache.go: -------------------------------------------------------------------------------- 1 | package gocache 2 | 3 | func newEasyCache(capacity int) *easyCache { 4 | return &easyCache{elements:make(map[string]interface{}, capacity << 1 | capacity)} 5 | } 6 | 7 | 8 | func (ec *easyCache) Get(key string) interface{} { 9 | return ec.elements[key] 10 | 11 | } 12 | 13 | func (ec *easyCache) Delete(key string) { 14 | delete(ec.elements, key) 15 | 16 | } 17 | 18 | func (ec *easyCache) Capacity() int { 19 | return DefaultCapacity 20 | } 21 | 22 | 23 | func (ec *easyCache) Put(key string, value interface{}) { 24 | ec.elements[key] =value 25 | } -------------------------------------------------------------------------------- /cache-goV1/element.go: -------------------------------------------------------------------------------- 1 | package gocache 2 | 3 | import ( 4 | "time" 5 | ) 6 | 7 | //func (item Item) CompareTo(other alg.Comparable) int { 8 | // return int(item.expiration - other.(Item).expiration) 9 | //} 10 | 11 | func (item item) IsExpired() bool { 12 | if item.expiration <= int64(NoExpiration) { 13 | return false 14 | } else if item.expiration > time.Now().UnixNano() { 15 | return false 16 | } 17 | return true 18 | } 19 | func newItem(obj interface{}, expiration time.Duration) *item { 20 | exp := time.Now().UnixNano() + expiration.Nanoseconds() 21 | if expiration == NoExpiration { 22 | exp = int64(NoExpiration) 23 | } 24 | return &item{ 25 | obj: obj, 26 | expiration: exp, 27 | } 28 | } 29 | 30 | func newClearNode(ca *Cache, key string, expiration time.Duration) *needClearNode { 31 | exp := time.Now().UnixNano() + expiration.Nanoseconds() 32 | return &needClearNode{ 33 | masterCache: ca, 34 | key: key, 35 | expiration: exp, 36 | } 37 | } 38 | 39 | -------------------------------------------------------------------------------- /cache-goV1/lfu.go: -------------------------------------------------------------------------------- 1 | package gocache 2 | 3 | func (lc *lfuCache) Size() int { 4 | return lc.size 5 | } 6 | 7 | func newLFUCache(capacity int, master *Cache) *lfuCache { 8 | lfuCache := &lfuCache{ 9 | capacity: capacity, 10 | size: 0, 11 | elements: make(map[string]*lfuNode, (capacity<<1|capacity)+1), 
12 | manager: lfuChainManager{ 13 | firstLinkedList: newLFUChain(0), 14 | lastLinkedList: newLFUChain(0), 15 | }, 16 | master:master, 17 | } 18 | lfuCache.manager.firstLinkedList.next = lfuCache.manager.lastLinkedList 19 | lfuCache.manager.lastLinkedList.prev = lfuCache.manager.firstLinkedList 20 | return lfuCache 21 | } 22 | 23 | func (lc *lfuCache) Capacity() int { 24 | return lc.capacity 25 | } 26 | 27 | func (lc *lfuCache) Get(key string) interface{} { 28 | node, ok := lc.elements[key] 29 | if !ok { 30 | return nil 31 | } 32 | freqInc(node) 33 | return node.data 34 | } 35 | 36 | func (lc *lfuCache) Delete(key string) { 37 | if lc.capacity <= 0 { 38 | return 39 | } 40 | node,ok := lc.elements[key] 41 | if !ok { 42 | return 43 | } 44 | lc.deleteNode(node) 45 | delete(lc.elements,key) 46 | lc.size-- 47 | 48 | } 49 | 50 | func (lc *lfuCache) deleteNode(node *lfuNode) { 51 | chain := node.parent 52 | node.LeaveForList() 53 | if chain.IsEmpty() { 54 | chain.LeaveForChain() 55 | } 56 | } 57 | 58 | 59 | func (lc *lfuCache) Put(key string, value interface{}) { 60 | if lc.capacity <= 0 { 61 | return 62 | } 63 | node , ok := lc.elements[key] 64 | if ok { 65 | node.data = value 66 | freqInc(node) 67 | } else { 68 | if lc.Size() >= lc.capacity { 69 | 70 | listNodeChain := lc.manager.firstLinkedList.next 71 | delNode := listNodeChain.GetTail() 72 | lc.deleteNode(delNode) 73 | delete(lc.elements, delNode.key) 74 | //delNode, _ := listNodeChain.GetTail() 75 | //if listNodeChain.IsEmpty() { 76 | // listNodeChain.LeaveForChain() 77 | //} 78 | lc.size-- 79 | } 80 | lfuNode := &lfuNode{ 81 | key:key, 82 | data:value, 83 | freq:1, 84 | } 85 | lc.elements[key] = lfuNode 86 | if lc.manager.firstLinkedList.next.freq != 1 { 87 | tempList := newLFUChain(1) 88 | tempList.AddFirst(lfuNode) 89 | insertChain(lc.manager.firstLinkedList, tempList) 90 | } else { 91 | lc.manager.firstLinkedList.next.AddFirst(lfuNode) 92 | } 93 | lc.size++ 94 | } 95 | } 96 | 97 | func freqInc(node *lfuNode) { 98 | parentList := node.parent 99 | node.LeaveForList() 100 | nextList := parentList.next 101 | tempParent := parentList.prev 102 | if parentList.IsEmpty() { 103 | parentList.LeaveForChain() 104 | parentList = tempParent 105 | } 106 | node.freq++ 107 | if nextList.freq != node.freq { 108 | newChain := newLFUChain(node.freq) 109 | insertChain(parentList, newChain) 110 | newChain.AddFirst(node) 111 | node.parent = newChain 112 | } else { 113 | nextList.AddFirst(node) 114 | } 115 | } 116 | 117 | func insertChain(preChain, newChain *lfuNodeChain) { 118 | newChain.next = preChain.next 119 | preChain.next.prev = newChain 120 | newChain.prev = preChain 121 | preChain.next = newChain 122 | } -------------------------------------------------------------------------------- /cache-goV1/lfu_doubly_list.go: -------------------------------------------------------------------------------- 1 | package gocache 2 | 3 | func (dln *lfuNode) LeaveForList() { 4 | dln.prev.next = dln.next 5 | dln.next.prev = dln.prev 6 | dln.next = nil 7 | dln.prev = nil 8 | dln.parent = nil 9 | } 10 | 11 | func (lc *lfuNodeChain) LeaveForChain() { 12 | lc.prev.next = lc.next 13 | lc.next.prev = lc.prev 14 | lc.prev = nil 15 | lc.next = nil 16 | } 17 | 18 | func (lc *lfuNodeChain) IsEmpty() bool { 19 | return lc.head.next == lc.tail 20 | } 21 | 22 | func newLFUChain(freq int) *lfuNodeChain { 23 | head := &lfuNode{} 24 | tail := &lfuNode{} 25 | head.next = tail 26 | tail.prev = head 27 | return &lfuNodeChain{ 28 | head:head, 29 | tail:tail, 30 | freq:freq, 31 | } 32 | } 
33 | 34 | func (lc *lfuNodeChain) GetHead() *lfuNode { 35 | if lc.head.next == lc.tail { 36 | return nil 37 | } 38 | return lc.head.next 39 | } 40 | 41 | func (lc *lfuNodeChain) GetTail() *lfuNode { 42 | if lc.head.next == lc.tail { 43 | return nil 44 | } 45 | return lc.tail.prev 46 | } 47 | 48 | func (lc *lfuNodeChain) AddFirst(node *lfuNode) { 49 | node.prev = lc.head 50 | node.next = lc.head.next 51 | lc.head.next.prev = node 52 | lc.head.next = node 53 | node.parent = lc 54 | } 55 | 56 | func (lc *lfuNodeChain) RemoveHead() (*lfuNode, bool) { 57 | if lc.head.next == lc.tail { 58 | return nil, false 59 | } 60 | node := lc.head.next 61 | node.next.prev = lc.head 62 | lc.head.next = node.next 63 | node.next = nil 64 | node.prev = nil 65 | return node, true 66 | } 67 | 68 | func (lc *lfuNodeChain) AddTail(node *lfuNode) { 69 | node.next = lc.tail 70 | node.prev = lc.tail.prev 71 | lc.tail.prev.next = node 72 | lc.tail.prev = node 73 | node.parent = lc 74 | } 75 | 76 | func (lc *lfuNodeChain) RemoveTail() (*lfuNode, bool) { 77 | if lc.head.next == lc.tail { 78 | return nil, false 79 | } 80 | node := lc.tail.prev 81 | node.prev.next = lc.tail 82 | lc.tail.prev = node.prev 83 | node.prev = nil 84 | node.next = nil 85 | return node, true 86 | } 87 | 88 | 89 | -------------------------------------------------------------------------------- /cache-goV1/liquidator.go: -------------------------------------------------------------------------------- 1 | package gocache 2 | 3 | import ( 4 | alg "four-seasons/algorithm" 5 | "four-seasons/log" 6 | "sync" 7 | "time" 8 | ) 9 | 10 | func (node needClearNode) CompareTo(other alg.Comparable) int { 11 | return int(node.expiration - other.(*needClearNode).expiration) 12 | } 13 | 14 | func (node needClearNode) IsExpired() bool { 15 | if node.expiration <= int64(NoExpiration) { 16 | return false 17 | } else if node.expiration > time.Now().UnixNano() { 18 | return false 19 | } 20 | return true 21 | } 22 | 23 | func newLiquidator() *Liquidator { 24 | return &Liquidator{ 25 | elements: alg.NewPriorityQueue(), 26 | mu: sync.RWMutex{}, 27 | } 28 | } 29 | 30 | func (l *Liquidator) top() alg.Comparable { 31 | l.mu.RLock() 32 | defer l.mu.RUnlock() 33 | return l.elements.Top() 34 | } 35 | 36 | func (l *Liquidator) push(obj alg.Comparable) { 37 | l.mu.Lock() 38 | l.elements.Push(obj) 39 | l.mu.Unlock() 40 | } 41 | 42 | func (l *Liquidator) pop() alg.Comparable { 43 | l.mu.Lock() 44 | defer l.mu.Unlock() 45 | return l.elements.Pop() 46 | } 47 | 48 | func (l *Liquidator) clearNode(number int) (total int) { 49 | total = 0 50 | n := l.elements.Length() 51 | if n == 0 { 52 | return 53 | } 54 | for i := 0; i < number; i++ { 55 | top := l.top().(*needClearNode) 56 | if top.IsExpired() { 57 | top = l.pop().(*needClearNode) 58 | top.masterCache.Delete(top.key) 59 | } else { 60 | break 61 | } 62 | total++ 63 | } 64 | log.Info("clear Expired Node size is %d Node total Size is %d ",total, n) 65 | return 66 | } 67 | 68 | func (l *Liquidator) clearFunc() { 69 | num := 0 70 | if l.elements.Length() < 25 { 71 | num = l.elements.Length() 72 | } else if l.elements.Length() > 500 { 73 | num = 100 74 | } else { 75 | num = l.elements.Length() >> 2 76 | } 77 | l.clearNode(num) 78 | } -------------------------------------------------------------------------------- /cache-goV1/liquidator_test.go: -------------------------------------------------------------------------------- 1 | package gocache 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | "time" 7 | ) 8 | 9 | func TestLiquidator(t 
*testing.T) { 10 | cache := NewSignalCacheForDefault() 11 | cache.SetForExpiration("hello", "world", time.Second) 12 | time.Sleep(time.Millisecond * 300) 13 | fmt.Println(cache.Get("hello")) 14 | time.Sleep(time.Millisecond * 300) 15 | fmt.Println(cache.Get("hello")) 16 | time.Sleep(time.Millisecond * 300) 17 | fmt.Println(cache.Get("hello")) 18 | time.Sleep(time.Millisecond * 300) 19 | fmt.Println(cache.Get("hello")) 20 | time.Sleep(time.Millisecond * 300) 21 | fmt.Println(cache.Get("hello")) 22 | time.Sleep(time.Millisecond * 300) 23 | fmt.Println(cache.Get("hello")) 24 | time.Sleep(time.Millisecond * 300) 25 | fmt.Println(cache.Get("hello")) 26 | time.Sleep(time.Millisecond * 300) 27 | fmt.Println(cache.Get("hello")) 28 | time.Sleep(time.Millisecond * 300) 29 | fmt.Println(cache.Get("hello")) 30 | time.Sleep(time.Millisecond * 300) 31 | fmt.Println(cache.Get("hello")) 32 | time.Sleep(time.Millisecond * 300) 33 | fmt.Println(cache.Get("hello")) 34 | time.Sleep(time.Millisecond * 300) 35 | fmt.Println(cache.Get("hello")) 36 | time.Sleep(time.Millisecond * 300) 37 | fmt.Println(cache.Get("hello")) 38 | time.Sleep(time.Millisecond * 300) 39 | fmt.Println(cache.Get("hello")) 40 | time.Sleep(time.Millisecond * 1200) 41 | fmt.Println(cache.Get("hello")) 42 | 43 | fmt.Println(<- cache.GetAsync("hello")) 44 | } 45 | -------------------------------------------------------------------------------- /cache-goV1/lru.go: -------------------------------------------------------------------------------- 1 | package gocache 2 | 3 | func (lc *lruCache) Size() int { 4 | return lc.size 5 | } 6 | 7 | func newLRUCache(capacity int, master *Cache) *lruCache { 8 | lruCache := &lruCache{ 9 | capacity: capacity, 10 | size: 0, 11 | elements: make(map[string]*lruNode, (capacity<<1|capacity)+1), 12 | manager: newLRUChain(), 13 | master:master, 14 | } 15 | return lruCache 16 | } 17 | 18 | 19 | func (lc *lruCache) Get(key string) interface{} { 20 | node, ok := lc.elements[key] 21 | if !ok { 22 | return nil 23 | } 24 | lruUp(node, lc.manager) 25 | return node.data 26 | } 27 | 28 | func (lc *lruCache) Delete(key string) { 29 | if lc.capacity <= 0 { 30 | return 31 | } 32 | node,ok := lc.elements[key] 33 | if !ok { 34 | return 35 | } 36 | lc.deleteNode(node) 37 | delete(lc.elements,key) 38 | lc.size-- 39 | } 40 | 41 | func (lc *lruCache) deleteNode(node *lruNode) { 42 | node.LeaveForList() 43 | } 44 | 45 | func (lc *lruCache) Capacity() int { 46 | return lc.capacity 47 | } 48 | 49 | func (lc *lruCache) Put(key string, value interface{}) { 50 | if lc.capacity <= 0 { 51 | return 52 | } 53 | node , ok := lc.elements[key] 54 | if ok { 55 | node.data = value 56 | lruUp(node, lc.manager) 57 | } else { 58 | if lc.Size() >= lc.capacity { 59 | //先删除超时的不成功再删除LRU 60 | delNode, _ := lc.manager.RemoveHead() 61 | delete(lc.elements, delNode.key) 62 | lc.size-- 63 | 64 | } 65 | lruNode := &lruNode{ 66 | key:key, 67 | data:value, 68 | } 69 | lc.elements[key] = lruNode 70 | lc.manager.AddTail(lruNode) 71 | lc.size++ 72 | } 73 | } 74 | 75 | func lruUp(node *lruNode, chain *lruNodeChain) { 76 | node.LeaveForList() 77 | chain.AddTail(node) 78 | } 79 | -------------------------------------------------------------------------------- /cache-goV1/lru_doubly_list.go: -------------------------------------------------------------------------------- 1 | package gocache 2 | 3 | func newLRUChain() *lruNodeChain { 4 | head := &lruNode{} 5 | tail := &lruNode{} 6 | head.next = tail 7 | tail.prev = head 8 | return &lruNodeChain{ 9 | head:head, 10 | 
tail:tail, 11 | } 12 | } 13 | 14 | func (ln *lruNode) LeaveForList() { 15 | ln.prev.next = ln.next 16 | ln.next.prev = ln.prev 17 | ln.next = nil 18 | ln.prev = nil 19 | } 20 | 21 | func (lc *lruNodeChain) IsEmpty() bool { 22 | return lc.head.next == lc.tail 23 | } 24 | 25 | func (lc *lruNodeChain) GetHead() *lruNode { 26 | if lc.head.next == lc.tail { 27 | return nil 28 | } 29 | return lc.head.next 30 | } 31 | 32 | func (lc *lruNodeChain) GetTail() *lruNode { 33 | if lc.head.next == lc.tail { 34 | return nil 35 | } 36 | return lc.tail.prev 37 | } 38 | 39 | func (lc *lruNodeChain) AddFirst(node *lruNode) { 40 | node.prev = lc.head 41 | node.next = lc.head.next 42 | lc.head.next.prev = node 43 | lc.head.next = node 44 | } 45 | 46 | func (lc *lruNodeChain) RemoveHead() (*lruNode, bool) { 47 | if lc.head.next == lc.tail { 48 | return nil, false 49 | } 50 | node := lc.head.next 51 | node.next.prev = lc.head 52 | lc.head.next = node.next 53 | node.next = nil 54 | node.prev = nil 55 | return node, true 56 | } 57 | 58 | func (lc *lruNodeChain) AddTail(node *lruNode) { 59 | node.next = lc.tail 60 | node.prev = lc.tail.prev 61 | lc.tail.prev.next = node 62 | lc.tail.prev = node 63 | } 64 | 65 | func (lc *lruNodeChain) RemoveTail() (*lruNode, bool) { 66 | if lc.head.next == lc.tail { 67 | return nil, false 68 | } 69 | node := lc.tail.prev 70 | node.prev.next = lc.tail 71 | lc.tail.prev = node.prev 72 | node.prev = nil 73 | node.next = nil 74 | return node, true 75 | } 76 | -------------------------------------------------------------------------------- /cache-goV1/lru_test.go: -------------------------------------------------------------------------------- 1 | package gocache 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | ) 7 | 8 | func TestNewLRUCache(t *testing.T) { 9 | cache := NewSignalCache(CacheForLRU, 2) 10 | cache.Set("hello", 1) 11 | cache.Set("aaaa", 2) 12 | cache.Set("cccc", 3) 13 | fmt.Println(cache.Get("hello")) 14 | fmt.Println(cache.Get("aaaa")) 15 | fmt.Println(cache.Get("cccc")) 16 | cache.Set("kmp","3333") 17 | fmt.Println(cache.Get("aaaa" + "")) 18 | } 19 | -------------------------------------------------------------------------------- /cache-goV1/manager.go: -------------------------------------------------------------------------------- 1 | package gocache 2 | 3 | import ( 4 | "errors" 5 | "log" 6 | "sync" 7 | "time" 8 | ) 9 | 10 | const ( 11 | DEBUG = iota 12 | DEFAULT 13 | ) 14 | 15 | 16 | /** 17 | 得到缓存 18 | */ 19 | 20 | var mFlag = false 21 | var onceCacheManager *CacheManager 22 | var LoggerLevel = DEFAULT 23 | 24 | 25 | func NewCacheManager() *CacheManager { 26 | return newCacheManagerOnce() 27 | } 28 | 29 | var onceManagerChanel = make(chan *CacheManager,1) 30 | 31 | var once sync.Once 32 | 33 | func newCacheManagerOnce() *CacheManager { 34 | if mFlag { 35 | return onceCacheManager 36 | } 37 | once.Do(func() { 38 | if LoggerLevel == DEBUG { 39 | log.Println("NewCacheManger is once call...") 40 | } 41 | 42 | onceCacheManager = newCacheManager() 43 | mFlag = true 44 | onceManagerChanel <- onceCacheManager 45 | }) 46 | select { 47 | case manager := <- onceManagerChanel: return manager 48 | case _ = <- time.After(time.Second*5): 49 | log.Println("NewCacheManger call timeout...") 50 | return onceCacheManager 51 | } 52 | } 53 | 54 | func newCacheManager() *CacheManager { 55 | return &CacheManager{ 56 | mu: sync.RWMutex{}, 57 | m: make(map[string]*Cache), 58 | } 59 | } 60 | 61 | 62 | 63 | func (cm *CacheManager) GetCache(key string) *Cache { 64 | cm.mu.RLock() 65 | defer 
cm.mu.RUnlock() 66 | return cm.m[key] 67 | } 68 | 69 | /** 70 | 根据规则创建缓存 71 | */ 72 | func (cm *CacheManager) CreateCache(key string,typeCache CacheType, capacity int) *Cache { 73 | cm.mu.Lock() 74 | defer cm.mu.Unlock() 75 | cache := newCache(typeCache, capacity) 76 | cm.m[key] = cache 77 | return cache 78 | } 79 | 80 | func (cm *CacheManager) CreateCacheCustom(key string, ca CacheInter) *Cache { 81 | cm.mu.Lock() 82 | defer cm.mu.Unlock() 83 | cache := newCacheForCustom(ca) 84 | cm.m[key] = cache 85 | return cache 86 | } 87 | 88 | /** 89 | 创建一个缓存实例 90 | */ 91 | func (cm *CacheManager) CreateCacheForDefault(key string) *Cache { 92 | 93 | cm.mu.Lock() 94 | defer cm.mu.Unlock() 95 | cache := newCacheForDefault() 96 | cm.m[key] = cache 97 | return cache 98 | } 99 | 100 | /** 101 | 添加一个缓存实例 102 | */ 103 | func (cm *CacheManager) AddCache(key string, cache *Cache) { 104 | cm.mu.Lock() 105 | defer cm.mu.Unlock() 106 | cm.m[key] = cache 107 | } 108 | 109 | /** 110 | 删除缓存并且返回给客户端 111 | */ 112 | func (cm *CacheManager) DeleteCache(key string) { 113 | cm.mu.Lock() 114 | defer cm.mu.Unlock() 115 | cache,ok := cm.m[key] 116 | if ok { 117 | delete(cm.m, key) 118 | } 119 | _ = cache.close(key) 120 | } 121 | 122 | func (cm *CacheManager) CacheRun(key string) error { 123 | cm.mu.Lock() 124 | defer cm.mu.Unlock() 125 | cache,ok := cm.m[key] 126 | if !ok { 127 | return errors.New("Cache name : " + key + "not found") 128 | } 129 | return cache.run(key) 130 | } 131 | 132 | func (cm *CacheManager) CacheStop(key string) error { 133 | cm.mu.Lock() 134 | defer cm.mu.Unlock() 135 | cache,ok := cm.m[key] 136 | if !ok { 137 | return errors.New("Cache name : " + key + "not found") 138 | } 139 | return cache.close(key) 140 | } 141 | 142 | 143 | 144 | 145 | -------------------------------------------------------------------------------- /cache-goV1/manager_test.go: -------------------------------------------------------------------------------- 1 | package gocache 2 | 3 | import ( 4 | "fmt" 5 | "sync" 6 | "testing" 7 | ) 8 | 9 | func TestNewCacheManger(t *testing.T) { 10 | group := sync.WaitGroup{} 11 | for i := 0; i < 50; i++ { 12 | group.Add(1) 13 | go func() { 14 | 15 | fmt.Printf("%p\n",NewCacheManager()) 16 | group.Done() 17 | }() 18 | } 19 | group.Wait() 20 | } 21 | 22 | 23 | -------------------------------------------------------------------------------- /cache-goV1/signal_cache.go: -------------------------------------------------------------------------------- 1 | package gocache 2 | 3 | import ( 4 | "errors" 5 | "log" 6 | "sync" 7 | "time" 8 | ) 9 | 10 | /** 11 | ***************************************************** 12 | * Signal Cache * 13 | ***************************************************** 14 | **/ 15 | 16 | var cFlag = false 17 | var onceSignalCache *Cache 18 | var signalMu sync.Mutex 19 | var onceSignal sync.Once 20 | func NewSignalCache(typeCache CacheType, capacity int) *Cache { 21 | 22 | return newSignalCacheOnce(typeCache, capacity) 23 | } 24 | 25 | func NewSignalCacheCustom(ca CacheInter) *Cache { 26 | return newCacheForCustom(ca) 27 | } 28 | 29 | func NewSignalCacheForDefault() *Cache { 30 | return newSignalCacheOnce(CacheForLFU, DefaultCapacity) 31 | } 32 | 33 | func newSignalCacheOnce(typeCache CacheType, capacity int) *Cache { 34 | ch := make(chan *Cache,1) 35 | if cFlag { 36 | return onceSignalCache 37 | } 38 | onceSignal.Do(func() { 39 | if LoggerLevel == DEBUG { 40 | log.Println("NewSignalCache is once call...") 41 | } 42 | onceSignalCache = newCache(typeCache, capacity) 43 | cFlag = true 
44 | ch <- onceSignalCache 45 | }) 46 | select { 47 | case cache := <- ch: return cache 48 | case _ = <- time.After(time.Second): 49 | log.Println("NewSignalCache call timeout...") 50 | return onceSignalCache 51 | } 52 | } 53 | 54 | 55 | func StopSignalCache() error { 56 | if cFlag { 57 | return errors.New("signal Cache not start") 58 | } 59 | signalMu.Lock() 60 | defer signalMu.Unlock() 61 | return onceSignalCache.close("signal") 62 | } 63 | 64 | func RunSignalCache() error { 65 | if cFlag { 66 | return errors.New("signal Cache not start") 67 | } 68 | signalMu.Lock() 69 | defer signalMu.Unlock() 70 | return onceSignalCache.run("signal") 71 | } 72 | -------------------------------------------------------------------------------- /log/log.go: -------------------------------------------------------------------------------- 1 | package log 2 | import ( 3 | "fmt" 4 | "io" 5 | "os" 6 | "runtime" 7 | "time" 8 | ) 9 | 10 | const ( 11 | PREFIX = "[FOUR-SEASONS]" 12 | TIME_FORMAT = "2006-01-02 15:04:05" 13 | ) 14 | 15 | var ( 16 | Verbose, NonColor bool 17 | Output io.Writer = os.Stdout 18 | 19 | LEVEL_FLAGS = [...]string{"DEBUG", " INFO", " WARN", "ERROR", "FATAL"} 20 | ) 21 | 22 | func init() { 23 | if runtime.GOOS == "windows" { 24 | NonColor = true 25 | } 26 | } 27 | 28 | const ( 29 | DEBUG = iota 30 | INFO 31 | WARNING 32 | ERROR 33 | FATAL 34 | ) 35 | 36 | func Print(level int, format string, args ...interface{}) { 37 | 38 | var logFormat = "%s %s [%s] %s\n" 39 | if !NonColor { 40 | switch level { 41 | case DEBUG: 42 | logFormat = "%s \033[36m%s\033[0m [\033[34m%s\033[0m] %s\n" 43 | case INFO: 44 | logFormat = "%s \033[36m%s\033[0m [\033[32m%s\033[0m] %s\n" 45 | case WARNING: 46 | logFormat = "%s \033[36m%s\033[0m [\033[33m%s\033[0m] %s\n" 47 | case ERROR: 48 | logFormat = "%s \033[36m%s\033[0m [\033[31m%s\033[0m] %s\n" 49 | case FATAL: 50 | logFormat = "%s \033[36m%s\033[0m [\033[35m%s\033[0m] %s\n" 51 | } 52 | } 53 | 54 | fmt.Fprintf(Output, logFormat, PREFIX, time.Now().Format(TIME_FORMAT), 55 | LEVEL_FLAGS[level], fmt.Sprintf(format, args...)) 56 | 57 | if level == FATAL { 58 | os.Exit(1) 59 | } 60 | } 61 | 62 | func Debug(format string, args ...interface{}) { 63 | Print(DEBUG, format, args...) 64 | } 65 | 66 | func Warn(format string, args ...interface{}) { 67 | Print(WARNING, format, args...) 68 | } 69 | 70 | func Info(format string, args ...interface{}) { 71 | Print(INFO, format, args...) 72 | } 73 | 74 | func Error(format string, args ...interface{}) { 75 | Print(ERROR, format, args...) 76 | } 77 | 78 | func Fatal(format string, args ...interface{}) { 79 | Print(FATAL, format, args...) 80 | } --------------------------------------------------------------------------------
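
As a usage sketch for the log package above (assuming the import path four-seasons/log used elsewhere in this repository), the levelled helpers all take a printf-style format string; Fatal additionally exits the process:

```go
package main

import "four-seasons/log"

func main() {
	log.Debug("cache capacity is %d", 2048)
	log.Info("cache %s started", "cache1")
	log.Warn("Warning... Using a stopped cache")
	log.Error("cache %s is closed", "cache1")
	// log.Fatal prints like the others and then calls os.Exit(1).
}
```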