├── .gitignore
├── LICENSE
├── async.go
├── batch.go
├── go.mod
├── kvcache.go
├── merge.go
└── task
    ├── group.go
    └── task.go

/.gitignore:
--------------------------------------------------------------------------------
1 | # Binaries for programs and plugins
2 | *.exe
3 | *.dll
4 | *.so
5 | *.dylib
6 |
7 | # Test binary, build with `go test -c`
8 | *.test
9 |
10 | # Output of the go coverage tool, specifically when used with LiteIDE
11 | *.out
12 |
13 | # Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
14 | .glide/
15 |
16 | .DS_Store
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2017 Ryou Zhang
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | -------------------------------------------------------------------------------- /async.go: -------------------------------------------------------------------------------- 1 | package async 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "sync" 7 | "time" 8 | ) 9 | 10 | const Unlimit = 0 11 | 12 | type Method func(args ...interface{}) (interface{}, error) 13 | type LambdaMethod func() (interface{}, error) 14 | 15 | var panicHandler func(interface{}) 16 | 17 | func SetPanicHandler(hanlder func(interface{})) { 18 | panicHandler = hanlder 19 | } 20 | 21 | func Safety(method func() (interface{}, error)) (res interface{}, err error) { 22 | defer func() { 23 | if e := recover(); e != nil { 24 | _, ok := e.(error) 25 | if ok { 26 | err = e.(error) 27 | } else { 28 | err = fmt.Errorf("%v", e) 29 | } 30 | if panicHandler != nil { 31 | panicHandler(err) 32 | } 33 | } 34 | }() 35 | res, err = method() 36 | return 37 | } 38 | 39 | func Retry(method func() (interface{}, error), maxCount int, interval time.Duration) (interface{}, error) { 40 | count := 0 41 | for { 42 | res, err := Lambda(method, Unlimit) 43 | if err == nil { 44 | return res, err 45 | } 46 | count = count + 1 47 | if count >= maxCount { 48 | return nil, err 49 | } 50 | <-time.After(time.Duration(count) * interval) 51 | } 52 | } 53 | 54 | type result struct { 55 | val interface{} 56 | err error 57 | } 58 | 59 | func Lambda(method func() (interface{}, error), timeout time.Duration) (interface{}, error) { 60 | output := make(chan *result, 1) 61 | go func() { 62 | defer close(output) 63 | defer func() { 64 | if e := recover(); e != nil { 65 | err := fmt.Errorf("%s", e) 66 | if panicHandler != nil { 67 | panicHandler(err) 68 | } 69 | output <- &result{err: err} 70 | } 71 | }() 72 | res, err := method() 73 | output <- &result{val: res, err: err} 74 | }() 75 | if timeout > 0 { 76 | timer := time.NewTimer(timeout) 77 | defer timer.Stop() 78 | select { 79 | case res := <-output: 80 | { 81 | return res.val, res.err 82 | } 83 | case <-timer.C: 84 | { 85 | return nil, errors.New("Async_Timeout") 86 | } 87 | } 88 | } else { 89 | res := <-output 90 | return res.val, res.err 91 | } 92 | } 93 | 94 | func Call(m Method, timeout time.Duration, args ...interface{}) (interface{}, error) { 95 | return Lambda(func() (interface{}, error) { 96 | return m(args...) 97 | }, timeout) 98 | } 99 | 100 | func All(methods []LambdaMethod, timeout time.Duration) []interface{} { 101 | var wg sync.WaitGroup 102 | result := make([]interface{}, len(methods)) 103 | for i, m := range methods { 104 | wg.Add(1) 105 | go func(index int, method LambdaMethod) { 106 | defer wg.Done() 107 | res, err := Lambda(method, timeout) 108 | if err != nil { 109 | result[index] = err 110 | } else { 111 | result[index] = res 112 | } 113 | }(i, m) 114 | } 115 | wg.Wait() 116 | return result 117 | } 118 | 119 | func Serise(methods []LambdaMethod, timeout time.Duration) []interface{} { 120 | result := make([]interface{}, 0) 121 | for _, m := range methods { 122 | res, err := Lambda(m, timeout) 123 | if err != nil { 124 | result = append(result, err) 125 | return result 126 | } else { 127 | result = append(result, res) 128 | } 129 | } 130 | return result 131 | } 132 | 133 | func Flow(enter Method, args []interface{}, methods []Method, timeout time.Duration) (interface{}, error) { 134 | var ( 135 | res interface{} 136 | err error 137 | ) 138 | res, err = Call(enter, timeout, args...) 
139 | if err != nil { 140 | return nil, err 141 | } 142 | for _, m := range methods { 143 | res, err = Call(m, timeout, res) 144 | if err != nil { 145 | return nil, err 146 | } 147 | } 148 | return res, nil 149 | } 150 | 151 | func Any(methods []LambdaMethod, timeout time.Duration) ([]interface{}, error) { 152 | resChan := make(chan []interface{}, 1) 153 | errChan := make(chan error, len(methods)) 154 | go func() { 155 | defer func() { 156 | close(resChan) 157 | close(errChan) 158 | }() 159 | var wg sync.WaitGroup 160 | result := make([]interface{}, len(methods)) 161 | for i, m := range methods { 162 | wg.Add(1) 163 | go func(index int, method LambdaMethod) { 164 | defer wg.Done() 165 | res, err := Lambda(method, timeout) 166 | if err != nil { 167 | errChan <- err 168 | } else { 169 | result[index] = res 170 | } 171 | }(i, m) 172 | } 173 | wg.Wait() 174 | resChan <- result 175 | }() 176 | select { 177 | case err := <-errChan: 178 | return nil, err 179 | case res := <-resChan: 180 | return res, nil 181 | } 182 | } 183 | 184 | func AnyOne(methods []LambdaMethod, timeout time.Duration) (interface{}, []error) { 185 | resChan := make(chan interface{}, len(methods)) 186 | errChan := make(chan []error, 1) 187 | go func() { 188 | defer func() { 189 | close(resChan) 190 | close(errChan) 191 | }() 192 | var wg sync.WaitGroup 193 | errs := make([]error, len(methods)) 194 | for i, m := range methods { 195 | wg.Add(1) 196 | go func(index int, method LambdaMethod) { 197 | defer wg.Done() 198 | res, err := Lambda(method, timeout) 199 | if err != nil { 200 | errs[index] = err 201 | } else { 202 | resChan <- res 203 | } 204 | }(i, m) 205 | } 206 | wg.Wait() 207 | errChan <- errs 208 | }() 209 | select { 210 | case errs := <-errChan: 211 | return nil, errs 212 | case res := <-resChan: 213 | return res, nil 214 | } 215 | } 216 | 217 | func Parallel(methods []LambdaMethod, maxCount int) []interface{} { 218 | if maxCount == Unlimit { 219 | maxCount = 64 220 | } 221 | var wg sync.WaitGroup 222 | workers := make(chan bool, maxCount) 223 | results := make([]interface{}, len(methods)) 224 | for index, method := range methods { 225 | workers <- true 226 | wg.Add(1) 227 | go func(i int, m LambdaMethod) { 228 | defer func() { 229 | <-workers 230 | wg.Done() 231 | }() 232 | res, err := Lambda(m, 0) 233 | if err != nil { 234 | results[i] = err 235 | } else { 236 | results[i] = res 237 | } 238 | }(index, method) 239 | } 240 | wg.Wait() 241 | close(workers) 242 | return results 243 | } 244 | 245 | func Foreach(objs []interface{}, method func(int) (interface{}, error), maxConcurrent int) []interface{} { 246 | if maxConcurrent == Unlimit { 247 | maxConcurrent = 64 248 | } 249 | var wg sync.WaitGroup 250 | workers := make(chan bool, maxConcurrent) 251 | results := make([]interface{}, len(objs)) 252 | for index := range objs { 253 | workers <- true 254 | wg.Add(1) 255 | go func(i int, method func(int) (interface{}, error)) { 256 | defer func() { 257 | <-workers 258 | wg.Done() 259 | }() 260 | res, err := Safety(func() (interface{}, error) { 261 | return method(i) 262 | }) 263 | if err != nil { 264 | results[i] = err 265 | } else { 266 | results[i] = res 267 | } 268 | }(index, method) 269 | } 270 | wg.Wait() 271 | close(workers) 272 | return results 273 | } 274 | 275 | func For(count int, method func(int) (interface{}, error), maxConcurrent int) []interface{} { 276 | if maxConcurrent == Unlimit { 277 | maxConcurrent = 64 278 | } 279 | var wg sync.WaitGroup 280 | workers := make(chan bool, maxConcurrent) 281 | results := 
make([]interface{}, count) 282 | for index := 0; index < count; index++ { 283 | workers <- true 284 | wg.Add(1) 285 | go func(i int, method func(int) (interface{}, error)) { 286 | defer func() { 287 | <-workers 288 | wg.Done() 289 | }() 290 | res, err := Safety(func() (interface{}, error) { 291 | return method(i) 292 | }) 293 | if err != nil { 294 | results[i] = err 295 | } else { 296 | results[i] = res 297 | } 298 | }(index, method) 299 | } 300 | wg.Wait() 301 | close(workers) 302 | return results 303 | } 304 | -------------------------------------------------------------------------------- /batch.go: -------------------------------------------------------------------------------- 1 | package async 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "sync" 7 | "time" 8 | ) 9 | 10 | var ( 11 | one sync.Once 12 | input chan *cmd 13 | output chan *batchCmd 14 | groupDic map[string]*group 15 | ) 16 | 17 | type CacheProvider interface { 18 | Put(string, interface{}, interface{}) error 19 | Get(string, interface{}) (interface{}, error) 20 | } 21 | 22 | func init() { 23 | groupDic = make(map[string]*group) 24 | input = make(chan *cmd, 128) 25 | output = make(chan *batchCmd, 128) 26 | } 27 | 28 | type group struct { 29 | name string 30 | batchSize int 31 | method func(...interface{}) (map[interface{}]interface{}, error) 32 | cmdDic map[interface{}][]*cmd 33 | keyDic map[interface{}]bool 34 | cache CacheProvider 35 | } 36 | 37 | type cmd struct { 38 | group string 39 | key interface{} 40 | count int 41 | callback chan interface{} 42 | forced bool 43 | } 44 | 45 | func (c *cmd) output(res interface{}) { 46 | c.callback <- res 47 | c.count-- 48 | if c.count == 0 { 49 | close(c.callback) 50 | } 51 | } 52 | 53 | type batchCmd struct { 54 | group string 55 | keys []interface{} 56 | err error 57 | result map[interface{}]interface{} 58 | } 59 | 60 | func RegisterGroup( 61 | name string, 62 | batchSize int, 63 | method func(...interface{}) (map[interface{}]interface{}, error), 64 | cache CacheProvider) error { 65 | 66 | _, ok := groupDic[name] 67 | if ok { 68 | return fmt.Errorf("duplicate group:%s", name) 69 | } 70 | 71 | groupDic[name] = &group{ 72 | name: name, 73 | batchSize: batchSize, 74 | method: method, 75 | cmdDic: make(map[interface{}][]*cmd), 76 | keyDic: make(map[interface{}]bool), 77 | cache: cache, 78 | } 79 | one.Do(func() { 80 | go runloop() 81 | }) 82 | return nil 83 | } 84 | 85 | func doing(ctx context.Context, b *batchCmd, method func(...interface{}) (map[interface{}]interface{}, error)) { 86 | res, err := Safety(func() (interface{}, error) { 87 | return method(b.keys...) 
88 | }) 89 | if err != nil { 90 | b.err = err 91 | } else { 92 | b.result = res.(map[interface{}]interface{}) 93 | } 94 | output <- b 95 | } 96 | 97 | func runloop() { 98 | ctx := context.Background() 99 | timer := time.NewTimer(10 * time.Millisecond) 100 | for { 101 | select { 102 | case c := <-input: 103 | { 104 | g, ok := groupDic[c.group] 105 | if false == ok { 106 | c.output(fmt.Errorf("invalid found:%s", c.group)) 107 | if c.count > 0 { 108 | close(c.callback) 109 | } 110 | continue 111 | } 112 | var keys []interface{} 113 | if c.count > 1 { 114 | keys = c.key.([]interface{}) 115 | } else { 116 | keys = []interface{}{c.key} 117 | } 118 | 119 | for index := range keys { 120 | key := keys[index] 121 | // cache provider 122 | if g.cache != nil && c.forced == false { 123 | res, err := g.cache.Get(g.name, key) 124 | if res != nil && err == nil { 125 | c.output(res) 126 | continue 127 | } 128 | } 129 | target, ok := g.cmdDic[key] 130 | if ok { 131 | g.cmdDic[key] = append(target, c) 132 | } else { 133 | g.cmdDic[key] = []*cmd{c} 134 | g.keyDic[key] = true 135 | } 136 | 137 | if len(g.keyDic) >= g.batchSize { 138 | b := &batchCmd{group: g.name} 139 | b.keys = make([]interface{}, len(g.keyDic)) 140 | index := 0 141 | for k := range g.keyDic { 142 | b.keys[index] = k 143 | index = index + 1 144 | } 145 | g.keyDic = make(map[interface{}]bool) 146 | go doing(ctx, b, g.method) 147 | } 148 | } 149 | } 150 | case b := <-output: 151 | { 152 | g, _ := groupDic[b.group] 153 | if b.err != nil { 154 | for _, key := range b.keys { 155 | target, ok := g.cmdDic[key] 156 | if ok { 157 | for index := range target { 158 | c := target[index] 159 | c.output(b.err) 160 | } 161 | } 162 | delete(g.cmdDic, key) 163 | } 164 | } else { 165 | for _, key := range b.keys { 166 | res, ok := b.result[key] 167 | if false == ok { 168 | res = fmt.Errorf("invalid key:%s", key) 169 | } else { 170 | // cache provider 171 | if g.cache != nil && res != nil { 172 | g.cache.Put(g.name, key, res) 173 | } 174 | } 175 | target, ok := g.cmdDic[key] 176 | if ok { 177 | for index := range target { 178 | c := target[index] 179 | c.output(res) 180 | } 181 | } 182 | delete(g.cmdDic, key) 183 | } 184 | } 185 | } 186 | case <-timer.C: 187 | { 188 | for _, g := range groupDic { 189 | if len(g.keyDic) > 0 { 190 | b := &batchCmd{group: g.name} 191 | b.keys = make([]interface{}, len(g.keyDic)) 192 | index := 0 193 | for k := range g.keyDic { 194 | b.keys[index] = k 195 | index = index + 1 196 | } 197 | g.keyDic = make(map[interface{}]bool) 198 | go doing(ctx, b, g.method) 199 | } 200 | } 201 | timer.Reset(10 * time.Millisecond) 202 | } 203 | } 204 | } 205 | } 206 | 207 | func Get(group string, key interface{}) (interface{}, error) { 208 | return get(group, false, key) 209 | } 210 | 211 | func ForceGet(group string, key interface{}) (interface{}, error) { 212 | return get(group, true, key) 213 | } 214 | 215 | func get(group string, forced bool, key interface{}) (interface{}, error) { 216 | c := &cmd{ 217 | group: group, 218 | key: key, 219 | count: 1, 220 | callback: make(chan interface{}, 1), 221 | forced: forced, 222 | } 223 | input <- c 224 | res := <-c.callback 225 | switch res.(type) { 226 | case error: 227 | return nil, res.(error) 228 | default: 229 | return res, nil 230 | } 231 | } 232 | 233 | func MGet(group string, keys ...interface{}) []interface{} { 234 | return mget(group, false, keys...) 235 | } 236 | 237 | func ForceMGet(group string, keys ...interface{}) []interface{} { 238 | return mget(group, true, keys...) 
239 | } 240 | 241 | func mget(group string, forced bool, keys ...interface{}) []interface{} { 242 | c := &cmd{ 243 | group: group, 244 | key: keys, 245 | count: len(keys), 246 | callback: make(chan interface{}, len(keys)), 247 | forced: forced, 248 | } 249 | input <- c 250 | results := make([]interface{}, 0) 251 | for result := range c.callback { 252 | results = append(results, result) 253 | } 254 | return results 255 | } 256 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/RyouZhang/async-go 2 | 3 | go 1.17 4 | -------------------------------------------------------------------------------- /kvcache.go: -------------------------------------------------------------------------------- 1 | package async 2 | 3 | import ( 4 | "container/list" 5 | "errors" 6 | "sync" 7 | "time" 8 | ) 9 | 10 | type operation struct { 11 | method func(*KVData) (interface{}, error) 12 | callback chan interface{} 13 | } 14 | 15 | type keyInfo struct { 16 | key interface{} 17 | expire int64 18 | } 19 | 20 | type KVData struct { 21 | pairs map[interface{}]interface{} 22 | keys *list.List 23 | index map[interface{}]*list.Element 24 | lru bool 25 | ttl int64 //second 26 | maxSize int 27 | } 28 | 29 | func newKVData() *KVData { 30 | return &KVData{ 31 | pairs: make(map[interface{}]interface{}), 32 | keys: list.New(), 33 | index: make(map[interface{}]*list.Element), 34 | lru: false, 35 | ttl: 0, 36 | maxSize: 0, 37 | } 38 | } 39 | 40 | func (kv *KVData) drain() { 41 | if kv.maxSize == 0 { 42 | return 43 | } 44 | offset := kv.keys.Len() - kv.maxSize 45 | for offset > 0 { 46 | ele := kv.keys.Front() 47 | info := ele.Value.(*keyInfo) 48 | kv.keys.Remove(ele) 49 | delete(kv.index, info.key) 50 | delete(kv.pairs, info.key) 51 | offset = offset - 1 52 | } 53 | } 54 | 55 | func (kv *KVData) updateLRU(keys []interface{}) { 56 | if false == kv.lru { 57 | return 58 | } 59 | now := time.Now().Unix() + kv.ttl 60 | for _, key := range keys { 61 | ele, ok := kv.index[key] 62 | if ok { 63 | info := ele.Value.(*keyInfo) 64 | info.expire = now 65 | kv.keys.MoveToBack(ele) 66 | } 67 | } 68 | } 69 | 70 | func (kv *KVData) updateTTL() { 71 | if kv.ttl <= 0 { 72 | return 73 | } 74 | now := time.Now().Unix() 75 | for { 76 | if kv.keys.Len() == 0 { 77 | break 78 | } 79 | ele := kv.keys.Front() 80 | info := ele.Value.(*keyInfo) 81 | if info.expire > now { 82 | break 83 | } 84 | delete(kv.pairs, info.key) 85 | delete(kv.index, info.key) 86 | kv.keys.Remove(ele) 87 | } 88 | } 89 | 90 | func (kv *KVData) Set(key interface{}, value interface{}) { 91 | defer kv.drain() 92 | kv.pairs[key] = value 93 | 94 | ele, ok := kv.index[key] 95 | if ok { 96 | kv.keys.MoveToBack(ele) 97 | info := ele.Value.(*keyInfo) 98 | info.expire = time.Now().Unix() + kv.ttl 99 | } else { 100 | info := &keyInfo{key: key, expire: time.Now().Unix() + kv.ttl} 101 | kv.keys.PushBack(info) 102 | kv.index[key] = kv.keys.Back() 103 | } 104 | } 105 | 106 | func (kv *KVData) MSet(keys []interface{}, values []interface{}) { 107 | defer kv.drain() 108 | for i, key := range keys { 109 | kv.pairs[key] = values[i] 110 | 111 | ele, ok := kv.index[key] 112 | if ok { 113 | kv.keys.MoveToBack(ele) 114 | info := ele.Value.(*keyInfo) 115 | info.expire = time.Now().Unix() + kv.ttl 116 | } else { 117 | info := &keyInfo{key: key, expire: time.Now().Unix() + kv.ttl} 118 | kv.keys.PushBack(info) 119 | kv.index[key] = kv.keys.Back() 120 | } 121 | } 122 | } 
123 | 124 | func (kv *KVData) Merge(pairs map[interface{}]interface{}) { 125 | defer kv.drain() 126 | for key, value := range pairs { 127 | kv.pairs[key] = value 128 | 129 | ele, ok := kv.index[key] 130 | if ok { 131 | kv.keys.MoveToBack(ele) 132 | info := ele.Value.(*keyInfo) 133 | info.expire = time.Now().Unix() + kv.ttl 134 | } else { 135 | info := &keyInfo{key: key, expire: time.Now().Unix() + kv.ttl} 136 | kv.keys.PushBack(info) 137 | kv.index[key] = kv.keys.Back() 138 | } 139 | } 140 | } 141 | 142 | func (kv *KVData) Get(key interface{}) (interface{}, error) { 143 | defer kv.updateLRU([]interface{}{key}) 144 | value, ok := kv.pairs[key] 145 | if ok { 146 | return value, nil 147 | } else { 148 | return nil, errors.New("Invalid Key") 149 | } 150 | } 151 | 152 | func (kv *KVData) MGet(keys []interface{}) []interface{} { 153 | defer kv.updateLRU(keys) 154 | values := make([]interface{}, len(keys)) 155 | for i, key := range keys { 156 | value, ok := kv.pairs[key] 157 | if ok { 158 | values[i] = value 159 | } else { 160 | values[i] = errors.New("Invalid Key") 161 | } 162 | } 163 | return values 164 | } 165 | 166 | func (kv *KVData) Del(key interface{}) { 167 | delete(kv.pairs, key) 168 | ele, ok := kv.index[key] 169 | if ok { 170 | kv.keys.Remove(ele) 171 | delete(kv.index, key) 172 | } 173 | } 174 | 175 | func (kv *KVData) MDel(keys []interface{}) { 176 | for _, key := range keys { 177 | kv.Del(key) 178 | } 179 | } 180 | 181 | func (kv *KVData) Keys() []interface{} { 182 | keys := make([]interface{}, len(kv.pairs)) 183 | index := 0 184 | for key := range kv.pairs { 185 | keys[index] = key 186 | index = index + 1 187 | } 188 | return keys 189 | } 190 | 191 | func (kv *KVData) Clean() { 192 | for { 193 | ele := kv.keys.Front() 194 | if ele == nil { 195 | return 196 | } 197 | info := ele.Value.(*keyInfo) 198 | kv.keys.Remove(ele) 199 | delete(kv.index, info.key) 200 | delete(kv.pairs, info.key) 201 | } 202 | } 203 | 204 | type KVCache struct { 205 | data *KVData 206 | queue chan operation 207 | shutdown chan bool 208 | desLock sync.RWMutex 209 | isDestory bool 210 | } 211 | 212 | func NewKVCache() *KVCache { 213 | kv := &KVCache{ 214 | data: newKVData(), 215 | queue: make(chan operation, 16), 216 | shutdown: make(chan bool), 217 | isDestory: false, 218 | } 219 | go kv.runloop() 220 | return kv 221 | } 222 | 223 | func (kv *KVCache) TTL(ttl int64) *KVCache { 224 | kv.data.ttl = ttl 225 | return kv 226 | } 227 | 228 | func (kv *KVCache) LRU(flag bool) *KVCache { 229 | kv.data.lru = flag 230 | return kv 231 | } 232 | 233 | func (kv *KVCache) MaxSize(maxSize int) *KVCache { 234 | kv.data.maxSize = maxSize 235 | return kv 236 | } 237 | 238 | func (kv *KVCache) runloop() { 239 | timer := time.NewTimer(1 * time.Second) 240 | for { 241 | select { 242 | case <-kv.shutdown: 243 | { 244 | timer.Stop() 245 | goto End 246 | } 247 | case <-timer.C: 248 | { 249 | if kv.data.ttl > 0 { 250 | kv.data.updateTTL() 251 | } 252 | timer.Reset(1 * time.Second) 253 | } 254 | case op := <-kv.queue: 255 | { 256 | func() { 257 | defer func() { 258 | if e := recover(); e != nil { 259 | op.callback <- e.(error) 260 | } 261 | }() 262 | res, err := op.method(kv.data) 263 | if err != nil { 264 | op.callback <- err 265 | } else { 266 | op.callback <- res 267 | } 268 | }() 269 | } 270 | } 271 | } 272 | End: 273 | close(kv.queue) 274 | } 275 | 276 | func (kv *KVCache) Commit(method func(*KVData) (interface{}, error)) (interface{}, error) { 277 | kv.desLock.RLock() 278 | if kv.isDestory { 279 | kv.desLock.RUnlock() 280 | return 
nil, errors.New("Invalid KVCache") 281 | } 282 | output := make(chan interface{}, 1) 283 | defer close(output) 284 | kv.queue <- operation{method: method, callback: output} 285 | kv.desLock.RUnlock() 286 | 287 | select { 288 | case res := <-output: 289 | { 290 | switch res.(type) { 291 | case error: 292 | { 293 | return nil, res.(error) 294 | } 295 | default: 296 | { 297 | return res, nil 298 | } 299 | } 300 | } 301 | } 302 | } 303 | 304 | func (kv *KVCache) Destory() { 305 | kv.desLock.Lock() 306 | defer kv.desLock.Unlock() 307 | if kv.isDestory { 308 | return 309 | } 310 | kv.isDestory = true 311 | kv.shutdown <- true 312 | close(kv.shutdown) 313 | } 314 | -------------------------------------------------------------------------------- /merge.go: -------------------------------------------------------------------------------- 1 | package async 2 | 3 | import ( 4 | "errors" 5 | "sync" 6 | ) 7 | 8 | type request struct { 9 | key string 10 | method func() (interface{}, error) 11 | callback chan *reply 12 | } 13 | 14 | type reply struct { 15 | key string 16 | result interface{} 17 | err error 18 | } 19 | 20 | type Merge struct { 21 | callbackDic map[string][]chan *reply 22 | inputQueue chan *request 23 | outputQueue chan *reply 24 | shutdown chan bool 25 | wg sync.WaitGroup 26 | isDestory bool 27 | destoryOnce sync.Once 28 | } 29 | 30 | func NewMerge() *Merge { 31 | m := &Merge{ 32 | callbackDic: make(map[string][]chan *reply), 33 | inputQueue: make(chan *request, 128), 34 | outputQueue: make(chan *reply, 16), 35 | shutdown: make(chan bool), 36 | isDestory: false, 37 | } 38 | go m.runloop() 39 | return m 40 | } 41 | 42 | func (m *Merge) runloop() { 43 | for { 44 | select { 45 | case <-m.shutdown: 46 | { 47 | return 48 | } 49 | case rep := <-m.outputQueue: 50 | { 51 | target, ok := m.callbackDic[rep.key] 52 | if ok { 53 | for _, callback := range target { 54 | callback <- rep 55 | } 56 | delete(m.callbackDic, rep.key) 57 | } 58 | } 59 | case req := <-m.inputQueue: 60 | { 61 | target, ok := m.callbackDic[req.key] 62 | if ok { 63 | m.callbackDic[req.key] = append(target, req.callback) 64 | } else { 65 | target = make([]chan *reply, 1) 66 | target[0] = req.callback 67 | m.callbackDic[req.key] = target 68 | 69 | go func(key string, method func() (interface{}, error)) { 70 | res, err := Safety(method) 71 | m.outputQueue <- &reply{key: key, result: res, err: err} 72 | }(req.key, req.method) 73 | } 74 | } 75 | } 76 | } 77 | } 78 | 79 | func (m *Merge) Destory() { 80 | m.destoryOnce.Do(func() { 81 | m.isDestory = true 82 | }) 83 | m.wg.Wait() 84 | close(m.shutdown) 85 | close(m.inputQueue) 86 | close(m.outputQueue) 87 | } 88 | 89 | func (m *Merge) Exec(key string, method func() (interface{}, error)) (interface{}, error) { 90 | if m.isDestory { 91 | return nil, errors.New("Merge Destoried") 92 | } 93 | m.wg.Add(1) 94 | defer m.wg.Done() 95 | 96 | callback := make(chan *reply, 1) 97 | m.inputQueue <- &request{key: key, method: method, callback: callback} 98 | 99 | res := <-callback 100 | close(callback) 101 | return res.result, res.err 102 | } 103 | -------------------------------------------------------------------------------- /task/group.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "time" 7 | 8 | "github.com/RyouZhang/async-go" 9 | ) 10 | 11 | type taskGroup struct { 12 | name string 13 | batchSize int 14 | timeRange int 15 | 16 | requestQueue chan *request 17 | resultQueue chan *result 18 | 19 | 
workerCount int 20 | maxWorker int 21 | 22 | taskToReq map[string]*request 23 | mergeTaskDic map[string][]Task 24 | groupTaskDic map[string][]Task 25 | 26 | tasks []Task 27 | 28 | method func(...Task) (map[string]interface{}, error) 29 | } 30 | 31 | func newTaskGroup(name string, batchSize int, maxWorker int, timeRange int, method func(...Task) (map[string]interface{}, error)) *taskGroup { 32 | tg := &taskGroup{ 33 | name: name, 34 | batchSize: batchSize, 35 | timeRange: timeRange, 36 | requestQueue: make(chan *request, 128), 37 | resultQueue: make(chan *result, 128), 38 | workerCount: 0, 39 | maxWorker: maxWorker, 40 | taskToReq: make(map[string]*request), 41 | mergeTaskDic: make(map[string][]Task), 42 | groupTaskDic: make(map[string][]Task), 43 | tasks: make([]Task, 0), 44 | method: method, 45 | } 46 | 47 | go tg.runloop() 48 | 49 | return tg 50 | } 51 | 52 | func (tg *taskGroup) runloop() { 53 | ctx := context.Background() 54 | timer := time.NewTimer(time.Duration(tg.timeRange) * time.Millisecond) 55 | for { 56 | select { 57 | case req := <-tg.requestQueue: 58 | { 59 | for i, _ := range req.tasks { 60 | t := req.tasks[i] 61 | 62 | _, ok := tg.taskToReq[t.UniqueId()] 63 | if !ok { 64 | tg.taskToReq[t.UniqueId()] = req 65 | } else { 66 | req.callback <- &result{key: t.UniqueId(), err: fmt.Errorf("duplicate unique id %s", t.UniqueId())} 67 | req.count-- 68 | if req.count == 0 { 69 | close(req.callback) 70 | } 71 | continue 72 | } 73 | 74 | // merge 75 | _, ok = t.(MergeTask) 76 | if ok { 77 | mkey := t.(MergeTask).MergeBy() 78 | if len(mkey) > 0 { 79 | target, ok := tg.mergeTaskDic[mkey] 80 | if ok { 81 | tg.mergeTaskDic[mkey] = append(target, t) 82 | continue 83 | } else { 84 | tg.mergeTaskDic[mkey] = []Task{t} 85 | } 86 | } 87 | } 88 | // group 89 | _, ok = t.(GroupTask) 90 | if ok { 91 | gkey := t.(GroupTask).GroupBy() 92 | if len(gkey) > 0 { 93 | target, ok := tg.groupTaskDic[gkey] 94 | if ok { 95 | tg.groupTaskDic[gkey] = append(target, t) 96 | } else { 97 | tg.groupTaskDic[gkey] = []Task{t} 98 | } 99 | continue 100 | } 101 | } 102 | // default 103 | tg.tasks = append(tg.tasks, t) 104 | } 105 | tg.schedule(ctx) 106 | } 107 | case res := <-tg.resultQueue: 108 | { 109 | tg.workerCount-- 110 | //check merge 111 | if len(res.mkey) > 0 { 112 | target, ok := tg.mergeTaskDic[res.mkey] 113 | if ok { 114 | for i, _ := range target { 115 | t := target[i] 116 | req, ok := tg.taskToReq[t.UniqueId()] 117 | if ok { 118 | req.callback <- &result{ 119 | key: t.UniqueId(), 120 | val: res.val, 121 | err: res.err, 122 | } 123 | req.count-- 124 | if req.count == 0 { 125 | close(req.callback) 126 | } 127 | delete(tg.taskToReq, t.UniqueId()) 128 | } 129 | } 130 | delete(tg.mergeTaskDic, res.mkey) 131 | } 132 | } else { 133 | req, ok := tg.taskToReq[res.key] 134 | if ok { 135 | req.callback <- res 136 | req.count-- 137 | if req.count == 0 { 138 | close(req.callback) 139 | } 140 | delete(tg.taskToReq, res.key) 141 | } 142 | } 143 | tg.schedule(ctx) 144 | } 145 | case <-timer.C: 146 | { 147 | tg.timerSchedule(ctx) 148 | timer.Reset(time.Duration(tg.timeRange) * time.Millisecond) 149 | } 150 | } 151 | } 152 | } 153 | 154 | func (tg *taskGroup) schedule(ctx context.Context) { 155 | tg.scheduleGroupTask(ctx, tg.batchSize) 156 | tg.scheduleTask(ctx) 157 | } 158 | 159 | func (tg *taskGroup) timerSchedule(ctx context.Context) { 160 | tg.scheduleGroupTask(ctx, 0) 161 | tg.scheduleTask(ctx) 162 | } 163 | 164 | func (tg *taskGroup) scheduleGroupTask(ctx context.Context, max int) { 165 | if tg.workerCount >= 
tg.maxWorker { 166 | return 167 | } 168 | 169 | delKeys := make([]string, 0) 170 | for gkey, _ := range tg.groupTaskDic { 171 | tasks := tg.groupTaskDic[gkey] 172 | 173 | if len(tasks) < max { 174 | continue 175 | } 176 | 177 | for { 178 | if tg.workerCount >= tg.maxWorker { 179 | goto CLEAN 180 | } 181 | if len(tasks) <= tg.batchSize { 182 | tg.workerCount++ 183 | go tg.running(ctx, tasks) 184 | delKeys = append(delKeys, gkey) 185 | break 186 | } 187 | 188 | target := tasks[:tg.batchSize] 189 | tasks = tasks[tg.batchSize:] 190 | tg.groupTaskDic[gkey] = tasks 191 | 192 | tg.workerCount++ 193 | go tg.running(ctx, target) 194 | } 195 | } 196 | CLEAN: 197 | for i, _ := range delKeys { 198 | gkey := delKeys[i] 199 | delete(tg.groupTaskDic, gkey) 200 | } 201 | } 202 | 203 | func (tg *taskGroup) scheduleTask(ctx context.Context) { 204 | for { 205 | if tg.workerCount >= tg.maxWorker || len(tg.tasks) == 0 { 206 | break 207 | } 208 | 209 | t := tg.tasks[0] 210 | tg.workerCount++ 211 | go tg.running(ctx, []Task{t}) 212 | if len(tg.tasks) == 1 { 213 | tg.tasks = tg.tasks[:0] 214 | } else { 215 | tg.tasks = tg.tasks[1:] 216 | } 217 | } 218 | } 219 | 220 | func (tg *taskGroup) running(ctx context.Context, tasks []Task) { 221 | _, err := async.Safety(func() (interface{}, error) { 222 | res, err := tg.method(tasks...) 223 | if err != nil { 224 | return nil, err 225 | } 226 | 227 | for i, _ := range tasks { 228 | t := tasks[i] 229 | 230 | mkey := "" 231 | _, ok := t.(MergeTask) 232 | if ok { 233 | mkey = t.(MergeTask).MergeBy() 234 | } 235 | 236 | val, ok := res[t.UniqueId()] 237 | if ok { 238 | tg.resultQueue <- &result{ 239 | key: t.UniqueId(), 240 | mkey: mkey, 241 | val: val, 242 | } 243 | } else { 244 | tg.resultQueue <- &result{ 245 | key: t.UniqueId(), 246 | mkey: mkey, 247 | val: nil, 248 | } 249 | } 250 | } 251 | return nil, nil 252 | }) 253 | if err == nil { 254 | return 255 | } 256 | 257 | for i, _ := range tasks { 258 | t := tasks[i] 259 | 260 | mkey := "" 261 | _, ok := t.(MergeTask) 262 | if ok { 263 | mkey = t.(MergeTask).MergeBy() 264 | } 265 | 266 | tg.resultQueue <- &result{ 267 | key: t.UniqueId(), 268 | mkey: mkey, 269 | err: err, 270 | } 271 | } 272 | } 273 | -------------------------------------------------------------------------------- /task/task.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "sync" 7 | ) 8 | 9 | var ( 10 | taskGroupDic map[string]*taskGroup 11 | taskGroupMux sync.RWMutex 12 | ) 13 | 14 | func init() { 15 | taskGroupDic = make(map[string]*taskGroup) 16 | } 17 | 18 | type Task interface { 19 | UniqueId() string //must unique id 20 | } 21 | type MergeTask interface { 22 | MergeBy() string 23 | } 24 | type GroupTask interface { 25 | GroupBy() string 26 | } 27 | 28 | type result struct { 29 | key string 30 | mkey string 31 | val interface{} 32 | err error 33 | } 34 | 35 | type request struct { 36 | id string 37 | tasks []Task 38 | count int 39 | callback chan *result 40 | } 41 | 42 | type Option struct { 43 | maxWorker int 44 | batchSize int 45 | timeRange int //ms 46 | } 47 | 48 | func DefaultOption() *Option { 49 | return &Option{maxWorker: 8, batchSize: 32, timeRange: 10} 50 | } 51 | 52 | func (opt *Option) WithMaxWoker(max int) *Option { 53 | opt.maxWorker = max 54 | return opt 55 | } 56 | 57 | func (opt *Option) WithBatchSize(batchSize int) *Option { 58 | opt.batchSize = batchSize 59 | return opt 60 | } 61 | 62 | // ms 63 | func (opt *Option) WithTimeRange(timeRange 
int) *Option { 64 | opt.timeRange = timeRange 65 | return opt 66 | } 67 | 68 | func RegisterTaskGroup(taskGroup string, method func(...Task) (map[string]interface{}, error), opt *Option) { 69 | taskGroupMux.Lock() 70 | defer taskGroupMux.Unlock() 71 | if opt == nil { 72 | taskGroupDic[taskGroup] = newTaskGroup(taskGroup, 32, 8, 10, method) 73 | } else { 74 | taskGroupDic[taskGroup] = newTaskGroup(taskGroup, opt.batchSize, opt.maxWorker, opt.timeRange, method) 75 | } 76 | } 77 | 78 | func Exec(ctx context.Context, taskGroup string, in Task) (interface{}, error) { 79 | taskGroupMux.RLock() 80 | tg, ok := taskGroupDic[taskGroup] 81 | taskGroupMux.RUnlock() 82 | 83 | if !ok { 84 | return nil, fmt.Errorf("invalid task group %s", taskGroup) 85 | } 86 | 87 | req := &request{ 88 | tasks: []Task{in}, 89 | count: 1, 90 | callback: make(chan *result, 1), 91 | } 92 | tg.requestQueue <- req 93 | 94 | res := <-req.callback 95 | if res.err != nil { 96 | return nil, res.err 97 | } 98 | return res.val, nil 99 | } 100 | 101 | func BatchExec(ctx context.Context, taskGroup string, in ...Task) map[string]interface{} { 102 | taskGroupMux.RLock() 103 | tg, ok := taskGroupDic[taskGroup] 104 | taskGroupMux.RUnlock() 105 | 106 | if !ok { 107 | return nil 108 | } 109 | 110 | count := len(in) 111 | 112 | req := &request{ 113 | tasks: in, 114 | count: count, 115 | callback: make(chan *result, count), 116 | } 117 | tg.requestQueue <- req 118 | 119 | result := make(map[string]interface{}) 120 | 121 | for res := range req.callback { 122 | if res.err != nil { 123 | result[res.key] = res.err 124 | } else { 125 | result[res.key] = res.val 126 | } 127 | } 128 | return result 129 | } 130 | --------------------------------------------------------------------------------
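
Usage example for async.go (not part of the repository): a minimal sketch of Lambda, Retry, All and Parallel based on the signatures above. The sleep durations, error messages and job functions are illustrative placeholders.

package main

import (
	"errors"
	"fmt"
	"time"

	async "github.com/RyouZhang/async-go"
)

func main() {
	// Lambda runs a single function with a timeout; hitting the timeout
	// returns the "Async_Timeout" error instead of the result.
	res, err := async.Lambda(func() (interface{}, error) {
		time.Sleep(50 * time.Millisecond)
		return "done", nil
	}, 200*time.Millisecond)
	fmt.Println(res, err)

	// Retry re-runs a flaky call up to maxCount times, waiting count*interval
	// between attempts.
	attempt := 0
	res, err = async.Retry(func() (interface{}, error) {
		attempt++
		if attempt < 3 {
			return nil, errors.New("not ready yet")
		}
		return attempt, nil
	}, 3, 10*time.Millisecond)
	fmt.Println(res, err)

	// All runs every LambdaMethod concurrently; each result slot holds either
	// the value or the error returned by the corresponding method.
	jobs := []async.LambdaMethod{
		func() (interface{}, error) { return 1, nil },
		func() (interface{}, error) { return nil, errors.New("boom") },
	}
	for i, out := range async.All(jobs, time.Second) {
		fmt.Println(i, out)
	}

	// Parallel bounds concurrency instead of time; async.Unlimit falls back
	// to 64 workers.
	fmt.Println(async.Parallel(jobs, 2))
}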
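
Usage example for batch.go (not part of the repository): a minimal sketch of RegisterGroup with Get/MGet. The "user" group name, the loadUsers loader and the numeric keys are illustrative; passing a nil CacheProvider simply disables caching.

package main

import (
	"fmt"

	async "github.com/RyouZhang/async-go"
)

// loadUsers stands in for a bulk backend lookup; keys collected within the
// 10ms batch window (or up to the batch size) arrive in a single call.
func loadUsers(keys ...interface{}) (map[interface{}]interface{}, error) {
	out := make(map[interface{}]interface{}, len(keys))
	for _, k := range keys {
		out[k] = fmt.Sprintf("user-%v", k)
	}
	return out, nil
}

func main() {
	if err := async.RegisterGroup("user", 32, loadUsers, nil); err != nil {
		panic(err)
	}

	// Single-key lookup, batched transparently with concurrent callers.
	u, err := async.Get("user", 1001)
	fmt.Println(u, err)

	// Multi-key lookup; each returned slot is either a value or an error.
	for _, r := range async.MGet("user", 1001, 1002, 1003) {
		fmt.Println(r)
	}
}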
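
Usage example for kvcache.go (not part of the repository): a minimal sketch of KVCache. All reads and writes go through Commit, which runs the callback on the cache's single goroutine; TTL is in seconds and MaxSize bounds the number of keys. The key and value are illustrative.

package main

import (
	"fmt"

	async "github.com/RyouZhang/async-go"
)

func main() {
	cache := async.NewKVCache().TTL(60).LRU(true).MaxSize(1024)
	defer cache.Destory()

	// Write inside a commit.
	_, err := cache.Commit(func(kv *async.KVData) (interface{}, error) {
		kv.Set("answer", 42)
		return nil, nil
	})
	if err != nil {
		panic(err)
	}

	// Read inside another commit; a missing key yields an "Invalid Key" error.
	val, err := cache.Commit(func(kv *async.KVData) (interface{}, error) {
		return kv.Get("answer")
	})
	fmt.Println(val, err)
}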
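
Usage example for merge.go (not part of the repository): a minimal sketch of Merge, which collapses concurrent Exec calls that share a key into one invocation of the method, fanning the result out to every waiting caller. The key, sleep and call counter are illustrative.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	async "github.com/RyouZhang/async-go"
)

func main() {
	m := async.NewMerge()
	defer m.Destory()

	var calls int32
	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Callers using the same key while a call is in flight share
			// the single invocation and its result.
			res, err := m.Exec("profile:42", func() (interface{}, error) {
				atomic.AddInt32(&calls, 1)
				time.Sleep(20 * time.Millisecond) // stand-in for a slow backend call
				return "profile data", nil
			})
			fmt.Println(res, err)
		}()
	}
	wg.Wait()
	fmt.Println("backend calls:", atomic.LoadInt32(&calls)) // typically 1
}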
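
Usage example for the task package (not part of the repository): a minimal sketch of RegisterTaskGroup with Exec and BatchExec. The "demo" group, userTask type and its ids are illustrative; tasks sharing a GroupBy key are handed to the registered method in one batch, and the method's result map is keyed by UniqueId.

package main

import (
	"context"
	"fmt"

	"github.com/RyouZhang/async-go/task"
)

type userTask struct {
	id string
}

func (t userTask) UniqueId() string { return t.id }
func (t userTask) GroupBy() string  { return "users" } // batch all user lookups together

func main() {
	task.RegisterTaskGroup("demo", func(ts ...task.Task) (map[string]interface{}, error) {
		out := make(map[string]interface{}, len(ts))
		for _, t := range ts {
			out[t.UniqueId()] = "value-for-" + t.UniqueId()
		}
		return out, nil
	}, task.DefaultOption().WithBatchSize(16).WithTimeRange(5))

	// Run a single task.
	res, err := task.Exec(context.Background(), "demo", userTask{id: "u1"})
	fmt.Println(res, err)

	// Run several tasks; the result map is keyed by UniqueId, with an error
	// value for any task that failed.
	results := task.BatchExec(context.Background(), "demo", userTask{id: "u2"}, userTask{id: "u3"})
	for k, v := range results {
		fmt.Println(k, v)
	}
}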