├── LICENSE ├── README.md ├── doc.go ├── generic ├── README.md ├── doc.go └── multicast.go ├── go.mod ├── multicast.go ├── multicast_jig.go ├── multicast_test.go ├── svg ├── godev.svg └── godoc.svg └── test ├── README.md ├── benchmark_test.go ├── doc.go ├── example_test.go ├── integrity_test.go ├── internals_test.go ├── multicast.go └── testing_test.go /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (C) 2017-2020 René Post, ReactiveGo 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of 4 | this software and associated documentation files (the "Software"), to deal in 5 | the Software without restriction, including without limitation the rights to 6 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies 7 | of the Software, and to permit persons to whom the Software is furnished to do 8 | so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in all 11 | copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | SOFTWARE. 
-------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # multicast 2 | 3 | import "github.com/reactivego/multicast" 4 | 5 | [![](svg/godev.svg)](https://pkg.go.dev/github.com/reactivego/multicast?tab=doc) 6 | [![](svg/godoc.svg)](https://godoc.org/github.com/reactivego/multicast) 7 | 8 | Package `multicast` provides MxN multicast channels for Go with buffering and time based buffer eviction. 9 | It can be fed by multiple concurrent senders. It multicasts and replays messages to multiple concurrent receivers. 10 | 11 | If you are in a situation where you need to record and replay a stream of data or need to split a stream of data into multiple identical streams, then this package offers a fast and simple implementation. 12 | 13 | ## Example (Send 2x2) 14 | 15 | Send from 2 goroutines and receive in 2 goroutines. 16 | 17 | Code: 18 | ```go 19 | ch := multicast.NewChan(128, 2) 20 | 21 | // Send suppports multiple goroutine sending and stores a timestamp with 22 | // every message sent. 
23 | 24 | var wgs sync.WaitGroup 25 | wgs.Add(2) 26 | go func() { 27 | ch.Send("Hello") 28 | wgs.Done() 29 | }() 30 | go func() { 31 | ch.Send("World!") 32 | wgs.Done() 33 | }() 34 | 35 | print := func(value interface{}, err error, closed bool) bool { 36 | switch { 37 | case !closed: 38 | fmt.Println(value) 39 | case err != nil: 40 | fmt.Println(err) 41 | default: 42 | fmt.Println("closed") 43 | } 44 | return true 45 | } 46 | 47 | var wgr sync.WaitGroup 48 | wgr.Add(2) 49 | ep1, _ := ch.NewEndpoint(multicast.ReplayAll) 50 | go func() { 51 | ep1.Range(print, 0) 52 | wgr.Done() 53 | }() 54 | 55 | ep2, _ := ch.NewEndpoint(multicast.ReplayAll) 56 | go func() { 57 | ep2.Range(print, 0) 58 | wgr.Done() 59 | }() 60 | 61 | wgs.Wait() 62 | ch.Close(nil) 63 | if ch.Closed() { 64 | fmt.Println("channel closed") 65 | } 66 | wgr.Wait() 67 | ``` 68 | Unordered Output: 69 | ``` 70 | Hello 71 | Hello 72 | World! 73 | World! 74 | closed 75 | closed 76 | channel closed 77 | ``` 78 | 79 | ## Example (FastSend 1x2) 80 | Send from 1 goroutine and receive in 2 goroutines 81 | Code: 82 | ```go 83 | ch := multicast.NewChan(128, 2) 84 | 85 | // FastSend allows only a single goroutine sending and does not store 86 | // timestamps with messages. 
87 | 88 | ch.FastSend("Hello") 89 | ch.FastSend("World!") 90 | ch.Close(nil) 91 | if ch.Closed() { 92 | fmt.Println("channel closed") 93 | } 94 | 95 | print := func(value interface{}, err error, closed bool) bool { 96 | switch { 97 | case !closed: 98 | fmt.Println(value) 99 | case err != nil: 100 | fmt.Println(err) 101 | default: 102 | fmt.Println("closed") 103 | } 104 | return true 105 | } 106 | 107 | var wg sync.WaitGroup 108 | wg.Add(2) 109 | ep1, _ := ch.NewEndpoint(multicast.ReplayAll) 110 | go func() { 111 | ep1.Range(print, 0) 112 | wg.Done() 113 | }() 114 | 115 | ep2, _ := ch.NewEndpoint(multicast.ReplayAll) 116 | go func() { 117 | ep2.Range(print, 0) 118 | wg.Done() 119 | }() 120 | wg.Wait() 121 | ``` 122 | Unordered Output: 123 | ``` 124 | channel closed 125 | Hello 126 | Hello 127 | World! 128 | World! 129 | closed 130 | closed 131 | ``` 132 | 133 | ## Compared to Go channels 134 | The standard Go channel cannot multicast the same message to multiple receivers and it cannot play back messages previously sent to it. The `multicast.Chan` type offered here does. 135 | 136 | Additionally, you can even evict messages from the buffer that are past a certain age because `multicast.Chan` also stores a timestamp with each message sent. 137 | 138 | ## Compared to other Multicast packages 139 | This multicast channel is different from other multicast implementations. 140 | 141 | 1. It uses only fast synchronization primitives like atomic operations to implement its features. 142 | 2. It doesn't use goroutines internally. 143 | 3. It uses internal struct padding to speed up CPU cache access. 144 | 145 | This allows it to operate at a very high level of performance. 146 | 147 | ## Regenerating this Package 148 | This package is generated from generics in the sub-folder `generic` by the [jig](http://github.com/reactivego/jig) tool. 149 | You don't need to regenerate this package in order to use it. However, if you are interested in regenerating it, then read on. 
150 | 151 | The [jig](http://github.com/reactivego/jig) tool provides the parametric polymorphism capability that Go 1 is missing. 152 | It works by replacing place-holder types of generic functions and datatypes with `interface{}` (it can also generate statically typed code though). 153 | 154 | To regenerate, change the current working directory to the package directory and run the [jig](http://github.com/reactivego/jig) tool as follows: 155 | 156 | ```bash 157 | $ go get -d github.com/reactivego/jig 158 | $ go run github.com/reactivego/jig -v 159 | ``` 160 | 161 | ## License 162 | This library is licensed under the terms of the MIT License. See [LICENSE](LICENSE) file in this repository for copyright notice and exact wording. 163 | -------------------------------------------------------------------------------- /doc.go: -------------------------------------------------------------------------------- 1 | /* 2 | Package multicast provides a Chan type that can multicast and replay messages to 3 | multiple receivers. 4 | 5 | Multicast and Replay 6 | 7 | Native Go channels don't support multicasting the same message to multiple 8 | receivers and they don't support replaying previously sent messages. 9 | 10 | Unlike native Go channels, messages send to this channel are multicast to 11 | all receiving endpoints. A new endpoint created while the channel is operational 12 | can choose to receive messages previously sent by specifying a replay count 13 | parameter, or 0 to indicate it is only interested in new messages. 14 | 15 | You can also limit playback to messages younger than a certain age because 16 | the channel stores a timestamp with each message you send to it. 17 | 18 | Just like native Go channels, the channel exhibits blocking backpressure to 19 | the sender goroutines when the channel buffer is full. Total speed of the 20 | channel is dictated by the slowest receiver. 
21 | 22 | Lock and Goroutine free 23 | 24 | This multicast channel is different from other multicast implementations in 25 | that it uses only fast synchronization primitives like atomic operations to 26 | implement its features. Furthermore, it also doesn't use goroutines internally. 27 | This implementation is low-latency and has a high throughput. 28 | 29 | If you are in a situation where you need to record and replay a stream 30 | of data or you need to split a stream of data into multiple identical streams, 31 | then this package offers a fast and simple solution. 32 | 33 | Heterogeneous 34 | 35 | Heterogeneous simply means that you can mix types, that is very convenient but 36 | not typesafe. The Chan type provided in this package supports sending and 37 | receiving values of mixed type: 38 | 39 | ch := NewChan(128, 1) 40 | ch.Send("hello") 41 | ch.Send(42) 42 | ch.Send(1.6180) 43 | ch.Close(nil) 44 | 45 | Regenerating this Package 46 | 47 | The implementation in this package is generated from a generic implementation 48 | of the Chan type found in the subdirectory "generic" inside this package. By 49 | replacing the place-holder type with "interface{}" a heterogeneous Chan type 50 | is created. 
To regenerate this channel implementation, run jig inside this 51 | package directory: 52 | 53 | go get -d github.com/reactivego/generics/cmd/jig 54 | go run github.com/reactivego/generics/cmd/jig -v 55 | */ 56 | package multicast 57 | 58 | import _ "github.com/reactivego/multicast/generic" 59 | -------------------------------------------------------------------------------- /generic/README.md: -------------------------------------------------------------------------------- 1 | # multicast 2 | 3 | import "github.com/reactivego/multicast/generic" 4 | 5 | [![](../svg/godev.svg)](https://pkg.go.dev/github.com/reactivego/multicast/generic?tab=doc) 6 | [![](../svg/godoc.svg)](http://godoc.org/github.com/reactivego/multicast/generic) 7 | 8 | Package `multicast` provides generic MxN multicast channels for Go with buffering and time based buffer eviction. 9 | It can be fed by multiple concurrent senders. It multicasts and replays messages to multiple concurrent receivers. 10 | 11 | Install the [jig tool](https://github.com/reactivego/jig) to use the library. 12 | -------------------------------------------------------------------------------- /generic/doc.go: -------------------------------------------------------------------------------- 1 | // Package multicast provides generic MxN multicast channels for Go with 2 | // buffering and time based buffer eviction. It can be fed by multiple 3 | // concurrent senders. It multicasts and replays messages to multiple 4 | // concurrent receivers. 5 | // 6 | // Install the jig tool (https://github.com/reactivego/jig) to use the library. 7 | // 8 | // Unlike native Go channels, messages send to this channel are multicasted to 9 | // all receivers. A new endpoint created while the channel is operational can 10 | // choose to receive messages previously sent by specifying a replay count 11 | // parameter, or 0 to indicate it is only interested in new messages. 
12 | // 13 | // Just like native Go channels, the channel exhibits blocking backpressure to 14 | // the sender goroutines when the channel buffer is full. Total speed of the 15 | // channel is dictated by the slowest receiver. 16 | // 17 | // Since this is a generics library, the way in which a channel is created will 18 | // determine strong or weak typing. Channels can be strongly typed by specifying 19 | // an explicit type, for example: 20 | // 21 | // NewChanInt(128,8) 22 | // NewChanString(128,8) 23 | // 24 | // Or alternatively you can send heterogeneous messages on an interface{} typed 25 | // channel created as follows: 26 | // 27 | // NewChan(128,8) 28 | package multicast 29 | 30 | type foo interface{} 31 | -------------------------------------------------------------------------------- /generic/multicast.go: -------------------------------------------------------------------------------- 1 | package multicast 2 | 3 | import ( 4 | "fmt" 5 | "math" 6 | "runtime" 7 | "sync" 8 | "sync/atomic" 9 | "time" 10 | ) 11 | 12 | //jig:template ChannelError 13 | 14 | type ChannelError string 15 | 16 | func (e ChannelError) Error() string { return string(e) } 17 | 18 | //jig:template ErrOutOfEndpoints 19 | //jig:needs ChannelError 20 | 21 | // ErrOutOfEndpoints is returned by NewEndpoint when the maximum number of 22 | // endpoints has already been created. 23 | const ErrOutOfEndpoints = ChannelError("out of endpoints") 24 | 25 | //jig:template ChanPadding 26 | 27 | const _PADDING = 1 // 0 turns padding off, 1 turns it on. 28 | const _EXTRA_PADDING = 0 * 64 // multiples of 64, benefits inconclusive. 
29 | 30 | type pad60 [_PADDING * (_EXTRA_PADDING + 60)]byte 31 | type pad56 [_PADDING * (_EXTRA_PADDING + 56)]byte 32 | type pad48 [_PADDING * (_EXTRA_PADDING + 48)]byte 33 | type pad40 [_PADDING * (_EXTRA_PADDING + 40)]byte 34 | type pad32 [_PADDING * (_EXTRA_PADDING + 32)]byte 35 | 36 | //jig:template ChanState 37 | 38 | // Activity of committer 39 | const ( 40 | resting uint32 = iota 41 | working 42 | ) 43 | 44 | // Activity of endpoints 45 | const ( 46 | idling uint32 = iota 47 | enumerating 48 | creating 49 | ) 50 | 51 | // State of endpoint and channel 52 | const ( 53 | active uint64 = iota 54 | canceled 55 | closed 56 | ) 57 | 58 | // Cursor is parked so it does not influence advancing the commit index. 59 | const ( 60 | parked uint64 = math.MaxUint64 61 | ) 62 | 63 | const ( 64 | // ReplayAll can be passed to NewEndpoint to retain as many of the 65 | // previously sent messages as possible that are still in the buffer. 66 | ReplayAll uint64 = math.MaxUint64 67 | ) 68 | 69 | //jig:template Chan 70 | //jig:needs ChanPadding, ChanState 71 | 72 | // ChanFoo is a fast, concurrent multi-(casting,sending,receiving) buffered 73 | // channel. It is implemented using only sync/atomic operations. Spinlocks using 74 | // runtime.Gosched() are used in situations where goroutines are waiting or 75 | // contending for resources. 
76 | type ChanFoo struct { 77 | buffer []foo 78 | _________a pad40 79 | begin uint64 80 | _________b pad56 81 | end uint64 82 | _________c pad56 83 | commit uint64 84 | _________d pad56 85 | mod uint64 86 | _________e pad56 87 | endpoints endpointsFoo 88 | 89 | // ChanFoo State 90 | 91 | err error 92 | ____________f pad48 93 | channelState uint64 // active, closed 94 | ____________g pad56 95 | 96 | write uint64 97 | _________________h pad56 98 | start time.Time 99 | _________________i pad40 100 | written []int64 // nanoseconds since start 101 | _________________j pad40 102 | committerActivity uint32 // resting, working 103 | _________________k pad60 104 | 105 | receivers *sync.Cond 106 | _________________l pad56 107 | } 108 | 109 | type endpointsFoo struct { 110 | entry []EndpointFoo 111 | len uint32 112 | endpointsActivity uint32 // idling, enumerating, creating 113 | ________ pad32 114 | } 115 | 116 | //jig:template Endpoint 117 | //jig:embeds Chan 118 | 119 | // EndpointFoo is returned by a call to NewEndpoint on the channel. Every 120 | // endpoint should be used by only a single goroutine, so no sharing between 121 | // goroutines. 122 | type EndpointFoo struct { 123 | *ChanFoo 124 | _____________a pad56 125 | cursor uint64 126 | _____________b pad56 127 | endpointState uint64 // active, canceled, closed 128 | _____________c pad56 129 | lastActive time.Time // track activity to deterime when to sleep 130 | _____________d pad40 131 | endpointClosed uint64 // active, closed 132 | _____________e pad56 133 | } 134 | 135 | //jig:template NewChan 136 | //jig:needs Chan, endpoints 137 | 138 | // NewChanFoo creates a new channel. The parameters bufferCapacity and 139 | // endpointCapacity determine the size of the message buffer and maximum 140 | // number of concurrent receiving endpoints respectively. 141 | // 142 | // Note that bufferCapacity is always scaled up to a power of 2 so e.g. 143 | // specifying 400 will create a buffer of 512 (2^9). 
Also because of this a 144 | // bufferCapacity of 0 is scaled up to 1 (2^0). 145 | func NewChanFoo(bufferCapacity int, endpointCapacity int) *ChanFoo { 146 | // Round capacity up to power of 2 147 | size := uint64(1) << uint(math.Ceil(math.Log2(float64(bufferCapacity)))) 148 | c := &ChanFoo{ 149 | end: size, 150 | mod: size - 1, 151 | buffer: make([]foo, size), 152 | start: time.Now(), 153 | written: make([]int64, size), 154 | endpoints: endpointsFoo{ 155 | entry: make([]EndpointFoo, endpointCapacity), 156 | }, 157 | } 158 | c.receivers = sync.NewCond(c) 159 | return c 160 | } 161 | 162 | // Lock, empty method so we can pass *ChanFoo to sync.NewCond as a Locker. 163 | func (c *ChanFoo) Lock() {} 164 | 165 | // Unlock, empty method so we can pass *ChanFoo to sync.NewCond as a Locker. 166 | func (c *ChanFoo) Unlock() {} 167 | 168 | //jig:template Chan Close 169 | 170 | // Close will close the channel. Pass in an error or nil. Endpoints continue to 171 | // receive data until the buffer is empty. Only then will the close notification 172 | // be delivered to the Range function. 173 | func (c *ChanFoo) Close(err error) { 174 | if atomic.CompareAndSwapUint64(&c.channelState, active, closed) { 175 | c.err = err 176 | c.endpoints.Access(func(endpoints *endpointsFoo) { 177 | for i := uint32(0); i < endpoints.len; i++ { 178 | atomic.CompareAndSwapUint64(&endpoints.entry[i].endpointState, active, closed) 179 | } 180 | }) 181 | } 182 | c.receivers.Broadcast() 183 | } 184 | 185 | //jig:template Chan Closed 186 | 187 | // Closed returns true when the channel was closed using the Close method. 188 | func (c *ChanFoo) Closed() bool { 189 | return atomic.LoadUint64(&c.channelState) >= closed 190 | } 191 | 192 | //jig:template Chan FastSend 193 | //jig:needs endpoints, Chan slideBuffer 194 | 195 | // FastSend can be used to send values to the channel from a SINGLE goroutine. 
196 | // Also, this does not record the time a message was sent, so the maxAge value 197 | // passed to Range will be ignored. 198 | // 199 | // Note, that when the number of unread messages has reached bufferCapacity, then 200 | // the call to FastSend will block until the slowest Endpoint has read another 201 | // message. 202 | func (c *ChanFoo) FastSend(value foo) { 203 | for c.commit == c.end { 204 | if !c.slideBuffer() { 205 | return // channel was closed 206 | } 207 | } 208 | c.buffer[c.commit&c.mod] = value 209 | atomic.AddUint64(&c.commit, 1) 210 | c.receivers.Broadcast() 211 | } 212 | 213 | //jig:template Chan Send 214 | //jig:needs endpoints, Chan slideBuffer 215 | 216 | // Send can be used by concurrent goroutines to send values to the channel. 217 | // 218 | // Note, that when the number of unread messages has reached bufferCapacity, then 219 | // the call to Send will block until the slowest Endpoint has read another 220 | // message. 221 | func (c *ChanFoo) Send(value foo) { 222 | write := atomic.AddUint64(&c.write, 1) - 1 223 | for write >= atomic.LoadUint64(&c.end) { 224 | if !c.slideBuffer() { 225 | return // channel was closed 226 | } 227 | } 228 | c.buffer[write&c.mod] = value 229 | updated := time.Since(c.start).Nanoseconds() 230 | if updated == 0 { 231 | panic("clock failure; zero duration measured") 232 | } 233 | atomic.StoreInt64(&c.written[write&c.mod], updated<<1+1) 234 | c.receivers.Broadcast() 235 | } 236 | 237 | //jig:template Chan slideBuffer 238 | //jig:needs endpoints 239 | 240 | func (c *ChanFoo) slideBuffer() bool { 241 | slowestCursor := parked 242 | spinlock := c.endpoints.Access(func(endpoints *endpointsFoo) { 243 | for i := uint32(0); i < endpoints.len; i++ { 244 | cursor := atomic.LoadUint64(&endpoints.entry[i].cursor) 245 | if cursor < slowestCursor { 246 | slowestCursor = cursor 247 | } 248 | } 249 | if atomic.LoadUint64(&c.begin) < slowestCursor && slowestCursor <= atomic.LoadUint64(&c.end) { 250 | if c.mod < 16 { 251 | 
atomic.AddUint64(&c.begin, 1) 252 | atomic.AddUint64(&c.end, 1) 253 | } else { 254 | atomic.StoreUint64(&c.begin, slowestCursor) 255 | atomic.StoreUint64(&c.end, slowestCursor+c.mod+1) 256 | } 257 | } else { 258 | slowestCursor = parked 259 | } 260 | }) 261 | if slowestCursor == parked { 262 | if spinlock { 263 | runtime.Gosched() // spinlock while full 264 | } 265 | if atomic.LoadUint64(&c.channelState) != active { 266 | return false // !more 267 | } 268 | } 269 | return true // more 270 | } 271 | 272 | //jig:template Chan commitData 273 | 274 | func (c *ChanFoo) commitData() uint64 { 275 | commit := atomic.LoadUint64(&c.commit) 276 | if commit >= atomic.LoadUint64(&c.write) { 277 | return commit 278 | } 279 | if !atomic.CompareAndSwapUint32(&c.committerActivity, resting, working) { 280 | return commit // allow only a single receiver goroutine at a time 281 | } 282 | commit = atomic.LoadUint64(&c.commit) 283 | newcommit := commit 284 | for ; atomic.LoadInt64(&c.written[newcommit&c.mod])&1 == 1; newcommit++ { 285 | atomic.AddInt64(&c.written[newcommit&c.mod], -1) 286 | if newcommit >= atomic.LoadUint64(&c.end) { 287 | break 288 | } 289 | } 290 | write := atomic.LoadUint64(&c.write) 291 | if newcommit > write { 292 | panic(fmt.Sprintf("commitData: range error (commit=%d,write=%d,newcommit=%d)", commit, write, newcommit)) 293 | } 294 | if newcommit > commit { 295 | if !atomic.CompareAndSwapUint64(&c.commit, commit, newcommit) { 296 | panic(fmt.Sprintf("commitData; swap error (c.commit=%d,%d,%d)", c.commit, commit, newcommit)) 297 | } 298 | c.receivers.Broadcast() // fresh data! wakeup blocked receiver goroutines 299 | } 300 | atomic.StoreUint32(&c.committerActivity, resting) 301 | return atomic.LoadUint64(&c.commit) 302 | } 303 | 304 | //jig:template Chan NewEndpoint 305 | //jig:needs endpoints 306 | 307 | // NewEndpoint will create a new channel endpoint that can be used to receive 308 | // from the channel. 
The argument keep specifies how many entries of the 309 | // existing channel buffer to keep. 310 | // 311 | // After Close is called on the channel, any endpoints created after that 312 | // will still receive the number of messages as indicated in the keep parameter 313 | // and then subsequently the close. 314 | // 315 | // An endpoint that is canceled or read until it is exhausted (after channel was 316 | // closed) will be reused by NewEndpoint. 317 | func (c *ChanFoo) NewEndpoint(keep uint64) (*EndpointFoo, error) { 318 | return c.endpoints.NewForChanFoo(c, keep) 319 | } 320 | 321 | //jig:template endpoints 322 | //jig:needs Chan, ErrOutOfEndpoints 323 | 324 | func (e *endpointsFoo) NewForChanFoo(c *ChanFoo, keep uint64) (*EndpointFoo, error) { 325 | for !atomic.CompareAndSwapUint32(&e.endpointsActivity, idling, creating) { 326 | runtime.Gosched() 327 | } 328 | defer atomic.StoreUint32(&e.endpointsActivity, idling) 329 | var start uint64 330 | commit := c.commitData() 331 | begin := atomic.LoadUint64(&c.begin) 332 | if commit-begin <= keep { 333 | start = begin 334 | } else { 335 | start = commit - keep 336 | } 337 | if int(e.len) == len(e.entry) { 338 | for index := uint32(0); index < e.len; index++ { 339 | ep := &e.entry[index] 340 | if atomic.CompareAndSwapUint64(&ep.cursor, parked, start) { 341 | ep.endpointState = atomic.LoadUint64(&c.channelState) 342 | ep.lastActive = time.Now() 343 | return ep, nil 344 | } 345 | } 346 | return nil, ErrOutOfEndpoints 347 | } 348 | ep := &e.entry[e.len] 349 | ep.ChanFoo = c 350 | ep.cursor = start 351 | ep.endpointState = atomic.LoadUint64(&c.channelState) 352 | ep.lastActive = time.Now() 353 | e.len++ 354 | return ep, nil 355 | } 356 | 357 | func (e *endpointsFoo) Access(access func(*endpointsFoo)) bool { 358 | contention := false 359 | for !atomic.CompareAndSwapUint32(&e.endpointsActivity, idling, enumerating) { 360 | runtime.Gosched() 361 | contention = true 362 | } 363 | access(e) 364 | 
atomic.StoreUint32(&e.endpointsActivity, idling) 365 | return !contention 366 | } 367 | 368 | //jig:template Endpoint Range 369 | //jig:needs Endpoint 370 | 371 | // Range will call the passed in foreach function with all the messages in 372 | // the buffer, followed by all the messages received. When the foreach function 373 | // returns true Range will continue, when you return false this is the same as 374 | // calling Cancel. When canceled the foreach will never be called again. 375 | // Passing a maxAge duration other than 0 will skip messages that are older 376 | // than maxAge. 377 | // 378 | // When the channel is closed, eventually when the buffer is exhausted the close 379 | // with optional error will be notified by calling foreach one last time with 380 | // the closed parameter set to true. 381 | func (e *EndpointFoo) Range(foreach func(value foo, err error, closed bool) bool, maxAge time.Duration) { 382 | e.lastActive = time.Now() 383 | for { 384 | commit := e.commitData() 385 | for ; e.cursor == commit; commit = e.commitData() { 386 | if atomic.CompareAndSwapUint64(&e.endpointState, canceled, canceled) { 387 | atomic.StoreUint64(&e.cursor, parked) 388 | return 389 | } 390 | if atomic.LoadUint64(&e.commit) < atomic.LoadUint64(&e.write) { 391 | if e.endpointClosed == 1 { 392 | panic(fmt.Sprintf("data written after closing endpoint; commit(%d) write(%d)", 393 | atomic.LoadUint64(&e.commit), atomic.LoadUint64(&e.write))) 394 | } 395 | runtime.Gosched() // just backoff a little ~1us 396 | e.lastActive = time.Now() 397 | } else { 398 | now := time.Now() 399 | if now.Before(e.lastActive.Add(1 * time.Millisecond)) { 400 | if atomic.CompareAndSwapUint64(&e.endpointState, closed, closed) { 401 | e.endpointClosed = 1 // note close happened, but don't close yet. 
402 | } 403 | runtime.Gosched() // 0> 1 425 | if updated != 0 && updated <= stale { 426 | emit = false 427 | } 428 | } 429 | if emit && !foreach(item, nil, false) { 430 | atomic.StoreUint64(&e.endpointState, canceled) 431 | } 432 | if atomic.LoadUint64(&e.endpointState) == canceled { 433 | atomic.StoreUint64(&e.cursor, parked) 434 | return 435 | } 436 | } 437 | e.lastActive = time.Now() 438 | } 439 | } 440 | 441 | //jig:template Endpoint Cancel 442 | //jig:needs Endpoint 443 | 444 | // Cancel cancels the endpoint, making it available to be reused when 445 | // NewEndpoint is called on the channel. When canceled the foreach function 446 | // passed to Range is not notified, instead just never called again. 447 | func (e *EndpointFoo) Cancel() { 448 | atomic.CompareAndSwapUint64(&e.endpointState, active, canceled) 449 | e.receivers.Broadcast() 450 | } 451 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/reactivego/multicast 2 | 3 | go 1.14 4 | 5 | require github.com/stretchr/testify v1.6.0 6 | -------------------------------------------------------------------------------- /multicast.go: -------------------------------------------------------------------------------- 1 | // Code generated by jig; DO NOT EDIT. 2 | 3 | //go:generate jig 4 | 5 | package multicast 6 | 7 | import ( 8 | "fmt" 9 | "math" 10 | "runtime" 11 | "sync" 12 | "sync/atomic" 13 | "time" 14 | ) 15 | 16 | //jig:name ChanPadding 17 | 18 | const _PADDING = 1 // 0 turns padding off, 1 turns it on. 19 | 20 | const _EXTRA_PADDING = 0 * 64 // multiples of 64, benefits inconclusive. 
21 | 22 | type pad60 [_PADDING * (_EXTRA_PADDING + 60)]byte 23 | 24 | type pad56 [_PADDING * (_EXTRA_PADDING + 56)]byte 25 | 26 | type pad48 [_PADDING * (_EXTRA_PADDING + 48)]byte 27 | 28 | type pad40 [_PADDING * (_EXTRA_PADDING + 40)]byte 29 | 30 | type pad32 [_PADDING * (_EXTRA_PADDING + 32)]byte 31 | 32 | //jig:name ChanState 33 | 34 | // Activity of committer 35 | const ( 36 | resting uint32 = iota 37 | working 38 | ) 39 | 40 | // Activity of endpoints 41 | const ( 42 | idling uint32 = iota 43 | enumerating 44 | creating 45 | ) 46 | 47 | // State of endpoint and channel 48 | const ( 49 | active uint64 = iota 50 | canceled 51 | closed 52 | ) 53 | 54 | // Cursor is parked so it does not influence advancing the commit index. 55 | const ( 56 | parked uint64 = math.MaxUint64 57 | ) 58 | 59 | const ( 60 | // ReplayAll can be passed to NewEndpoint to retain as many of the 61 | // previously sent messages as possible that are still in the buffer. 62 | ReplayAll uint64 = math.MaxUint64 63 | ) 64 | 65 | //jig:name Chan 66 | 67 | // Chan is a fast, concurrent multi-(casting,sending,receiving) buffered 68 | // channel. It is implemented using only sync/atomic operations. Spinlocks using 69 | // runtime.Gosched() are used in situations where goroutines are waiting or 70 | // contending for resources. 
71 | type Chan struct { 72 | buffer []interface{} 73 | _________a pad40 74 | begin uint64 75 | _________b pad56 76 | end uint64 77 | _________c pad56 78 | commit uint64 79 | _________d pad56 80 | mod uint64 81 | _________e pad56 82 | endpoints endpoints 83 | 84 | err error 85 | ____________f pad48 86 | channelState uint64 // active, closed 87 | ____________g pad56 88 | 89 | write uint64 90 | _________________h pad56 91 | start time.Time 92 | _________________i pad40 93 | written []int64 // nanoseconds since start 94 | _________________j pad40 95 | committerActivity uint32 // resting, working 96 | _________________k pad60 97 | 98 | receivers *sync.Cond 99 | _________________l pad56 100 | } 101 | 102 | type endpoints struct { 103 | entry []Endpoint 104 | len uint32 105 | endpointsActivity uint32 // idling, enumerating, creating 106 | ________ pad32 107 | } 108 | 109 | //jig:name ChannelError 110 | 111 | type ChannelError string 112 | 113 | func (e ChannelError) Error() string { return string(e) } 114 | 115 | //jig:name ErrOutOfEndpoints 116 | 117 | // ErrOutOfEndpoints is returned by NewEndpoint when the maximum number of 118 | // endpoints has already been created. 
119 | const ErrOutOfEndpoints = ChannelError("out of endpoints") 120 | 121 | //jig:name endpoints 122 | 123 | func (e *endpoints) NewForChan(c *Chan, keep uint64) (*Endpoint, error) { 124 | for !atomic.CompareAndSwapUint32(&e.endpointsActivity, idling, creating) { 125 | runtime.Gosched() 126 | } 127 | defer atomic.StoreUint32(&e.endpointsActivity, idling) 128 | var start uint64 129 | commit := c.commitData() 130 | begin := atomic.LoadUint64(&c.begin) 131 | if commit-begin <= keep { 132 | start = begin 133 | } else { 134 | start = commit - keep 135 | } 136 | if int(e.len) == len(e.entry) { 137 | for index := uint32(0); index < e.len; index++ { 138 | ep := &e.entry[index] 139 | if atomic.CompareAndSwapUint64(&ep.cursor, parked, start) { 140 | ep.endpointState = atomic.LoadUint64(&c.channelState) 141 | ep.lastActive = time.Now() 142 | return ep, nil 143 | } 144 | } 145 | return nil, ErrOutOfEndpoints 146 | } 147 | ep := &e.entry[e.len] 148 | ep.Chan = c 149 | ep.cursor = start 150 | ep.endpointState = atomic.LoadUint64(&c.channelState) 151 | ep.lastActive = time.Now() 152 | e.len++ 153 | return ep, nil 154 | } 155 | 156 | func (e *endpoints) Access(access func(*endpoints)) bool { 157 | contention := false 158 | for !atomic.CompareAndSwapUint32(&e.endpointsActivity, idling, enumerating) { 159 | runtime.Gosched() 160 | contention = true 161 | } 162 | access(e) 163 | atomic.StoreUint32(&e.endpointsActivity, idling) 164 | return !contention 165 | } 166 | 167 | //jig:name NewChan 168 | 169 | // NewChan creates a new channel. The parameters bufferCapacity and 170 | // endpointCapacity determine the size of the message buffer and maximum 171 | // number of concurrent receiving endpoints respectively. 172 | // 173 | // Note that bufferCapacity is always scaled up to a power of 2 so e.g. 174 | // specifying 400 will create a buffer of 512 (2^9). Also because of this a 175 | // bufferCapacity of 0 is scaled up to 1 (2^0). 
func NewChan(bufferCapacity int, endpointCapacity int) *Chan {

	// Round the buffer size up to the next power of 2 so that index
	// wrap-around can use the cheap bitmask mod instead of a modulo.
	// NOTE(review): for bufferCapacity == 0, math.Log2(0) is -Inf and the
	// float->uint conversion of -Inf is implementation-specific per the Go
	// spec — verify the documented "0 scales up to 1" claim actually holds.
	size := uint64(1) << uint(math.Ceil(math.Log2(float64(bufferCapacity))))
	c := &Chan{
		end:     size,
		mod:     size - 1,
		buffer:  make([]interface{}, size),
		start:   time.Now(),
		written: make([]int64, size),
		endpoints: endpoints{
			entry: make([]Endpoint, endpointCapacity),
		},
	}
	// The Chan itself acts as a no-op Locker for the condition variable
	// (see Lock/Unlock below); real synchronization is done with atomics.
	c.receivers = sync.NewCond(c)
	return c
}

// Lock, empty method so we can pass *Chan to sync.NewCond as a Locker.
func (c *Chan) Lock() {}

// Unlock, empty method so we can pass *Chan to sync.NewCond as a Locker.
func (c *Chan) Unlock() {}

//jig:name Endpoint

// Endpoint is returned by a call to NewEndpoint on the channel. Every
// endpoint should be used by only a single goroutine, so no sharing between
// goroutines.
//
// The pad fields separate the frequently mutated fields from each other
// (presumably cache-line padding against false sharing; pad56/pad40 are
// declared elsewhere in this file).
type Endpoint struct {
	*Chan
	_____________a pad56
	cursor         uint64
	_____________b pad56
	endpointState  uint64 // active, canceled, closed
	_____________c pad56
	lastActive     time.Time // track activity to determine when to sleep
	_____________d pad40
	endpointClosed uint64 // active, closed
	_____________e pad56
}

//jig:name Chan_commitData

// commitData advances the commit index past every slot that a concurrent
// Send has finished writing (odd value in written[], see Send) and returns
// the resulting commit index. Only one goroutine at a time performs the
// scan (guarded by committerActivity); everyone else returns the current
// commit immediately.
func (c *Chan) commitData() uint64 {
	commit := atomic.LoadUint64(&c.commit)
	if commit >= atomic.LoadUint64(&c.write) {
		return commit // no write slots handed out beyond commit; up to date.
	}
	if !atomic.CompareAndSwapUint32(&c.committerActivity, resting, working) {
		return commit // another goroutine is already committing.
	}
	commit = atomic.LoadUint64(&c.commit)
	newcommit := commit
	// Walk the contiguous run of slots whose low "written" bit is set,
	// clearing that bit as each slot is claimed for commit.
	for ; atomic.LoadInt64(&c.written[newcommit&c.mod])&1 == 1; newcommit++ {
		atomic.AddInt64(&c.written[newcommit&c.mod], -1)
		if newcommit >= atomic.LoadUint64(&c.end) {
			break
		}
	}
	write := atomic.LoadUint64(&c.write)
	if newcommit > write {
		panic(fmt.Sprintf("commitData: range error (commit=%d,write=%d,newcommit=%d)", commit, write, newcommit))
	}
	if newcommit > commit {
		if !atomic.CompareAndSwapUint64(&c.commit, commit, newcommit) {
			panic(fmt.Sprintf("commitData; swap error (c.commit=%d,%d,%d)", c.commit, commit, newcommit))
		}
		c.receivers.Broadcast() // wake receivers waiting for fresh data.
	}
	atomic.StoreUint32(&c.committerActivity, resting)
	return atomic.LoadUint64(&c.commit)
}

//jig:name Chan_slideBuffer

// slideBuffer advances the buffer window [begin,end) past messages that the
// slowest endpoint has already read, making room for new sends. It returns
// false only when no room could be made AND the channel is no longer
// active, which tells the sender to give up.
func (c *Chan) slideBuffer() bool {
	slowestCursor := parked
	spinlock := c.endpoints.Access(func(endpoints *endpoints) {
		// Find the smallest (slowest) endpoint cursor.
		// NOTE(review): this relies on the parked sentinel comparing above
		// every live cursor — confirm against its declaration.
		for i := uint32(0); i < endpoints.len; i++ {
			cursor := atomic.LoadUint64(&endpoints.entry[i].cursor)
			if cursor < slowestCursor {
				slowestCursor = cursor
			}
		}
		if atomic.LoadUint64(&c.begin) < slowestCursor && slowestCursor <= atomic.LoadUint64(&c.end) {
			if c.mod < 16 {
				// Tiny buffers: slide the window one slot at a time.
				atomic.AddUint64(&c.begin, 1)
				atomic.AddUint64(&c.end, 1)
			} else {
				// Larger buffers: jump the window up to the slowest reader.
				atomic.StoreUint64(&c.begin, slowestCursor)
				atomic.StoreUint64(&c.end, slowestCursor+c.mod+1)
			}
		} else {
			slowestCursor = parked // no progress was possible.
		}
	})
	if slowestCursor == parked {
		if spinlock {
			runtime.Gosched()
		}
		if atomic.LoadUint64(&c.channelState) != active {
			return false
		}
	}
	return true
}

//jig:name Chan_FastSend

// FastSend can be used to send values to the channel from a SINGLE goroutine.
// Also, this does not record the time a message was sent, so the maxAge value
// passed to Range will be ignored.
//
// Note that when the number of unread messages has reached bufferCapacity,
// the call to FastSend will block until the slowest Endpoint has read another
// message.
func (c *Chan) FastSend(value interface{}) {
	// Single-sender fast path: commit is advanced directly, no write-slot
	// hand-out and no timestamp is stored in written[].
	for c.commit == c.end {
		// Buffer full; try to reclaim space already read by all endpoints.
		if !c.slideBuffer() {
			return // channel no longer active; drop the value.
		}
	}
	c.buffer[c.commit&c.mod] = value
	atomic.AddUint64(&c.commit, 1)
	c.receivers.Broadcast()
}

//jig:name Chan_Send

// Send can be used by concurrent goroutines to send values to the channel.
//
// Note that when the number of unread messages has reached bufferCapacity,
// the call to Send will block until the slowest Endpoint has read another
// message.
func (c *Chan) Send(value interface{}) {
	// Atomically hand out a private write slot to this sender.
	write := atomic.AddUint64(&c.write, 1) - 1
	for write >= atomic.LoadUint64(&c.end) {
		// Slot lies beyond the current buffer window; wait for room.
		if !c.slideBuffer() {
			return // channel no longer active; drop the value.
		}
	}
	c.buffer[write&c.mod] = value
	updated := time.Since(c.start).Nanoseconds()
	if updated == 0 {
		panic("clock failure; zero duration measured")
	}
	// Low bit marks the slot as fully written so commitData can pick it up;
	// the upper bits carry the send timestamp used by Range's maxAge filter.
	atomic.StoreInt64(&c.written[write&c.mod], updated<<1+1)
	c.receivers.Broadcast()
}

//jig:name Chan_Close

// Close will close the channel. Pass in an error or nil. Endpoints continue to
// receive data until the buffer is empty. Only then will the close notification
// be delivered to the Range function.
func (c *Chan) Close(err error) {
	// Only the first Close transitions the channel state and records err;
	// subsequent calls merely broadcast again.
	if atomic.CompareAndSwapUint64(&c.channelState, active, closed) {
		c.err = err
		// Propagate the close to every endpoint that is still active.
		c.endpoints.Access(func(endpoints *endpoints) {
			for i := uint32(0); i < endpoints.len; i++ {
				atomic.CompareAndSwapUint64(&endpoints.entry[i].endpointState, active, closed)
			}
		})
	}
	c.receivers.Broadcast() // wake receivers so they can observe the close.
}

//jig:name Chan_Closed

// Closed returns true when the channel was closed using the Close method.
346 | func (c *Chan) Closed() bool { 347 | return atomic.LoadUint64(&c.channelState) >= closed 348 | } 349 | 350 | //jig:name Chan_NewEndpoint 351 | 352 | // NewEndpoint will create a new channel endpoint that can be used to receive 353 | // from the channel. The argument keep specifies how many entries of the 354 | // existing channel buffer to keep. 355 | // 356 | // After Close is called on the channel, any endpoints created after that 357 | // will still receive the number of messages as indicated in the keep parameter 358 | // and then subsequently the close. 359 | // 360 | // An endpoint that is canceled or read until it is exhausted (after channel was 361 | // closed) will be reused by NewEndpoint. 362 | func (c *Chan) NewEndpoint(keep uint64) (*Endpoint, error) { 363 | return c.endpoints.NewForChan(c, keep) 364 | } 365 | 366 | //jig:name Endpoint_Range 367 | 368 | // Range will call the passed in foreach function with all the messages in 369 | // the buffer, followed by all the messages received. When the foreach function 370 | // returns true Range will continue, when you return false this is the same as 371 | // calling Cancel. When canceled the foreach will never be called again. 372 | // Passing a maxAge duration other than 0 will skip messages that are older 373 | // than maxAge. 374 | // 375 | // When the channel is closed, eventually when the buffer is exhausted the close 376 | // with optional error will be notified by calling foreach one last time with 377 | // the closed parameter set to true. 
func (e *Endpoint) Range(foreach func(value interface{}, err error, closed bool) bool, maxAge time.Duration) {
	e.lastActive = time.Now()
	for {
		// Phase 1: wait until the committed index moves past our cursor.
		commit := e.commitData()
		for ; e.cursor == commit; commit = e.commitData() {
			// CAS with equal old and new values is used as an atomic
			// equality test: it reports endpointState == canceled without
			// modifying it.
			if atomic.CompareAndSwapUint64(&e.endpointState, canceled, canceled) {
				// Park the cursor so NewForChan can reuse this endpoint.
				atomic.StoreUint64(&e.cursor, parked)
				return
			}
			if atomic.LoadUint64(&e.commit) < atomic.LoadUint64(&e.write) {
				// Senders hold write slots not yet committed, so data is
				// imminent; just yield. (commit/write are fields of the
				// embedded Chan.)
				if e.endpointClosed == 1 {
					panic(fmt.Sprintf("data written after closing endpoint; commit(%d) write(%d)",
						atomic.LoadUint64(&e.commit), atomic.LoadUint64(&e.write)))
				}
				runtime.Gosched()
				e.lastActive = time.Now()
			} else {
				// Nothing pending; back off in three stages based on how
				// long this endpoint has been idle.
				now := time.Now()
				if now.Before(e.lastActive.Add(1 * time.Millisecond)) {
					// Idle < 1ms: remember a pending close, keep spinning.
					if atomic.CompareAndSwapUint64(&e.endpointState, closed, closed) {
						e.endpointClosed = 1
					}
					runtime.Gosched()
				} else if now.Before(e.lastActive.Add(250 * time.Millisecond)) {
					// Idle < 250ms: the buffer is exhausted, so deliver
					// the close notification and park the endpoint.
					if atomic.CompareAndSwapUint64(&e.endpointState, closed, closed) {
						var zero interface{}
						foreach(zero, e.err, true)
						atomic.StoreUint64(&e.cursor, parked)
						return
					}
					runtime.Gosched()
				} else {
					// Idle >= 250ms: sleep on the condition variable until
					// a sender broadcasts.
					e.receivers.Wait()
					e.lastActive = time.Now()
				}
			}
		}

		// Phase 2: deliver every committed message up to commit.
		for ; e.cursor != commit; atomic.AddUint64(&e.cursor, 1) {
			item := e.buffer[e.cursor&e.mod]
			emit := true
			if maxAge != 0 {
				// written[] holds (send timestamp << 1) after commitData
				// clears the low bit; skip messages older than maxAge. A
				// zero entry (no timestamp recorded, e.g. FastSend) always
				// emits.
				stale := time.Since(e.start).Nanoseconds() - maxAge.Nanoseconds()
				updated := atomic.LoadInt64(&e.written[e.cursor&e.mod]) >> 1
				if updated != 0 && updated <= stale {
					emit = false
				}
			}
			if emit && !foreach(item, nil, false) {
				// foreach returning false is equivalent to Cancel.
				atomic.StoreUint64(&e.endpointState, canceled)
			}
			if atomic.LoadUint64(&e.endpointState) == canceled {
				atomic.StoreUint64(&e.cursor, parked)
				return
			}
		}
		e.lastActive = time.Now()
	}
}
//jig:name Endpoint_Cancel

// Cancel cancels the endpoint, making it available to be reused when
// NewEndpoint is called on the channel. When canceled, the foreach function
// passed to Range is not notified; it is simply never called again.
func (e *Endpoint) Cancel() {
	// Only an active endpoint transitions to canceled; Range observes the
	// state change, parks the cursor, and returns.
	atomic.CompareAndSwapUint64(&e.endpointState, active, canceled)
	e.receivers.Broadcast() // wake Range in case it is sleeping on the cond.
}
--------------------------------------------------------------------------------
/multicast_jig.go:
--------------------------------------------------------------------------------
// This file guides regeneration of the heterogeneous multicast package in
// this folder. The [jig tool](https://github.com/reactivego/jig) will generate
// multicast.go guided by the code used in the require function.

// +build ignore

package multicast

import _ "github.com/reactivego/multicast/generic"

// require references every API entry point so the jig tool knows which
// templates to expand when regenerating multicast.go; it is never executed.
func require() {
	c := NewChan(0, 0)
	c.FastSend(nil)
	c.Send(nil)
	c.Close(nil)
	c.Closed()
	e, _ := c.NewEndpoint(ReplayAll)
	e.Range(func(value interface{}, err error, closed bool) bool{ return false }, 0)
	e.Cancel()
}
--------------------------------------------------------------------------------
/multicast_test.go:
--------------------------------------------------------------------------------
package multicast_test

import (
	"fmt"
	"sync"

	"github.com/reactivego/multicast"
)

func Example_fastSend1x2() {
	ch := multicast.NewChan(128, 2)

	// FastSend supports only a single sending goroutine and does not store
	// timestamps with messages.
15 | 16 | ch.FastSend("Hello") 17 | ch.FastSend("World!") 18 | ch.Close(nil) 19 | if ch.Closed() { 20 | fmt.Println("channel closed") 21 | } 22 | 23 | print := func(value interface{}, err error, closed bool) bool { 24 | switch { 25 | case !closed: 26 | fmt.Println(value) 27 | case err != nil: 28 | fmt.Println(err) 29 | default: 30 | fmt.Println("closed") 31 | } 32 | return true 33 | } 34 | 35 | var wg sync.WaitGroup 36 | wg.Add(2) 37 | ep1, _ := ch.NewEndpoint(multicast.ReplayAll) 38 | go func() { 39 | ep1.Range(print, 0) 40 | wg.Done() 41 | }() 42 | 43 | ep2, _ := ch.NewEndpoint(multicast.ReplayAll) 44 | go func() { 45 | ep2.Range(print, 0) 46 | wg.Done() 47 | }() 48 | wg.Wait() 49 | 50 | // Unordered Output: 51 | // channel closed 52 | // Hello 53 | // Hello 54 | // World! 55 | // World! 56 | // closed 57 | // closed 58 | } 59 | 60 | func Example_send2x2() { 61 | ch := multicast.NewChan(128, 2) 62 | 63 | // Send suppports multiple goroutine sending and stores a timestamp with 64 | // every message sent. 65 | 66 | var wgs sync.WaitGroup 67 | wgs.Add(2) 68 | go func() { 69 | ch.Send("Hello") 70 | wgs.Done() 71 | }() 72 | go func() { 73 | ch.Send("World!") 74 | wgs.Done() 75 | }() 76 | 77 | print := func(value interface{}, err error, closed bool) bool { 78 | switch { 79 | case !closed: 80 | fmt.Println(value) 81 | case err != nil: 82 | fmt.Println(err) 83 | default: 84 | fmt.Println("closed") 85 | } 86 | return true 87 | } 88 | 89 | var wgr sync.WaitGroup 90 | wgr.Add(2) 91 | ep1, _ := ch.NewEndpoint(multicast.ReplayAll) 92 | go func() { 93 | ep1.Range(print, 0) 94 | wgr.Done() 95 | }() 96 | 97 | ep2, _ := ch.NewEndpoint(multicast.ReplayAll) 98 | go func() { 99 | ep2.Range(print, 0) 100 | wgr.Done() 101 | }() 102 | 103 | wgs.Wait() 104 | ch.Close(nil) 105 | if ch.Closed() { 106 | fmt.Println("channel closed") 107 | } 108 | wgr.Wait() 109 | 110 | // Unordered Output: 111 | // Hello 112 | // Hello 113 | // World! 114 | // World! 
115 | // closed 116 | // closed 117 | // channel closed 118 | } 119 | -------------------------------------------------------------------------------- /svg/godev.svg: -------------------------------------------------------------------------------- 1 | go.devgo.devreferencereference -------------------------------------------------------------------------------- /svg/godoc.svg: -------------------------------------------------------------------------------- 1 | godocgodocreferencereference -------------------------------------------------------------------------------- /test/README.md: -------------------------------------------------------------------------------- 1 | # test 2 | 3 | import "github.com/reactivego/multicast/test" 4 | 5 | [![](../svg/godev.svg)](https://pkg.go.dev/github.com/reactivego/multicast/test?tab=doc) 6 | [![](../svg/godoc.svg)](http://godoc.org/github.com/reactivego/multicast/test) 7 | 8 | Package `test` provides examples, tests and benchmarks for the multicast channel (specialized on type int). 9 | 10 | To run benchmarks for the channel several times, use the following command: 11 | 12 | ```bash 13 | go run github.com/reactivego/generics/cmd/jig 14 | go test -run=XXX -bench=Chan -cpu=1,2,3,4,5,6,7,8 -timeout=1h -count=10 15 | ``` 16 | 17 | ## Benchmarks 18 | 19 | As always, benchmarks should be taken with a grain of salt. When comparing 20 | the performance of our channel implementation against Go's native channels, 21 | there are issues with feature mismatch. These channels have very 22 | different semantics and strenghts. I've tried to create benchmarks that 23 | perform the same amount of work in both implementations. 24 | 25 | Initially I used interface{} as the message type. Then, later switched to 26 | int after I converted the library to a generics library and I could generate 27 | for any type. Speedwise there is not much difference between using either 28 | int or interface{}. Benchmark results are for the int type though. 
29 | 30 | ## Fan-out 31 | 32 | A fan-out configuration (buffer capacity 512) where a single sender is 33 | transmitting int values to multiple receivers where messages are multicasted 34 | so every receiver receives the same set of messages performs as follows: 35 | 36 | ```bash 37 | $ go test -run=XXX -bench=FanOut.Chan -cpu=1,2,3,4,5,6,7,8 38 | goos: darwin 39 | goarch: amd64 40 | pkg: github.com/reactivego/multicast/test 41 | BenchmarkFanOut_Chan_1xN 30000000 38.3 ns/op 42 | BenchmarkFanOut_Chan_1xN-2 50000000 34.7 ns/op 43 | BenchmarkFanOut_Chan_1xN-3 50000000 27.3 ns/op 44 | BenchmarkFanOut_Chan_1xN-4 50000000 31.6 ns/op 45 | BenchmarkFanOut_Chan_1xN-5 50000000 30.4 ns/op 46 | BenchmarkFanOut_Chan_1xN-6 50000000 29.5 ns/op 47 | BenchmarkFanOut_Chan_1xN-7 50000000 27.9 ns/op 48 | BenchmarkFanOut_Chan_1xN-8 50000000 27.2 ns/op 49 | PASS 50 | ok github.com/reactivego/multicast/test 11.880s 51 | ``` 52 | 53 | From the results we can see that even for 8 concurrent receivers, the 54 | receiving goroutines are not contending for access to the data. 55 | 56 | The same configuration, but implemented using multiple parallel native Go 57 | channels. Since Go doesn't support multicasting to multiple receivers from 58 | a single channel. 
Our multiple channels assembly gives the following result: 59 | 60 | ```bash 61 | $ go test -run=XXX -bench=FanOut.Go -cpu=1,2,3,4,5,6,7,8 62 | goos: darwin 63 | goarch: amd64 64 | pkg: github.com/reactivego/multicast/test 65 | BenchmarkFanOut_Go_1xN 20000000 61.0 ns/op 66 | BenchmarkFanOut_Go_1xN-2 20000000 86.9 ns/op 67 | BenchmarkFanOut_Go_1xN-3 20000000 99.8 ns/op 68 | BenchmarkFanOut_Go_1xN-4 20000000 115 ns/op 69 | BenchmarkFanOut_Go_1xN-5 10000000 182 ns/op 70 | BenchmarkFanOut_Go_1xN-6 10000000 197 ns/op 71 | BenchmarkFanOut_Go_1xN-7 10000000 204 ns/op 72 | BenchmarkFanOut_Go_1xN-8 10000000 210 ns/op 73 | PASS 74 | ok github.com/reactivego/multicast/test 16.440s 75 | ``` 76 | 77 | What we see here is the sender slowing down as it is pumping the same 78 | information into increasingly more separate channels. 79 | 80 | ## Fan-in 81 | 82 | A fan-in configuration (buffer capacity 512) where multiple senders are 83 | transmitting int values to a single receiver. Messages are merged in 84 | arbitrary order and all delivered to the receiver. 85 | 86 | ```bash 87 | $ go test -run=XXX -bench=FanIn.Chan -cpu=1,2,3,4,5,6,7,8 88 | goos: darwin 89 | goarch: amd64 90 | pkg: github.com/reactivego/multicast/test 91 | BenchmarkFanIn_Chan_Nx1 20000000 78.0 ns/op 92 | BenchmarkFanIn_Chan_Nx1-2 20000000 89.6 ns/op 93 | BenchmarkFanIn_Chan_Nx1-3 20000000 75.2 ns/op 94 | BenchmarkFanIn_Chan_Nx1-4 20000000 75.5 ns/op 95 | BenchmarkFanIn_Chan_Nx1-5 20000000 68.1 ns/op 96 | BenchmarkFanIn_Chan_Nx1-6 20000000 65.0 ns/op 97 | BenchmarkFanIn_Chan_Nx1-7 20000000 63.7 ns/op 98 | BenchmarkFanIn_Chan_Nx1-8 20000000 68.7 ns/op 99 | PASS 100 | ok github.com/reactivego/multicast/test 31.698s 101 | ``` 102 | 103 | I really had to work hard to get performance to an acceptable level. Started 104 | out an order of magnitude slower than native Go, as the amount of contention 105 | for goroutines trying to gain write access to the channel was crippling 106 | performance. 
Eventually, I changed the solution to hand out write slots to the 107 | concurrent senders and have one of the receiver goroutines consolidate and 108 | commit the data by looking for contiguous sequences of slots marked as 109 | updated by their sender goroutines. So data is ordered on slot hand-out time 110 | not on actual data write time. But, that fits with the semantics you'd expect 111 | of a concurrent sender channel, so all in all it's a good approach. 112 | 113 | For native Go, the implementation was straight forward as merging message 114 | streams of multiple concurrent senders is standard Go channel functionality. 115 | The results for Go for 1 to 8 concurrent senders and a single receiver are 116 | as follows: 117 | 118 | ```bash 119 | $ go test -run=XXX -bench=FanIn.Go -cpu=1,2,3,4,5,6,7,8 120 | goos: darwin 121 | goarch: amd64 122 | pkg: github.com/reactivego/multicast/test 123 | BenchmarkFanIn_Go_Nx1 20000000 72.9 ns/op 124 | BenchmarkFanIn_Go_Nx1-2 20000000 115 ns/op 125 | BenchmarkFanIn_Go_Nx1-3 20000000 117 ns/op 126 | BenchmarkFanIn_Go_Nx1-4 10000000 133 ns/op 127 | BenchmarkFanIn_Go_Nx1-5 10000000 146 ns/op 128 | BenchmarkFanIn_Go_Nx1-6 10000000 169 ns/op 129 | BenchmarkFanIn_Go_Nx1-7 10000000 184 ns/op 130 | BenchmarkFanIn_Go_Nx1-8 10000000 203 ns/op 131 | PASS 132 | ok github.com/reactivego/multicast/test 28.924s 133 | ``` 134 | 135 | Go natively supports fan-in, so its performance was very good! However, for 136 | the higher sender counts the performance drops off quite sharply, whereas our 137 | implementation using the 'write slot handout' approach performs much better. 138 | 139 | ## Fan-In-Out 140 | 141 | This benchmark is only implemented for our channel implemenation. It is not 142 | possible to implement this using Go native channels in a very effective way. 143 | 144 | What we are benchmarking here is multiple (N) senders concurrently sending 145 | on the channel. 
The streams of messages are merged into a single stream which 146 | is then multicasted to N concurrent receivers. 147 | 148 | ```bash 149 | $ go test -run=XXX -bench=FanInOut -cpu=1,2,3,4,5,6,7,8 150 | goos: darwin 151 | goarch: amd64 152 | pkg: github.com/reactivego/multicast/test 153 | BenchmarkFanInOut_Chan_NxN 20000000 77.4 ns/op 154 | BenchmarkFanInOut_Chan_NxN-2 20000000 99.2 ns/op 155 | BenchmarkFanInOut_Chan_NxN-3 20000000 103 ns/op 156 | BenchmarkFanInOut_Chan_NxN-4 20000000 101 ns/op 157 | BenchmarkFanInOut_Chan_NxN-5 20000000 96.2 ns/op 158 | BenchmarkFanInOut_Chan_NxN-6 20000000 93.8 ns/op 159 | BenchmarkFanInOut_Chan_NxN-7 20000000 94.8 ns/op 160 | BenchmarkFanInOut_Chan_NxN-8 20000000 97.8 ns/op 161 | PASS 162 | ok github.com/reactivego/multicast/test 35.091s 163 | ``` 164 | -------------------------------------------------------------------------------- /test/benchmark_test.go: -------------------------------------------------------------------------------- 1 | // go test -run=XXX -bench=Chan -cpu=1,2,3,4,5,6,7,8 -timeout=1h -count=10 2 | 3 | package test 4 | 5 | import ( 6 | "math/rand" 7 | "runtime" 8 | "sync" 9 | "sync/atomic" 10 | "testing" 11 | "time" 12 | ) 13 | 14 | func BenchmarkFanInOut_Chan_NxN(b *testing.B) { 15 | PAR := runtime.GOMAXPROCS(0) 16 | NUM := b.N 17 | 18 | expect := rand.Perm(NUM) 19 | var count = uint64(NUM) 20 | var sum = int64(NUM * (NUM - 1) / 2) 21 | 22 | b.ResetTimer() 23 | start := time.Now() 24 | channel := NewChanInt(BUFSIZE, PAR) 25 | 26 | var rwgbegin, rwgend sync.WaitGroup 27 | receiver := func() { 28 | ep, err := channel.NewEndpoint(ReplayAll) 29 | rwgend.Add(1) 30 | rwgbegin.Done() 31 | if err != nil { 32 | b.Error(err) 33 | rwgend.Done() 34 | return 35 | } 36 | var rcount uint64 37 | var rsum int64 38 | ep.Range(func(value int, err error, closed bool) bool { 39 | if !closed { 40 | rsum += int64(value) 41 | rcount++ 42 | } 43 | return true 44 | }, 0) 45 | if count != rcount { 46 | b.Errorf("receiver: rcount(%d) 
!= count(%d)", rcount, count /*, "ep=%+v" ep*/) 47 | } else if sum != rsum { 48 | b.Errorf("receiver: rsum(%d) != sum(%d)", rsum, sum) 49 | } 50 | rwgend.Done() 51 | } 52 | rwgbegin.Add(PAR) 53 | for i := 0; i < PAR; i++ { 54 | go receiver() 55 | } 56 | rwgbegin.Wait() 57 | 58 | var scount uint64 59 | var ssum int64 60 | b.RunParallel(func(pb *testing.PB) { 61 | for pb.Next() { 62 | index := atomic.AddUint64(&scount, 1) - 1 63 | channel.Send(expect[index]) 64 | atomic.AddInt64(&ssum, int64(expect[index])) 65 | } 66 | }) 67 | if sum != ssum { 68 | b.Errorf("sender: ssum(%d) != sum(%d)", ssum, sum) 69 | } 70 | channel.Close(nil) 71 | rwgend.Wait() 72 | 73 | nps := time.Now().Sub(start).Nanoseconds() / int64(NUM) 74 | // b.Logf("%dx%d, %d msg(s), %d ns/send, %.1fM msgs/sec", PAR, PAR, NUM, nps, 1.0e03/float64(nps)) 75 | _ = nps 76 | } 77 | 78 | func BenchmarkFanIn_Chan_Nx1(b *testing.B) { 79 | NUM := b.N 80 | 81 | expect := rand.Perm(NUM) 82 | var count = uint64(NUM) 83 | var sum = int64(NUM * (NUM - 1) / 2) 84 | 85 | b.ResetTimer() 86 | start := time.Now() 87 | 88 | channel := NewChanInt(BUFSIZE, 1) 89 | 90 | var rwgbegin, rwgend sync.WaitGroup 91 | receiver := func() { 92 | ep, err := channel.NewEndpoint(ReplayAll) 93 | rwgend.Add(1) 94 | rwgbegin.Done() 95 | if err != nil { 96 | b.Error(err) 97 | rwgend.Done() 98 | return 99 | } 100 | var rcount uint64 101 | var rsum int64 102 | ep.Range(func(value int, err error, closed bool) bool { 103 | if !closed { 104 | rsum += int64(value) 105 | rcount++ 106 | } 107 | return true 108 | }, 0) 109 | if count != rcount { 110 | b.Errorf("receiver: rcount(%d) != count(%d)", rcount, count /*, "ep=%+v" ep*/) 111 | } else if sum != rsum { 112 | b.Errorf("receiver: rsum(%d) != sum(%d)", rsum, sum) 113 | } 114 | rwgend.Done() 115 | } 116 | rwgbegin.Add(1) 117 | go receiver() 118 | rwgbegin.Wait() 119 | 120 | var scount uint64 121 | var ssum int64 122 | b.RunParallel(func(pb *testing.PB) { 123 | for pb.Next() { 124 | index := 
atomic.AddUint64(&scount, 1) - 1 125 | channel.Send(expect[index]) 126 | atomic.AddInt64(&ssum, int64(expect[index])) 127 | } 128 | }) 129 | if sum != ssum { 130 | b.Errorf("sender: ssum(%d) != sum(%d)", ssum, sum) 131 | } 132 | channel.Close(nil) 133 | 134 | rwgend.Wait() 135 | 136 | nps := time.Now().Sub(start).Nanoseconds() / int64(NUM) 137 | // b.Logf("%dx1, %d msg(s), %d ns/send, %.1fM msgs/sec", runtime.GOMAXPROCS(0), NUM, nps, 1.0e03/float64(nps)) 138 | _ = nps 139 | } 140 | 141 | func BenchmarkFanIn_Go_Nx1(b *testing.B) { 142 | NUM := b.N 143 | 144 | expect := rand.Perm(NUM) 145 | var count = uint64(NUM) 146 | var sum = int64(NUM * (NUM - 1) / 2) 147 | 148 | b.ResetTimer() 149 | start := time.Now() 150 | 151 | c := make(chan int, BUFSIZE) 152 | 153 | wait := make(chan struct{}) 154 | go func() { 155 | var rcount uint64 156 | var rsum int64 157 | for value := range c { 158 | rsum += int64(value) 159 | rcount++ 160 | } 161 | if count != rcount { 162 | b.Errorf("receiver: rcount(%d) != count(%d)", rcount, count) 163 | } else if sum != rsum { 164 | b.Errorf("receiver: rsum(%d) != sum(%d)", rsum, sum) 165 | } 166 | close(wait) 167 | }() 168 | 169 | var scount uint64 170 | var ssum int64 171 | b.RunParallel(func(pb *testing.PB) { 172 | for pb.Next() { 173 | index := atomic.AddUint64(&scount, 1) - 1 174 | c <- expect[index] 175 | atomic.AddInt64(&ssum, int64(expect[index])) 176 | } 177 | }) 178 | if sum != ssum { 179 | b.Errorf("sender: ssum(%d) != sum(%d)", ssum, sum) 180 | } 181 | 182 | close(c) 183 | <-wait 184 | 185 | nps := time.Now().Sub(start).Nanoseconds() / int64(NUM) 186 | // b.Logf("%dx1, %d msg(s), %d ns/send, %.1fM msgs/sec", runtime.GOMAXPROCS(0), NUM, nps, 1.0e03/float64(nps)) 187 | _ = nps 188 | } 189 | 190 | func BenchmarkFanOut_Chan_1xN(b *testing.B) { 191 | PAR := runtime.GOMAXPROCS(0) 192 | NUM := b.N 193 | 194 | start := time.Now() 195 | 196 | c := NewChanInt(BUFSIZE, PAR) 197 | 198 | var rwg sync.WaitGroup 199 | rwg.Add(PAR) 200 | 201 | wait 
:= make(chan struct{}) 202 | go func() { 203 | rwg.Wait() 204 | count := 0 205 | for !c.Closed() { 206 | c.FastSend(count) 207 | count++ 208 | } 209 | close(wait) 210 | }() 211 | 212 | var rcount int64 213 | b.RunParallel(func(pb *testing.PB) { 214 | ep, err := c.NewEndpoint(0) 215 | rwg.Done() 216 | if err != nil { 217 | b.Error(err) 218 | return 219 | } 220 | var sum, count int64 221 | ep.Range(func(value int, err error, closed bool) bool { 222 | if !closed && pb.Next() { 223 | atomic.AddInt64(&rcount, 1) 224 | sum += int64(value) 225 | count++ 226 | expectedSum := int64(count) * int64(count-1) / 2 227 | if sum != expectedSum { 228 | b.Errorf("data corruption at count == %d ; expected == %d got sum == %d", count, expectedSum, sum) 229 | } 230 | return true 231 | } 232 | return false 233 | }, 0) 234 | }) 235 | 236 | c.Close(nil) 237 | <-wait 238 | 239 | if rcount != int64(NUM) { 240 | b.Errorf("data loss; expected %d messages got %d", NUM, rcount) 241 | } 242 | 243 | nps := time.Now().Sub(start).Nanoseconds() / int64(NUM) 244 | // b.Logf("1x%d, %d msg(s), %d ns/send, %.1fM msgs/sec", PAR, NUM, nps, 1.0e03/float64(nps)) 245 | _ = nps 246 | } 247 | 248 | func BenchmarkFanOut_Go_1xN(b *testing.B) { 249 | PAR := runtime.GOMAXPROCS(0) 250 | NUMREF := b.N 251 | NUM := NUMREF / PAR 252 | if NUM == 0 { 253 | NUM = 1 254 | } 255 | 256 | start := time.Now() 257 | 258 | var rwg sync.WaitGroup 259 | receive := func(ch chan int) { 260 | var sum, count int64 261 | for value := range ch { 262 | sum += int64(value) 263 | count++ 264 | expectedSum := count * (count - 1) / 2 265 | if sum != expectedSum { 266 | b.Errorf("data corruption; at count %d, expected sum %d got %d", count, expectedSum, sum) 267 | } 268 | } 269 | if count != int64(NUM) { 270 | b.Errorf("data loss; expected %d messages got %d", NUM, count) 271 | } 272 | rwg.Done() 273 | } 274 | 275 | // create channels 276 | var channels []chan int 277 | for p := 0; p < PAR; p++ { 278 | channels = append(channels, make(chan 
int, BUFSIZE)) 279 | } 280 | 281 | // start receivers 282 | rwg.Add(PAR) 283 | for p := 0; p < PAR; p++ { 284 | go receive(channels[p]) 285 | } 286 | 287 | // send data 288 | for n := 0; n < NUM; n++ { 289 | for p := 0; p < PAR; p++ { 290 | channels[p] <- n 291 | } 292 | } 293 | 294 | // close channels 295 | for p := 0; p < PAR; p++ { 296 | close(channels[p]) 297 | } 298 | 299 | // wait for receivers 300 | rwg.Wait() 301 | 302 | nps := time.Now().Sub(start).Nanoseconds() / int64(NUMREF) 303 | // b.Logf("1x%d, %d msg(s), %d ns/send, %.1fM msgs/sec", PAR, NUMREF, nps, 1.0e03/float64(nps)) 304 | _ = nps 305 | } 306 | -------------------------------------------------------------------------------- /test/doc.go: -------------------------------------------------------------------------------- 1 | // Package test provides examples, tests and benchmarks for the channel (specialized on type int). 2 | // 3 | // To run benchmarks for the channel several times, use the following command: 4 | // go run github.com/reactivego/generics/cmd/jig -r 5 | // go test -run=XXX -bench=Chan -cpu=1,2,3,4,5,6,7,8 -timeout=1h -count=10 6 | // 7 | // Benchmarks 8 | // 9 | // As always, benchmarks should be taken with a grain of salt. When comparing 10 | // the performance of our channel implementation against Go's native channels, 11 | // there are issues with feature mismatch. These channels have very 12 | // different semantics and strenghts. I've tried to create benchmarks that 13 | // perform the same amount of work in both implementations. 14 | // 15 | // Initially I used interface{} as the message type. Then, later switched to 16 | // int after I converted the library to a generics library and I could generate 17 | // for any type. Speedwise there is not much difference between using either 18 | // int or interface{}. Benchmark results are for the int type though. 
19 | // 20 | // Fan-out 21 | // 22 | // A fan-out configuration (buffer capacity 512) where a single sender is 23 | // transmitting int values to multiple receivers where messages are multicasted 24 | // so every receiver receives the same set of messages performs as follows: 25 | // 26 | // $ go test -run=XXX -bench=FanOut.Chan -cpu=1,2,3,4,5,6,7,8 27 | // goos: darwin 28 | // goarch: amd64 29 | // pkg: github.com/reactivego/multicast/test 30 | // BenchmarkFanOut_Chan_1xN 30000000 38.3 ns/op 31 | // BenchmarkFanOut_Chan_1xN-2 50000000 34.7 ns/op 32 | // BenchmarkFanOut_Chan_1xN-3 50000000 27.3 ns/op 33 | // BenchmarkFanOut_Chan_1xN-4 50000000 31.6 ns/op 34 | // BenchmarkFanOut_Chan_1xN-5 50000000 30.4 ns/op 35 | // BenchmarkFanOut_Chan_1xN-6 50000000 29.5 ns/op 36 | // BenchmarkFanOut_Chan_1xN-7 50000000 27.9 ns/op 37 | // BenchmarkFanOut_Chan_1xN-8 50000000 27.2 ns/op 38 | // PASS 39 | // ok github.com/reactivego/multicast/test 11.880s 40 | // 41 | // From the results we can see that even for 8 concurrent receivers, the 42 | // receiving goroutines are not contending for access to the data. 43 | // 44 | // The same configuration, but implemented using multiple parallel native Go 45 | // channels. Since Go doesn't support multicasting to multiple receivers from 46 | // a single channel. 
Our multiple channels assembly gives the following result: 47 | // 48 | // $ go test -run=XXX -bench=FanOut.Go -cpu=1,2,3,4,5,6,7,8 49 | // goos: darwin 50 | // goarch: amd64 51 | // pkg: github.com/reactivego/multicast/test 52 | // BenchmarkFanOut_Go_1xN 20000000 61.0 ns/op 53 | // BenchmarkFanOut_Go_1xN-2 20000000 86.9 ns/op 54 | // BenchmarkFanOut_Go_1xN-3 20000000 99.8 ns/op 55 | // BenchmarkFanOut_Go_1xN-4 20000000 115 ns/op 56 | // BenchmarkFanOut_Go_1xN-5 10000000 182 ns/op 57 | // BenchmarkFanOut_Go_1xN-6 10000000 197 ns/op 58 | // BenchmarkFanOut_Go_1xN-7 10000000 204 ns/op 59 | // BenchmarkFanOut_Go_1xN-8 10000000 210 ns/op 60 | // PASS 61 | // ok github.com/reactivego/multicast/test 16.440s 62 | // 63 | // What we see here is the sender slowing down as it is pumping the same 64 | // information into increasingly more separate channels. 65 | // 66 | // Fan-in 67 | // 68 | // A fan-in configuration (buffer capacity 512) where multiple senders are 69 | // transmitting int values to a single receiver. Messages are merged in 70 | // arbitrary order and all delivered to the receiver. 71 | // 72 | // $ go test -run=XXX -bench=FanIn.Chan -cpu=1,2,3,4,5,6,7,8 73 | // goos: darwin 74 | // goarch: amd64 75 | // pkg: github.com/reactivego/multicast/test 76 | // BenchmarkFanIn_Chan_Nx1 20000000 78.0 ns/op 77 | // BenchmarkFanIn_Chan_Nx1-2 20000000 89.6 ns/op 78 | // BenchmarkFanIn_Chan_Nx1-3 20000000 75.2 ns/op 79 | // BenchmarkFanIn_Chan_Nx1-4 20000000 75.5 ns/op 80 | // BenchmarkFanIn_Chan_Nx1-5 20000000 68.1 ns/op 81 | // BenchmarkFanIn_Chan_Nx1-6 20000000 65.0 ns/op 82 | // BenchmarkFanIn_Chan_Nx1-7 20000000 63.7 ns/op 83 | // BenchmarkFanIn_Chan_Nx1-8 20000000 68.7 ns/op 84 | // PASS 85 | // ok github.com/reactivego/multicast/test 31.698s 86 | // 87 | // I really had to work hard to get performance to an acceptable level. 
Started 88 | // out an order of magnitude slower than native Go, as the amount of contention 89 | // for goroutines trying to gain write access to the channel was crippling 90 | // performance. Eventually, I changed the solution to hand out write slots to the 91 | // concurrent senders and have one of the receiver goroutines consolidate and 92 | // commit the data by looking for contiguous sequences of slots marked as 93 | // updated by their sender goroutines. So data is ordered on slot hand-out time 94 | // not on actual data write time. But, that fits with the semantics you'd expect 95 | // of a concurrent sender channel, so all in all it's a good approach. 96 | // 97 | // For native Go, the implementation was straight forward as merging message 98 | // streams of multiple concurrent senders is standard Go channel functionality. 99 | // The results for Go for 1 to 8 concurrent senders and a single receiver are 100 | // as follows: 101 | // 102 | // $ go test -run=XXX -bench=FanIn.Go -cpu=1,2,3,4,5,6,7,8 103 | // goos: darwin 104 | // goarch: amd64 105 | // pkg: github.com/reactivego/multicast/test 106 | // BenchmarkFanIn_Go_Nx1 20000000 72.9 ns/op 107 | // BenchmarkFanIn_Go_Nx1-2 20000000 115 ns/op 108 | // BenchmarkFanIn_Go_Nx1-3 20000000 117 ns/op 109 | // BenchmarkFanIn_Go_Nx1-4 10000000 133 ns/op 110 | // BenchmarkFanIn_Go_Nx1-5 10000000 146 ns/op 111 | // BenchmarkFanIn_Go_Nx1-6 10000000 169 ns/op 112 | // BenchmarkFanIn_Go_Nx1-7 10000000 184 ns/op 113 | // BenchmarkFanIn_Go_Nx1-8 10000000 203 ns/op 114 | // PASS 115 | // ok github.com/reactivego/multicast/test 28.924s 116 | // 117 | // Go natively supports fan-in, so its performance was very good! However, for 118 | // the higher sender counts the performance drops off quite sharply, whereas our 119 | // implementation using the 'write slot handout' approach performs much better. 120 | // 121 | // Fan-In-Out 122 | // 123 | // This benchmark is only implemented for our channel implemenation. 
It is not 124 | // possible to implement this using Go native channels in a very effective way. 125 | // 126 | // What we are benchmarking here is multiple (N) senders concurrently sending 127 | // on the channel. The streams of messages are merged into a single stream which 128 | // is then multicasted to N concurrent receivers. 129 | // 130 | // $ go test -run=XXX -bench=FanInOut -cpu=1,2,3,4,5,6,7,8 131 | // goos: darwin 132 | // goarch: amd64 133 | // pkg: github.com/reactivego/multicast/test 134 | // BenchmarkFanInOut_Chan_NxN 20000000 77.4 ns/op 135 | // BenchmarkFanInOut_Chan_NxN-2 20000000 99.2 ns/op 136 | // BenchmarkFanInOut_Chan_NxN-3 20000000 103 ns/op 137 | // BenchmarkFanInOut_Chan_NxN-4 20000000 101 ns/op 138 | // BenchmarkFanInOut_Chan_NxN-5 20000000 96.2 ns/op 139 | // BenchmarkFanInOut_Chan_NxN-6 20000000 93.8 ns/op 140 | // BenchmarkFanInOut_Chan_NxN-7 20000000 94.8 ns/op 141 | // BenchmarkFanInOut_Chan_NxN-8 20000000 97.8 ns/op 142 | // PASS 143 | // ok github.com/reactivego/multicast/test 35.091s 144 | package test 145 | 146 | import _ "github.com/reactivego/multicast/generic" 147 | 148 | const BUFSIZE = 512 149 | -------------------------------------------------------------------------------- /test/example_test.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import "fmt" 4 | 5 | // This simple example shows the creation of an int channel with a buffer 6 | // capacity of 32 and upto 8 concurrent receivers. We send some data, close the 7 | // channel and then show that a receiver endpoint created after closing can 8 | // actually receive messages from the past. 
9 | func Example_simple() { 10 | ch := NewChanInt(32, 8) 11 | 12 | // Send 13 | for i := 0; i < 5; i++ { 14 | ch.Send(i) 15 | } 16 | ch.Close(nil) 17 | 18 | // Receive 19 | ep, _ := ch.NewEndpoint(ReplayAll) 20 | ep.Range(func(value int, err error, closed bool) bool { 21 | if !closed { 22 | fmt.Printf("%d ", value) 23 | } else { 24 | fmt.Print("closed") 25 | } 26 | return true 27 | }, 0) 28 | 29 | // Output: 30 | // 0 1 2 3 4 closed 31 | } 32 | -------------------------------------------------------------------------------- /test/integrity_test.go: -------------------------------------------------------------------------------- 1 | // go test -run=integrity -parallel=10 -cpu=1,8,10 2 | 3 | package test 4 | 5 | import ( 6 | "fmt" 7 | "math/rand" 8 | "runtime" 9 | "sync" 10 | "sync/atomic" 11 | "testing" 12 | 13 | "github.com/stretchr/testify/assert" 14 | ) 15 | 16 | func TestChan_FanInOut_integrity(t *testing.T) { 17 | var numSenders = runtime.GOMAXPROCS(0) 18 | var numReceivers = runtime.GOMAXPROCS(0) 19 | 20 | const permutations = 0x200000 21 | expect := rand.Perm(permutations) 22 | 23 | channel := NewChanInt(BUFSIZE, numReceivers) 24 | 25 | var count uint64 = permutations 26 | var sum int64 27 | for i := 0; i < permutations; i++ { 28 | sum += int64(expect[i]) 29 | } 30 | 31 | var numgoroutines uint32 32 | var rwg sync.WaitGroup 33 | 34 | var scount uint64 35 | var ssum int64 36 | var swg sync.WaitGroup 37 | sender := func(name string) { 38 | atomic.AddUint32(&numgoroutines, 1) 39 | 40 | // println(name) 41 | swg.Add(1) 42 | rwg.Wait() 43 | 44 | index := atomic.AddUint64(&scount, 1) - 1 45 | for index < permutations { 46 | channel.Send(expect[index]) 47 | atomic.AddInt64(&ssum, int64(expect[index])) 48 | index = atomic.AddUint64(&scount, 1) - 1 49 | } 50 | 51 | swg.Done() 52 | swg.Wait() // wait for all senders to complete. 
53 | channel.Close(nil) 54 | } 55 | 56 | receiver := func(t *testing.T) { 57 | rwg.Add(1) 58 | ep, err := channel.NewEndpoint(ReplayAll) 59 | if err != nil { 60 | assert.NoError(t, err) 61 | channel.Close(nil) 62 | return 63 | } 64 | rwg.Done() 65 | 66 | t.Parallel() 67 | atomic.AddUint32(&numgoroutines, 1) 68 | 69 | // println(t.Name()) 70 | var rcount uint64 71 | var rsum int64 72 | ep.Range(func(value int, err error, closed bool) bool { 73 | if !closed { 74 | rsum += int64(value) 75 | rcount++ 76 | } 77 | return true 78 | }, 0) 79 | if count != rcount { 80 | assert.Equalf(t, count, rcount, "ep=%+v", ep) 81 | return 82 | } 83 | assert.Equal(t, sum, rsum) 84 | } 85 | 86 | rwg.Add(1) 87 | for i := 0; i < numSenders; i++ { 88 | name := fmt.Sprintf("Sender%d", i) 89 | go sender(name) 90 | } 91 | 92 | t.Run("receivers", func(t *testing.T) { 93 | for i := 0; i < numReceivers; i++ { 94 | t.Run(fmt.Sprintf("Receiver%d", i), receiver) 95 | } 96 | rwg.Done() 97 | }) 98 | 99 | assert.Equal(t, sum, ssum) 100 | } 101 | -------------------------------------------------------------------------------- /test/internals_test.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | "unsafe" 7 | 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | func TestMemoryLayout(t *testing.T) { 12 | const sizeofTime = 24 13 | var tm time.Time 14 | result := int(unsafe.Sizeof(tm)) 15 | assert.Equal(t, sizeofTime, result) 16 | 17 | const sizeoferror = 16 18 | var e error 19 | result = int(unsafe.Sizeof(e)) 20 | assert.Equal(t, sizeoferror, result) 21 | 22 | const sizeofinterface = 16 23 | var i interface{} 24 | result = int(unsafe.Sizeof(i)) 25 | assert.Equal(t, sizeofinterface, result) 26 | 27 | const sizeofSlice = 24 28 | var s []interface{} 29 | result = int(unsafe.Sizeof(s)) 30 | assert.Equal(t, sizeofSlice, result) 31 | 32 | const sizeofPointer = 8 33 | p := &ChanInt{} 34 | result = 
int(unsafe.Sizeof(p)) 35 | assert.Equal(t, sizeofPointer, result) 36 | 37 | type test struct { 38 | first []struct{} 39 | second uint32 40 | pad0 [0]byte // removed 41 | third uint32 42 | pad1 [0]byte // forces 8 byte padding 43 | } 44 | const sizeofStruct = sizeofSlice + 4 + 4 + 8 45 | st := test{} 46 | result = int(unsafe.Sizeof(st)) 47 | assert.Equal(t, sizeofStruct, result) 48 | assert.Equal(t, sizeofSlice, int(unsafe.Offsetof(st.second))) 49 | assert.Equal(t, sizeofSlice+4, int(unsafe.Offsetof(st.third))) 50 | assert.Equal(t, sizeofSlice+4+4, int(unsafe.Offsetof(st.pad1))) 51 | 52 | type endpoint struct { 53 | *int 54 | _____________a pad56 55 | cursor uint64 56 | _____________b pad56 57 | endpointState uint64 58 | _____________c pad56 59 | lastActive time.Time 60 | _____________d pad40 61 | endpointClosed uint64 62 | _____________e pad56 63 | } 64 | 65 | const sizeofendpoints = _PADDING*(_EXTRA_PADDING+(24+4+4+(32))) + (1-_PADDING)*(24+4+4+(8)) 66 | eps := struct { 67 | entry []endpoint 68 | len uint32 69 | activity uint32 // idling, enumerating, creating 70 | ________ pad32 71 | }{} 72 | result = int(unsafe.Sizeof(eps)) 73 | assert.Equal(t, sizeofendpoints, result) 74 | 75 | const sizeofEndpoint = _PADDING*(5*(_EXTRA_PADDING+64)) + (1-_PADDING)*(16+8+8+24+8) 76 | ep := endpoint{} 77 | result = int(unsafe.Sizeof(ep)) 78 | assert.Equal(t, sizeofEndpoint, result) 79 | } 80 | -------------------------------------------------------------------------------- /test/multicast.go: -------------------------------------------------------------------------------- 1 | // Code generated by jig; DO NOT EDIT. 2 | 3 | //go:generate jig 4 | 5 | package test 6 | 7 | import ( 8 | "fmt" 9 | "math" 10 | "runtime" 11 | "sync" 12 | "sync/atomic" 13 | "time" 14 | ) 15 | 16 | //jig:name ChanPadding 17 | 18 | const _PADDING = 1 // 0 turns padding off, 1 turns it on. 19 | 20 | const _EXTRA_PADDING = 0 * 64 // multiples of 64, benefits inconclusive. 
21 | 22 | type pad60 [_PADDING * (_EXTRA_PADDING + 60)]byte 23 | 24 | type pad56 [_PADDING * (_EXTRA_PADDING + 56)]byte 25 | 26 | type pad48 [_PADDING * (_EXTRA_PADDING + 48)]byte 27 | 28 | type pad40 [_PADDING * (_EXTRA_PADDING + 40)]byte 29 | 30 | type pad32 [_PADDING * (_EXTRA_PADDING + 32)]byte 31 | 32 | //jig:name ChanState 33 | 34 | // Activity of committer 35 | const ( 36 | resting uint32 = iota 37 | working 38 | ) 39 | 40 | // Activity of endpoints 41 | const ( 42 | idling uint32 = iota 43 | enumerating 44 | creating 45 | ) 46 | 47 | // State of endpoint and channel 48 | const ( 49 | active uint64 = iota 50 | canceled 51 | closed 52 | ) 53 | 54 | // Cursor is parked so it does not influence advancing the commit index. 55 | const ( 56 | parked uint64 = math.MaxUint64 57 | ) 58 | 59 | const ( 60 | // ReplayAll can be passed to NewEndpoint to retain as many of the 61 | // previously sent messages as possible that are still in the buffer. 62 | ReplayAll uint64 = math.MaxUint64 63 | ) 64 | 65 | //jig:name ChanInt 66 | 67 | // ChanInt is a fast, concurrent multi-(casting,sending,receiving) buffered 68 | // channel. It is implemented using only sync/atomic operations. Spinlocks using 69 | // runtime.Gosched() are used in situations where goroutines are waiting or 70 | // contending for resources. 
71 | type ChanInt struct { 72 | buffer []int 73 | _________a pad40 74 | begin uint64 75 | _________b pad56 76 | end uint64 77 | _________c pad56 78 | commit uint64 79 | _________d pad56 80 | mod uint64 81 | _________e pad56 82 | endpoints endpointsInt 83 | 84 | err error 85 | ____________f pad48 86 | channelState uint64 // active, closed 87 | ____________g pad56 88 | 89 | write uint64 90 | _________________h pad56 91 | start time.Time 92 | _________________i pad40 93 | written []int64 // nanoseconds since start 94 | _________________j pad40 95 | committerActivity uint32 // resting, working 96 | _________________k pad60 97 | 98 | receivers *sync.Cond 99 | _________________l pad56 100 | } 101 | 102 | type endpointsInt struct { 103 | entry []EndpointInt 104 | len uint32 105 | endpointsActivity uint32 // idling, enumerating, creating 106 | ________ pad32 107 | } 108 | 109 | //jig:name ChannelError 110 | 111 | type ChannelError string 112 | 113 | func (e ChannelError) Error() string { return string(e) } 114 | 115 | //jig:name ErrOutOfEndpoints 116 | 117 | // ErrOutOfEndpoints is returned by NewEndpoint when the maximum number of 118 | // endpoints has already been created. 
119 | const ErrOutOfEndpoints = ChannelError("out of endpoints") 120 | 121 | //jig:name endpointsInt 122 | 123 | func (e *endpointsInt) NewForChanInt(c *ChanInt, keep uint64) (*EndpointInt, error) { 124 | for !atomic.CompareAndSwapUint32(&e.endpointsActivity, idling, creating) { 125 | runtime.Gosched() 126 | } 127 | defer atomic.StoreUint32(&e.endpointsActivity, idling) 128 | var start uint64 129 | commit := c.commitData() 130 | begin := atomic.LoadUint64(&c.begin) 131 | if commit-begin <= keep { 132 | start = begin 133 | } else { 134 | start = commit - keep 135 | } 136 | if int(e.len) == len(e.entry) { 137 | for index := uint32(0); index < e.len; index++ { 138 | ep := &e.entry[index] 139 | if atomic.CompareAndSwapUint64(&ep.cursor, parked, start) { 140 | ep.endpointState = atomic.LoadUint64(&c.channelState) 141 | ep.lastActive = time.Now() 142 | return ep, nil 143 | } 144 | } 145 | return nil, ErrOutOfEndpoints 146 | } 147 | ep := &e.entry[e.len] 148 | ep.ChanInt = c 149 | ep.cursor = start 150 | ep.endpointState = atomic.LoadUint64(&c.channelState) 151 | ep.lastActive = time.Now() 152 | e.len++ 153 | return ep, nil 154 | } 155 | 156 | func (e *endpointsInt) Access(access func(*endpointsInt)) bool { 157 | contention := false 158 | for !atomic.CompareAndSwapUint32(&e.endpointsActivity, idling, enumerating) { 159 | runtime.Gosched() 160 | contention = true 161 | } 162 | access(e) 163 | atomic.StoreUint32(&e.endpointsActivity, idling) 164 | return !contention 165 | } 166 | 167 | //jig:name NewChanInt 168 | 169 | // NewChanInt creates a new channel. The parameters bufferCapacity and 170 | // endpointCapacity determine the size of the message buffer and maximum 171 | // number of concurrent receiving endpoints respectively. 172 | // 173 | // Note that bufferCapacity is always scaled up to a power of 2 so e.g. 174 | // specifying 400 will create a buffer of 512 (2^9). Also because of this a 175 | // bufferCapacity of 0 is scaled up to 1 (2^0). 
176 | func NewChanInt(bufferCapacity int, endpointCapacity int) *ChanInt { 177 | 178 | size := uint64(1) << uint(math.Ceil(math.Log2(float64(bufferCapacity)))) 179 | c := &ChanInt{ 180 | end: size, 181 | mod: size - 1, 182 | buffer: make([]int, size), 183 | start: time.Now(), 184 | written: make([]int64, size), 185 | endpoints: endpointsInt{ 186 | entry: make([]EndpointInt, endpointCapacity), 187 | }, 188 | } 189 | c.receivers = sync.NewCond(c) 190 | return c 191 | } 192 | 193 | // Lock, empty method so we can pass *ChanInt to sync.NewCond as a Locker. 194 | func (c *ChanInt) Lock() {} 195 | 196 | // Unlock, empty method so we can pass *ChanInt to sync.NewCond as a Locker. 197 | func (c *ChanInt) Unlock() {} 198 | 199 | //jig:name EndpointInt 200 | 201 | // EndpointInt is returned by a call to NewEndpoint on the channel. Every 202 | // endpoint should be used by only a single goroutine, so no sharing between 203 | // goroutines. 204 | type EndpointInt struct { 205 | *ChanInt 206 | _____________a pad56 207 | cursor uint64 208 | _____________b pad56 209 | endpointState uint64 // active, canceled, closed 210 | _____________c pad56 211 | lastActive time.Time // track activity to deterime when to sleep 212 | _____________d pad40 213 | endpointClosed uint64 // active, closed 214 | _____________e pad56 215 | } 216 | 217 | //jig:name ChanInt_commitData 218 | 219 | func (c *ChanInt) commitData() uint64 { 220 | commit := atomic.LoadUint64(&c.commit) 221 | if commit >= atomic.LoadUint64(&c.write) { 222 | return commit 223 | } 224 | if !atomic.CompareAndSwapUint32(&c.committerActivity, resting, working) { 225 | return commit 226 | } 227 | commit = atomic.LoadUint64(&c.commit) 228 | newcommit := commit 229 | for ; atomic.LoadInt64(&c.written[newcommit&c.mod])&1 == 1; newcommit++ { 230 | atomic.AddInt64(&c.written[newcommit&c.mod], -1) 231 | if newcommit >= atomic.LoadUint64(&c.end) { 232 | break 233 | } 234 | } 235 | write := atomic.LoadUint64(&c.write) 236 | if newcommit > 
write { 237 | panic(fmt.Sprintf("commitData: range error (commit=%d,write=%d,newcommit=%d)", commit, write, newcommit)) 238 | } 239 | if newcommit > commit { 240 | if !atomic.CompareAndSwapUint64(&c.commit, commit, newcommit) { 241 | panic(fmt.Sprintf("commitData; swap error (c.commit=%d,%d,%d)", c.commit, commit, newcommit)) 242 | } 243 | c.receivers.Broadcast() 244 | } 245 | atomic.StoreUint32(&c.committerActivity, resting) 246 | return atomic.LoadUint64(&c.commit) 247 | } 248 | 249 | //jig:name ChanInt_NewEndpoint 250 | 251 | // NewEndpoint will create a new channel endpoint that can be used to receive 252 | // from the channel. The argument keep specifies how many entries of the 253 | // existing channel buffer to keep. 254 | // 255 | // After Close is called on the channel, any endpoints created after that 256 | // will still receive the number of messages as indicated in the keep parameter 257 | // and then subsequently the close. 258 | // 259 | // An endpoint that is canceled or read until it is exhausted (after channel was 260 | // closed) will be reused by NewEndpoint. 
261 | func (c *ChanInt) NewEndpoint(keep uint64) (*EndpointInt, error) { 262 | return c.endpoints.NewForChanInt(c, keep) 263 | } 264 | 265 | //jig:name ChanInt_slideBuffer 266 | 267 | func (c *ChanInt) slideBuffer() bool { 268 | slowestCursor := parked 269 | spinlock := c.endpoints.Access(func(endpoints *endpointsInt) { 270 | for i := uint32(0); i < endpoints.len; i++ { 271 | cursor := atomic.LoadUint64(&endpoints.entry[i].cursor) 272 | if cursor < slowestCursor { 273 | slowestCursor = cursor 274 | } 275 | } 276 | if atomic.LoadUint64(&c.begin) < slowestCursor && slowestCursor <= atomic.LoadUint64(&c.end) { 277 | if c.mod < 16 { 278 | atomic.AddUint64(&c.begin, 1) 279 | atomic.AddUint64(&c.end, 1) 280 | } else { 281 | atomic.StoreUint64(&c.begin, slowestCursor) 282 | atomic.StoreUint64(&c.end, slowestCursor+c.mod+1) 283 | } 284 | } else { 285 | slowestCursor = parked 286 | } 287 | }) 288 | if slowestCursor == parked { 289 | if spinlock { 290 | runtime.Gosched() 291 | } 292 | if atomic.LoadUint64(&c.channelState) != active { 293 | return false 294 | } 295 | } 296 | return true 297 | } 298 | 299 | //jig:name ChanInt_Send 300 | 301 | // Send can be used by concurrent goroutines to send values to the channel. 302 | // 303 | // Note, that when the number of unread messages has reached bufferCapacity, then 304 | // the call to Send will block until the slowest Endpoint has read another 305 | // message. 306 | func (c *ChanInt) Send(value int) { 307 | write := atomic.AddUint64(&c.write, 1) - 1 308 | for write >= atomic.LoadUint64(&c.end) { 309 | if !c.slideBuffer() { 310 | return 311 | } 312 | } 313 | c.buffer[write&c.mod] = value 314 | updated := time.Since(c.start).Nanoseconds() 315 | if updated == 0 { 316 | panic("clock failure; zero duration measured") 317 | } 318 | atomic.StoreInt64(&c.written[write&c.mod], updated<<1+1) 319 | c.receivers.Broadcast() 320 | } 321 | 322 | //jig:name ChanInt_Close 323 | 324 | // Close will close the channel. Pass in an error or nil. 
Endpoints continue to 325 | // receive data until the buffer is empty. Only then will the close notification 326 | // be delivered to the Range function. 327 | func (c *ChanInt) Close(err error) { 328 | if atomic.CompareAndSwapUint64(&c.channelState, active, closed) { 329 | c.err = err 330 | c.endpoints.Access(func(endpoints *endpointsInt) { 331 | for i := uint32(0); i < endpoints.len; i++ { 332 | atomic.CompareAndSwapUint64(&endpoints.entry[i].endpointState, active, closed) 333 | } 334 | }) 335 | } 336 | c.receivers.Broadcast() 337 | } 338 | 339 | //jig:name ChanInt_Closed 340 | 341 | // Closed returns true when the channel was closed using the Close method. 342 | func (c *ChanInt) Closed() bool { 343 | return atomic.LoadUint64(&c.channelState) >= closed 344 | } 345 | 346 | //jig:name ChanInt_FastSend 347 | 348 | // FastSend can be used to send values to the channel from a SINGLE goroutine. 349 | // Also, this does not record the time a message was sent, so the maxAge value 350 | // passed to Range will be ignored. 351 | // 352 | // Note, that when the number of unread messages has reached bufferCapacity, then 353 | // the call to FastSend will block until the slowest Endpoint has read another 354 | // message. 355 | func (c *ChanInt) FastSend(value int) { 356 | for c.commit == c.end { 357 | if !c.slideBuffer() { 358 | return 359 | } 360 | } 361 | c.buffer[c.commit&c.mod] = value 362 | atomic.AddUint64(&c.commit, 1) 363 | c.receivers.Broadcast() 364 | } 365 | 366 | //jig:name EndpointInt_Range 367 | 368 | // Range will call the passed in foreach function with all the messages in 369 | // the buffer, followed by all the messages received. When the foreach function 370 | // returns true Range will continue, when you return false this is the same as 371 | // calling Cancel. When canceled the foreach will never be called again. 372 | // Passing a maxAge duration other than 0 will skip messages that are older 373 | // than maxAge. 
374 | // 375 | // When the channel is closed, eventually when the buffer is exhausted the close 376 | // with optional error will be notified by calling foreach one last time with 377 | // the closed parameter set to true. 378 | func (e *EndpointInt) Range(foreach func(value int, err error, closed bool) bool, maxAge time.Duration) { 379 | e.lastActive = time.Now() 380 | for { 381 | commit := e.commitData() 382 | for ; e.cursor == commit; commit = e.commitData() { 383 | if atomic.CompareAndSwapUint64(&e.endpointState, canceled, canceled) { 384 | atomic.StoreUint64(&e.cursor, parked) 385 | return 386 | } 387 | if atomic.LoadUint64(&e.commit) < atomic.LoadUint64(&e.write) { 388 | if e.endpointClosed == 1 { 389 | panic(fmt.Sprintf("data written after closing endpoint; commit(%d) write(%d)", 390 | atomic.LoadUint64(&e.commit), atomic.LoadUint64(&e.write))) 391 | } 392 | runtime.Gosched() 393 | e.lastActive = time.Now() 394 | } else { 395 | now := time.Now() 396 | if now.Before(e.lastActive.Add(1 * time.Millisecond)) { 397 | if atomic.CompareAndSwapUint64(&e.endpointState, closed, closed) { 398 | e.endpointClosed = 1 399 | } 400 | runtime.Gosched() 401 | } else if now.Before(e.lastActive.Add(250 * time.Millisecond)) { 402 | if atomic.CompareAndSwapUint64(&e.endpointState, closed, closed) { 403 | var zero int 404 | foreach(zero, e.err, true) 405 | atomic.StoreUint64(&e.cursor, parked) 406 | return 407 | } 408 | runtime.Gosched() 409 | } else { 410 | e.receivers.Wait() 411 | e.lastActive = time.Now() 412 | } 413 | } 414 | } 415 | 416 | for ; e.cursor != commit; atomic.AddUint64(&e.cursor, 1) { 417 | item := e.buffer[e.cursor&e.mod] 418 | emit := true 419 | if maxAge != 0 { 420 | stale := time.Since(e.start).Nanoseconds() - maxAge.Nanoseconds() 421 | updated := atomic.LoadInt64(&e.written[e.cursor&e.mod]) >> 1 422 | if updated != 0 && updated <= stale { 423 | emit = false 424 | } 425 | } 426 | if emit && !foreach(item, nil, false) { 427 | 
atomic.StoreUint64(&e.endpointState, canceled) 428 | } 429 | if atomic.LoadUint64(&e.endpointState) == canceled { 430 | atomic.StoreUint64(&e.cursor, parked) 431 | return 432 | } 433 | } 434 | e.lastActive = time.Now() 435 | } 436 | } 437 | -------------------------------------------------------------------------------- /test/testing_test.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "runtime" 5 | "testing" 6 | "time" 7 | ) 8 | 9 | func TestSleepingReceiver(t *testing.T) { 10 | channel := NewChanInt(128, 1) 11 | ep, err := channel.NewEndpoint(ReplayAll) 12 | if err != nil { 13 | t.Error(err) 14 | } 15 | wait := make(chan struct{}) 16 | go func() { 17 | ep.Range(func(value int, err error, closed bool) bool { 18 | if !closed { 19 | } 20 | return true 21 | }, 0) 22 | close(wait) 23 | }() 24 | time.Sleep(300 * time.Millisecond) 25 | channel.Send(1) 26 | channel.Close(nil) 27 | <-wait 28 | } 29 | 30 | func TestChanMaxAge(t *testing.T) { 31 | channel := NewChanInt(128, 1) 32 | ep, err := channel.NewEndpoint(ReplayAll) 33 | if err != nil { 34 | t.Error(err) 35 | } 36 | 37 | start := time.Now() 38 | for i := 0; i < 100; i++ { 39 | // wait until next millisecond. 
40 | for time.Since(start) < time.Duration(i)*time.Millisecond { 41 | runtime.Gosched() 42 | } 43 | channel.Send(i) 44 | } 45 | channel.Close(nil) 46 | 47 | num := 50 48 | count := func(value int, err error, closed bool) bool { 49 | if !closed { 50 | if value != num { 51 | t.Errorf("expected %d, got %d", num, value) 52 | } 53 | num++ 54 | } 55 | return true 56 | } 57 | ep.Range(count /*49.5ms*/, 99*(time.Millisecond/2)) 58 | } 59 | 60 | func TestChanEndpointKeep(t *testing.T) { 61 | channel := NewChanInt(128, 1) 62 | for i := 0; i < 100; i++ { 63 | channel.Send(i) 64 | } 65 | channel.Close(nil) 66 | 67 | ep, err := channel.NewEndpoint(0) 68 | if err != nil { 69 | t.Fatal(err) 70 | } 71 | num := 0 72 | ep.Range(func(value int, err error, closed bool) bool { 73 | if !closed { 74 | num++ 75 | } 76 | return true 77 | }, 0) 78 | if num != 0 { 79 | t.Fatal("Got", num, "buffered values but I ask for none (keep arg was 0)") 80 | } 81 | } 82 | --------------------------------------------------------------------------------