├── .gitignore ├── .gx └── lastpubver ├── .gxignore ├── .travis.yml ├── LICENSE ├── Makefile ├── README.md ├── conn.go ├── doc.go ├── example ├── .gitignore ├── blockhandler │ └── blockhandler.go └── example.go ├── group.go ├── handlers.go ├── listener.go ├── muxtest ├── mux_test.go ├── muxt.go └── package.json ├── package.json ├── stream.go └── swarm.go /.gitignore: -------------------------------------------------------------------------------- 1 | example/closer 2 | -------------------------------------------------------------------------------- /.gx/lastpubver: -------------------------------------------------------------------------------- 1 | 1.3.0: QmNbLFRGG1uHVfQM2fRFrhc7dgjThXkYUAp5qChFqxYNSH 2 | -------------------------------------------------------------------------------- /.gxignore: -------------------------------------------------------------------------------- 1 | muxtest/* 2 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: go 2 | 3 | go: 4 | - 1.6 5 | - tip 6 | 7 | script: 8 | - make test 9 | # - go test -race -cpu=5 ./... 
10 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2014 Juan Batiz-Benet 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. 
22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | test: deps 2 | go test ./muxtest 3 | 4 | test_race: deps 5 | go test -race ./muxtest 6 | 7 | gx-bins: 8 | go get github.com/whyrusleeping/gx 9 | go get github.com/whyrusleeping/gx-go 10 | 11 | deps: gx-bins 12 | gx --verbose install --global 13 | gx-go rewrite 14 | 15 | clean: gx-bins 16 | gx-go rewrite --undo 17 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # go-peerstream p2p multi-multixplexing 2 | 3 | Package peerstream is a peer-to-peer networking library that multiplexes 4 | connections to many hosts. It tried to simplify the complexity of: 5 | 6 | * accepting incoming connections over **multiple** listeners 7 | * dialing outgoing connections over **multiple** transports 8 | * multiplexing **multiple** connections per-peer 9 | * multiplexing **multiple** different servers or protocols 10 | * handling backpressure correctly 11 | * handling stream multiplexing (we use SPDY, but maybe QUIC some day) 12 | * providing a **simple** interface to the user 13 | 14 | ### Godoc: https://godoc.org/github.com/jbenet/go-peerstream 15 | 16 | --- 17 | 18 | See this working [example/example.go](example/example): 19 | 20 | ```Go 21 | package main 22 | 23 | import ( 24 | "fmt" 25 | "io" 26 | "net" 27 | "os" 28 | 29 | ps "github.com/jbenet/go-peerstream" 30 | ) 31 | 32 | func main() { 33 | // create a new Swarm 34 | swarm := ps.NewSwarm() 35 | defer swarm.Close() 36 | 37 | // tell swarm what to do with a new incoming streams. 38 | // EchoHandler just echos back anything they write. 
39 | swarm.SetStreamHandler(ps.EchoHandler) 40 | 41 | // Okay, let's try listening on some transports 42 | l1, err := net.Listen("tcp", "localhost:8001") 43 | if err != nil { 44 | panic(err) 45 | } 46 | 47 | l2, err := net.Listen("tcp", "localhost:8002") 48 | if err != nil { 49 | panic(err) 50 | } 51 | 52 | // tell swarm to accept incoming connections on these 53 | // listeners. Swarm will start accepting new connections. 54 | if err := swarm.AddListener(l1); err != nil { 55 | panic(err) 56 | } 57 | if err := swarm.AddListener(l2); err != nil { 58 | panic(err) 59 | } 60 | 61 | // ok, let's try some outgoing connections 62 | nc1, err := net.Dial("tcp", "localhost:8001") 63 | if err != nil { 64 | panic(err) 65 | } 66 | 67 | nc2, err := net.Dial("tcp", "localhost:8002") 68 | if err != nil { 69 | panic(err) 70 | } 71 | 72 | // add them to the swarm 73 | c1, err := swarm.AddConn(nc1) 74 | if err != nil { 75 | panic(err) 76 | } 77 | c2, err := swarm.AddConn(nc2) 78 | if err != nil { 79 | panic(err) 80 | } 81 | 82 | // Swarm treats listeners as sources of new connections and does 83 | // not distinguish between outgoing or incoming connections. 84 | // It provides the net.Conn to the StreamHandler so you can 85 | // distinguish between them however you wish. 86 | 87 | // now let's try opening some streams! 88 | // You can specify what connection you want to use 89 | s1, err := swarm.NewStreamWithConn(c1) 90 | if err != nil { 91 | panic(err) 92 | } 93 | 94 | // Or, you can specify a SelectConn function that picks between all 95 | // (it calls NewStreamWithConn underneath the hood) 96 | s2, err := swarm.NewStreamSelectConn(func(conns []*ps.Conn) *ps.Conn { 97 | if len(conns) > 0 { 98 | return conns[0] 99 | } 100 | return nil 101 | }) 102 | if err != nil { 103 | panic(err) 104 | } 105 | 106 | // Or, you can bind connections to ConnGroup ids. You can bind a conn to 107 | // multiple groups. And, if conn wasn't in swarm, it calls swarm.AddConn. 
108 | // You can use any Go `KeyType` as a group A `KeyType` as in maps...) 109 | swarm.AddConnToGroup(c2, 1) 110 | 111 | // And then use that group to select a connection. Swarm will use any 112 | // connection it finds in that group, using a SelectConn you can rebind: 113 | // swarm.SetGroupSelectConn(1, SelectConn) 114 | // swarm.SetDegaultGroupSelectConn(SelectConn) 115 | s3, err := swarm.NewStreamWithGroup(1) 116 | if err != nil { 117 | panic(err) 118 | } 119 | 120 | // Why groups? It's because with many connections, and many transports, 121 | // and many Servers (or Protocols), we can use the Swarm to associate 122 | // a different StreamHandlers per group, and to let us create NewStreams 123 | // on a given group. 124 | 125 | // Ok, we have streams. now what. Use them! Our Streams are basically 126 | // streams from github.com/docker/spdystream, so they work the same 127 | // way: 128 | 129 | for i, stream := range []ps.Stream{s1, s2, s3} { 130 | stream.Wait() 131 | str := "stream %d ready:" 132 | fmt.Fprintf(stream, str, i) 133 | 134 | buf := make([]byte, len(str)) 135 | stream.Read(buf) 136 | fmt.Println(string(buf)) 137 | } 138 | 139 | go io.Copy(os.Stdout, s1) 140 | go io.Copy(os.Stdout, s2) 141 | go io.Copy(os.Stdout, s3) 142 | io.Copy(io.MultiWriter(s1, s2, s3), os.Stdin) 143 | } 144 | 145 | func log(s string) { 146 | fmt.Fprintf(os.Stderr, s+"\n") 147 | } 148 | ``` 149 | -------------------------------------------------------------------------------- /conn.go: -------------------------------------------------------------------------------- 1 | package peerstream 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "net" 7 | "sync" 8 | 9 | smux "gx/ipfs/Qmb1US8uyZeEpMyc56wVZy2cDFdQjNFojAUYVCoo9ieTqp/go-stream-muxer" 10 | ) 11 | 12 | // ConnHandler is a function which receives a Conn. It allows 13 | // clients to set a function to receive newly accepted 14 | // connections. 
It works like StreamHandler, but is usually 15 | // less useful than usual as most services will only use 16 | // Streams. It is safe to pass or store the *Conn elsewhere. 17 | // Note: the ConnHandler is called sequentially, so spawn 18 | // goroutines or pass the Conn. See EchoHandler. 19 | type ConnHandler func(s *Conn) 20 | 21 | // SelectConn selects a connection out of list. It allows 22 | // delegation of decision making to clients. Clients can 23 | // make SelectConn functons that check things connection 24 | // qualities -- like latency andbandwidth -- or pick from 25 | // a logical set of connections. 26 | type SelectConn func([]*Conn) *Conn 27 | 28 | // ErrInvalidConnSelected signals that a connection selected 29 | // with a SelectConn function is invalid. This may be due to 30 | // the Conn not being part of the original set given to the 31 | // function, or the value being nil. 32 | var ErrInvalidConnSelected = errors.New("invalid selected connection") 33 | 34 | // ErrNoConnections signals that no connections are available 35 | var ErrNoConnections = errors.New("no connections") 36 | 37 | // Conn is a Swarm-associated connection. 
38 | type Conn struct { 39 | smuxConn smux.Conn 40 | netConn net.Conn // underlying connection 41 | 42 | swarm *Swarm 43 | groups groupSet 44 | 45 | streams map[*Stream]struct{} 46 | streamLock sync.RWMutex 47 | 48 | closed bool 49 | closeLock sync.Mutex 50 | 51 | closing bool 52 | closingLock sync.Mutex 53 | } 54 | 55 | func newConn(nconn net.Conn, tconn smux.Conn, s *Swarm) *Conn { 56 | return &Conn{ 57 | netConn: nconn, 58 | smuxConn: tconn, 59 | swarm: s, 60 | groups: groupSet{m: make(map[Group]struct{})}, 61 | streams: make(map[*Stream]struct{}), 62 | } 63 | } 64 | 65 | // String returns a string representation of the Conn 66 | func (c *Conn) String() string { 67 | c.streamLock.RLock() 68 | ls := len(c.streams) 69 | c.streamLock.RUnlock() 70 | f := " %s>" 71 | return fmt.Sprintf(f, ls, c.netConn.LocalAddr(), c.netConn.RemoteAddr()) 72 | } 73 | 74 | // Swarm returns the Swarm associated with this Conn 75 | func (c *Conn) Swarm() *Swarm { 76 | return c.swarm 77 | } 78 | 79 | // NetConn returns the underlying net.Conn 80 | func (c *Conn) NetConn() net.Conn { 81 | return c.netConn 82 | } 83 | 84 | // Conn returns the underlying transport Connection we use 85 | // Warning: modifying this object is undefined. 
86 | func (c *Conn) Conn() smux.Conn { 87 | return c.smuxConn 88 | } 89 | 90 | // Groups returns the Groups this Conn belongs to 91 | func (c *Conn) Groups() []Group { 92 | return c.groups.Groups() 93 | } 94 | 95 | // InGroup returns whether this Conn belongs to a Group 96 | func (c *Conn) InGroup(g Group) bool { 97 | return c.groups.Has(g) 98 | } 99 | 100 | // AddGroup assigns given Group to Conn 101 | func (c *Conn) AddGroup(g Group) { 102 | c.groups.Add(g) 103 | } 104 | 105 | // Stream returns a stream associated with this Conn 106 | func (c *Conn) NewStream() (*Stream, error) { 107 | return c.swarm.NewStreamWithConn(c) 108 | } 109 | 110 | func (c *Conn) Streams() []*Stream { 111 | c.streamLock.RLock() 112 | defer c.streamLock.RUnlock() 113 | 114 | streams := make([]*Stream, 0, len(c.streams)) 115 | for s := range c.streams { 116 | streams = append(streams, s) 117 | } 118 | return streams 119 | } 120 | 121 | // GoClose spawns off a goroutine to close the connection iff the connection is 122 | // not already being closed and returns immediately 123 | func (c *Conn) GoClose() { 124 | c.closingLock.Lock() 125 | defer c.closingLock.Unlock() 126 | if c.closing { 127 | return 128 | } 129 | c.closing = true 130 | 131 | go c.Close() 132 | } 133 | 134 | // Close closes this connection 135 | func (c *Conn) Close() error { 136 | c.closeLock.Lock() 137 | defer c.closeLock.Unlock() 138 | if c.closed == true { 139 | return nil 140 | } 141 | 142 | c.closingLock.Lock() 143 | c.closing = true 144 | c.closingLock.Unlock() 145 | 146 | c.closed = true 147 | 148 | // close streams 149 | streams := c.Streams() 150 | for _, s := range streams { 151 | s.Close() 152 | } 153 | 154 | // close underlying connection 155 | c.swarm.removeConn(c) 156 | var err error 157 | if c.smuxConn != nil { 158 | err = c.smuxConn.Close() 159 | } else { 160 | err = c.netConn.Close() 161 | } 162 | c.swarm.notifyAll(func(n Notifiee) { 163 | n.Disconnected(c) 164 | }) 165 | return err 166 | } 167 | 168 | // 
ConnsWithGroup narrows down a set of connections to those in a given group. 169 | func ConnsWithGroup(g Group, conns []*Conn) []*Conn { 170 | var out []*Conn 171 | for _, c := range conns { 172 | if c.InGroup(g) { 173 | out = append(out, c) 174 | } 175 | } 176 | return out 177 | } 178 | 179 | func ConnInConns(c1 *Conn, conns []*Conn) bool { 180 | for _, c2 := range conns { 181 | if c2 == c1 { 182 | return true 183 | } 184 | } 185 | return false 186 | } 187 | 188 | // ------------------------------------------------------------------ 189 | // All the connection setup logic here, in one place. 190 | // these are mostly *Swarm methods, but i wanted a less-crowded place 191 | // for them. 192 | // ------------------------------------------------------------------ 193 | 194 | // addConn is the internal version of AddConn. we need the server bool 195 | // as spdystream requires it. 196 | func (s *Swarm) addConn(netConn net.Conn, isServer bool) (*Conn, error) { 197 | c, err := s.setupConn(netConn, isServer) 198 | if err != nil { 199 | return nil, err 200 | } 201 | 202 | s.ConnHandler()(c) 203 | 204 | if c.smuxConn != nil { 205 | // go listen for incoming streams on this connection 206 | go c.smuxConn.Serve(func(ss smux.Stream) { 207 | // log.Printf("accepted stream %d from %s\n", ssS.Identifier(), netConn.RemoteAddr()) 208 | stream := s.setupStream(ss, c) 209 | s.StreamHandler()(stream) // call our handler 210 | }) 211 | } 212 | 213 | s.notifyAll(func(n Notifiee) { 214 | n.Connected(c) 215 | }) 216 | return c, nil 217 | } 218 | 219 | // setupConn adds the relevant connection to the map, first checking if it 220 | // was already there. 
221 | func (s *Swarm) setupConn(netConn net.Conn, isServer bool) (*Conn, error) { 222 | if netConn == nil { 223 | return nil, errors.New("nil conn") 224 | } 225 | 226 | // first, check if we already have it, to avoid constructing it 227 | // if it is already there 228 | s.connLock.Lock() 229 | for c := range s.conns { 230 | if c.netConn == netConn { 231 | s.connLock.Unlock() 232 | return c, nil 233 | } 234 | } 235 | s.connLock.Unlock() 236 | // construct the connection without hanging onto the lock 237 | // (as there could be deadlock if so.) 238 | 239 | var ssConn smux.Conn 240 | if s.transport != nil { 241 | // create a new stream muxer connection 242 | c, err := s.transport.NewConn(netConn, isServer) 243 | if err != nil { 244 | return nil, err 245 | } 246 | 247 | ssConn = c 248 | } 249 | 250 | // take the lock to add it to the map. 251 | s.connLock.Lock() 252 | defer s.connLock.Unlock() 253 | 254 | // check for it again as it may have been added already. (TOCTTOU) 255 | for c := range s.conns { 256 | if c.netConn == netConn { 257 | return c, nil 258 | } 259 | } 260 | 261 | // add the connection 262 | c := newConn(netConn, ssConn, s) 263 | s.conns[c] = struct{}{} 264 | return c, nil 265 | } 266 | 267 | // createStream is the internal function that creates a new stream. assumes 268 | // all validation has happened. 269 | func (s *Swarm) createStream(c *Conn) (*Stream, error) { 270 | 271 | // Create a new smux.Stream 272 | smuxStream, err := c.smuxConn.OpenStream() 273 | if err != nil { 274 | return nil, err 275 | } 276 | 277 | return s.setupStream(smuxStream, c), nil 278 | } 279 | 280 | // newStream is the internal function that creates a new stream. assumes 281 | // all validation has happened. 
282 | func (s *Swarm) setupStream(smuxStream smux.Stream, c *Conn) *Stream { 283 | 284 | // create a new stream 285 | stream := newStream(smuxStream, c) 286 | 287 | // add it to our streams maps 288 | s.streamLock.Lock() 289 | c.streamLock.Lock() 290 | s.streams[stream] = struct{}{} 291 | c.streams[stream] = struct{}{} 292 | s.streamLock.Unlock() 293 | c.streamLock.Unlock() 294 | 295 | s.notifyAll(func(n Notifiee) { 296 | n.OpenedStream(stream) 297 | }) 298 | return stream 299 | } 300 | 301 | func (s *Swarm) removeStream(stream *Stream) error { 302 | 303 | // remove from our maps 304 | s.streamLock.Lock() 305 | stream.conn.streamLock.Lock() 306 | delete(s.streams, stream) 307 | delete(stream.conn.streams, stream) 308 | s.streamLock.Unlock() 309 | stream.conn.streamLock.Unlock() 310 | 311 | err := stream.smuxStream.Close() 312 | s.notifyAll(func(n Notifiee) { 313 | n.ClosedStream(stream) 314 | }) 315 | return err 316 | } 317 | 318 | func (s *Swarm) removeConn(conn *Conn) { 319 | // remove from our maps 320 | s.connLock.Lock() 321 | delete(s.conns, conn) 322 | s.connLock.Unlock() 323 | } 324 | -------------------------------------------------------------------------------- /doc.go: -------------------------------------------------------------------------------- 1 | // Package peerstream is a peer-to-peer networking library that multiplexes 2 | // connections to many hosts. 
It tried to simplify the complexity of: 3 | // 4 | // * accepting incoming connections over **multiple** listeners 5 | // * dialing outgoing connections over **multiple** transports 6 | // * multiplexing **multiple** connections per-peer 7 | // * multiplexing **multiple** different servers or protocols 8 | // * handling backpressure correctly 9 | // * handling stream multiplexing (we use SPDY, but maybe QUIC some day) 10 | // * providing a **simple** interface to the user 11 | // 12 | package peerstream 13 | -------------------------------------------------------------------------------- /example/.gitignore: -------------------------------------------------------------------------------- 1 | example 2 | -------------------------------------------------------------------------------- /example/blockhandler/blockhandler.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bufio" 5 | "fmt" 6 | "net" 7 | "os" 8 | "time" 9 | 10 | ps "github.com/jbenet/go-peerstream" 11 | spdy "github.com/whyrusleeping/go-smux-spdystream" 12 | ) 13 | 14 | func die(err error) { 15 | fmt.Fprintf(os.Stderr, "error: %s\n") 16 | os.Exit(1) 17 | } 18 | 19 | func main() { 20 | // create a new Swarm 21 | swarm := ps.NewSwarm(spdy.Transport) 22 | defer swarm.Close() 23 | 24 | // tell swarm what to do with a new incoming streams. 25 | // EchoHandler just echos back anything they write. 
26 | swarm.SetStreamHandler(ps.EchoHandler) 27 | 28 | l, err := net.Listen("tcp", "localhost:8001") 29 | if err != nil { 30 | die(err) 31 | } 32 | 33 | if _, err := swarm.AddListener(l); err != nil { 34 | die(err) 35 | } 36 | 37 | nc, err := net.Dial("tcp", "localhost:8001") 38 | if err != nil { 39 | die(err) 40 | } 41 | 42 | c, err := swarm.AddConn(nc) 43 | if err != nil { 44 | die(err) 45 | } 46 | 47 | nRcvStream := 0 48 | bio := bufio.NewReader(os.Stdin) 49 | swarm.SetStreamHandler(func(s *ps.Stream) { 50 | log("handling new stream %d", nRcvStream) 51 | nRcvStream++ 52 | 53 | line, err := bio.ReadString('\n') 54 | if err != nil { 55 | die(err) 56 | } 57 | _ = line 58 | // line = "read: " + line 59 | // s.Write([]byte(line)) 60 | s.Close() 61 | }) 62 | 63 | nSndStream := 0 64 | for { 65 | <-time.After(200 * time.Millisecond) 66 | _, err := swarm.NewStreamWithConn(c) 67 | if err != nil { 68 | die(err) 69 | } 70 | log("sender got new stream %d", nSndStream) 71 | nSndStream++ 72 | } 73 | } 74 | 75 | func log(s string, ifs ...interface{}) { 76 | fmt.Fprintf(os.Stderr, s+"\n", ifs...) 77 | } 78 | -------------------------------------------------------------------------------- /example/example.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | "net" 7 | "os" 8 | 9 | ps "github.com/jbenet/go-peerstream" 10 | spdy "github.com/whyrusleeping/go-smux-spdystream" 11 | ) 12 | 13 | func main() { 14 | 15 | log("creating a new swarm with spdystream transport") // create a new Swarm 16 | swarm := ps.NewSwarm(spdy.Transport) 17 | defer swarm.Close() 18 | 19 | // tell swarm what to do with a new incoming streams. 20 | // EchoHandler just echos back anything they write. 
21 | log("setup EchoHandler") 22 | swarm.SetStreamHandler(ps.EchoHandler) 23 | 24 | // Okay, let's try listening on some transports 25 | log("listening at localhost:8001") 26 | l1, err := net.Listen("tcp", "localhost:8001") 27 | if err != nil { 28 | panic(err) 29 | } 30 | 31 | log("listening at localhost:8002") 32 | l2, err := net.Listen("tcp", "localhost:8002") 33 | if err != nil { 34 | panic(err) 35 | } 36 | 37 | // tell swarm to accept incoming connections on these 38 | // listeners. Swarm will start accepting new connections. 39 | if _, err := swarm.AddListener(l1); err != nil { 40 | panic(err) 41 | } 42 | if _, err := swarm.AddListener(l2); err != nil { 43 | panic(err) 44 | } 45 | 46 | // ok, let's try some outgoing connections 47 | log("dialing localhost:8001") 48 | nc1, err := net.Dial("tcp", "localhost:8001") 49 | if err != nil { 50 | panic(err) 51 | } 52 | 53 | log("dialing localhost:8002") 54 | nc2, err := net.Dial("tcp", "localhost:8002") 55 | if err != nil { 56 | panic(err) 57 | } 58 | 59 | // add them to the swarm 60 | c1, err := swarm.AddConn(nc1) 61 | if err != nil { 62 | panic(err) 63 | } 64 | c2, err := swarm.AddConn(nc2) 65 | if err != nil { 66 | panic(err) 67 | } 68 | 69 | // Swarm treats listeners as sources of new connections and does 70 | // not distinguish between outgoing or incoming connections. 71 | // It provides the net.Conn to the StreamHandler so you can 72 | // distinguish between them however you wish. 73 | 74 | // now let's try opening some streams! 
75 | // You can specify what connection you want to use 76 | log("opening stream with NewStreamWithConn(c1)") 77 | s1, err := swarm.NewStreamWithConn(c1) 78 | if err != nil { 79 | panic(err) 80 | } 81 | 82 | // Or, you can specify a SelectConn function that picks between all 83 | // (it calls NewStreamWithConn underneath the hood) 84 | log("opening stream with NewStreamSelectConn(.)") 85 | s2, err := swarm.NewStreamSelectConn(func(conns []*ps.Conn) *ps.Conn { 86 | if len(conns) > 0 { 87 | return conns[0] 88 | } 89 | return nil 90 | }) 91 | if err != nil { 92 | panic(err) 93 | } 94 | 95 | // Or, you can bind connections to ConnGroup ids. You can bind a conn to 96 | // multiple groups. And, if conn wasn't in swarm, it calls swarm.AddConn. 97 | // You can use any Go `KeyType` as a group A `KeyType` as in maps...) 98 | swarm.AddConnToGroup(c2, 1) 99 | 100 | // And then use that group to select a connection. Swarm will use any 101 | // connection it finds in that group, using a SelectConn you can rebind: 102 | // swarm.SetGroupSelectConn(1, SelectConn) 103 | // swarm.SetDegaultGroupSelectConn(SelectConn) 104 | log("opening stream with NewStreamWithGroup(1)") 105 | s3, err := swarm.NewStreamWithGroup(1) 106 | if err != nil { 107 | panic(err) 108 | } 109 | 110 | // Why groups? It's because with many connections, and many transports, 111 | // and many Servers (or Protocols), we can use the Swarm to associate 112 | // a different StreamHandlers per group, and to let us create NewStreams 113 | // on a given group. 114 | 115 | // Ok, we have streams. now what. Use them! 
Our Streams are basically 116 | // streams from github.com/docker/spdystream, so they work the same 117 | // way: 118 | 119 | log("preparing the streams") 120 | for i, stream := range []*ps.Stream{s1, s2, s3} { 121 | str := "stream %d ready:" 122 | fmt.Fprintf(stream, str, i) 123 | 124 | buf := make([]byte, len(str)) 125 | log(fmt.Sprintf("reading from stream %d", i)) 126 | stream.Read(buf) 127 | fmt.Println(string(buf)) 128 | } 129 | 130 | log("let's test the streams") 131 | log("enter some text below:\n") 132 | go io.Copy(os.Stdout, s1) 133 | go io.Copy(os.Stdout, s2) 134 | go io.Copy(os.Stdout, s3) 135 | io.Copy(io.MultiWriter(s1, s2, s3), os.Stdin) 136 | } 137 | 138 | func log(s string) { 139 | fmt.Fprintf(os.Stderr, s+"\n") 140 | } 141 | -------------------------------------------------------------------------------- /group.go: -------------------------------------------------------------------------------- 1 | package peerstream 2 | 3 | import ( 4 | "errors" 5 | "sync" 6 | "unsafe" 7 | ) 8 | 9 | // ErrGroupNotFound signals no such group exists 10 | var ErrGroupNotFound = errors.New("group not found") 11 | 12 | // Group is an object used to associate a group of 13 | // Streams, Connections, and Listeners. It can be anything, 14 | // it is meant to work like a KeyType in maps 15 | type Group interface{} 16 | 17 | // Groupable is an interface for a set of objects that can 18 | // be assigned groups: Streams, Connections, and Listeners. 19 | // Objects inherit groups (e.g. a Stream inherits the groups 20 | // of its parent Connection, and in turn that of its Listener). 
21 | type Groupable interface { 22 | // Groups returns the groups this object belongs to 23 | Groups() []Group 24 | 25 | // InGroup returns whether this object belongs to a Group 26 | InGroup(g Group) bool 27 | 28 | // AddGroup adds this object to a group 29 | AddGroup(g Group) 30 | } 31 | 32 | // groupSet is a struct designed to be embedded and 33 | // give things group memebership 34 | type groupSet struct { 35 | m map[Group]struct{} 36 | sync.Mutex 37 | } 38 | 39 | func (gs *groupSet) Add(g Group) { 40 | gs.Lock() 41 | defer gs.Unlock() 42 | gs.m[g] = struct{}{} 43 | } 44 | 45 | func (gs *groupSet) Remove(g Group) { 46 | gs.Lock() 47 | defer gs.Unlock() 48 | delete(gs.m, g) 49 | } 50 | 51 | func (gs *groupSet) Has(g Group) bool { 52 | gs.Lock() 53 | defer gs.Unlock() 54 | _, ok := gs.m[g] 55 | return ok 56 | } 57 | 58 | func (gs *groupSet) Groups() []Group { 59 | gs.Lock() 60 | defer gs.Unlock() 61 | 62 | out := make([]Group, 0, len(gs.m)) 63 | for k := range gs.m { 64 | out = append(out, k) 65 | } 66 | return out 67 | } 68 | 69 | // AddSet adds all elements in another set. 70 | func (gs *groupSet) AddSet(gs2 *groupSet) { 71 | // acquire locks in order 72 | p1 := uintptr(unsafe.Pointer(gs)) 73 | p2 := uintptr(unsafe.Pointer(gs2)) 74 | switch { 75 | case p1 < p2: 76 | gs.Lock() 77 | gs2.Lock() 78 | defer gs.Unlock() 79 | defer gs2.Unlock() 80 | case p1 > p2: 81 | gs2.Lock() 82 | gs.Lock() 83 | defer gs2.Unlock() 84 | defer gs.Unlock() 85 | default: 86 | return // they're the same! 
87 | } 88 | 89 | for g := range gs2.m { 90 | gs.m[g] = struct{}{} 91 | } 92 | } 93 | -------------------------------------------------------------------------------- /handlers.go: -------------------------------------------------------------------------------- 1 | package peerstream 2 | 3 | import ( 4 | "io" 5 | "math/rand" 6 | ) 7 | 8 | var SelectRandomConn = func(conns []*Conn) *Conn { 9 | if len(conns) == 0 { 10 | return nil 11 | } 12 | 13 | return conns[rand.Intn(len(conns))] 14 | } 15 | 16 | func EchoHandler(s *Stream) { 17 | go func() { 18 | io.Copy(s, s) 19 | s.Close() 20 | }() 21 | } 22 | 23 | func CloseHandler(s *Stream) { 24 | s.Close() 25 | } 26 | 27 | func NoOpStreamHandler(s *Stream) {} 28 | 29 | func NoOpConnHandler(c *Conn) {} 30 | -------------------------------------------------------------------------------- /listener.go: -------------------------------------------------------------------------------- 1 | package peerstream 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "net" 7 | "sync" 8 | 9 | tec "gx/ipfs/QmWHgLqrghM9zw77nF6gdvT9ExQ2RB9pLxkd8sDHZf1rWb/go-temp-err-catcher" 10 | ) 11 | 12 | // AcceptConcurrency is how many connections can simultaneously be 13 | // in process of being accepted. Handshakes can sometimes occur as 14 | // part of this process, so it may take some time. It is imporant to 15 | // rate limit lest a malicious influx of connections would cause our 16 | // node to consume all its resources accepting new connections. 
17 | var AcceptConcurrency = 200 18 | 19 | type Listener struct { 20 | netList net.Listener 21 | groups groupSet 22 | swarm *Swarm 23 | 24 | acceptErr chan error 25 | } 26 | 27 | func newListener(nl net.Listener, s *Swarm) *Listener { 28 | return &Listener{ 29 | netList: nl, 30 | swarm: s, 31 | acceptErr: make(chan error, 10), 32 | } 33 | } 34 | 35 | // String returns a string representation of the Listener 36 | func (l *Listener) String() string { 37 | f := "" 38 | return fmt.Sprintf(f, l.netList.Addr()) 39 | } 40 | 41 | // NetListener is the underlying net.Listener 42 | func (l *Listener) NetListener() net.Listener { 43 | return l.netList 44 | } 45 | 46 | // Groups returns the groups this Listener belongs to 47 | func (l *Listener) Groups() []Group { 48 | return l.groups.Groups() 49 | } 50 | 51 | // InGroup returns whether this Listener belongs to a Group 52 | func (l *Listener) InGroup(g Group) bool { 53 | return l.groups.Has(g) 54 | } 55 | 56 | // AddGroup assigns given Group to Listener 57 | func (l *Listener) AddGroup(g Group) { 58 | l.groups.Add(g) 59 | } 60 | 61 | // ListenersWithGroup narrows down a set of listeners to those in given group. 62 | func ListenersWithGroup(g Group, ls []*Listener) []*Listener { 63 | var out []*Listener 64 | for _, l := range ls { 65 | if l.InGroup(g) { 66 | out = append(out, l) 67 | } 68 | } 69 | return out 70 | } 71 | 72 | // accept continously accepts incoming connections and 73 | // adds them to the listener's Swarm. is is meant to be 74 | // run in a goroutine. 75 | // TODO: add rate limiting 76 | func (l *Listener) accept() { 77 | var wg sync.WaitGroup 78 | defer func() { 79 | wg.Wait() // must happen before teardown 80 | l.teardown() 81 | }() 82 | 83 | // catching the error here is odd. 
doing what net/http does: 84 | // http://golang.org/src/net/http/server.go?s=51504:51550#L1728 85 | // Using the lib: https://godoc.org/github.com/jbenet/go-temp-err-catcher 86 | var catcher tec.TempErrCatcher 87 | 88 | // rate limit concurrency 89 | limit := make(chan struct{}, AcceptConcurrency) 90 | 91 | // loop forever accepting connections 92 | for { 93 | conn, err := l.netList.Accept() 94 | if err != nil { 95 | if catcher.IsTemporary(err) { 96 | continue 97 | } 98 | l.acceptErr <- fmt.Errorf("peerstream listener failed: %s", err) 99 | return // ok, problems. bail. 100 | } 101 | 102 | // add conn to swarm and listen for incoming streams 103 | // do this in a goroutine to avoid blocking the Accept loop. 104 | // note that this does not rate limit accepts. 105 | limit <- struct{}{} // sema down 106 | wg.Add(1) 107 | go func(conn net.Conn) { 108 | defer func() { <-limit }() // sema up 109 | defer wg.Done() 110 | 111 | conn2, err := l.swarm.addConn(conn, true) 112 | if err != nil { 113 | l.acceptErr <- err 114 | return 115 | } 116 | conn2.groups.AddSet(&l.groups) // add out groups 117 | }(conn) 118 | } 119 | } 120 | 121 | // AcceptError returns the error that we **might** on listener close 122 | func (l *Listener) AcceptErrors() <-chan error { 123 | return l.acceptErr 124 | } 125 | 126 | func (l *Listener) teardown() { 127 | // in case we exit from network errors (accept fails) but 128 | // (a) client doesn't call Close, and (b) listener remains open) 129 | l.netList.Close() 130 | 131 | close(l.acceptErr) 132 | 133 | // remove self from swarm 134 | l.swarm.listenerLock.Lock() 135 | delete(l.swarm.listeners, l) 136 | l.swarm.listenerLock.Unlock() 137 | } 138 | 139 | func (l *Listener) Close() error { 140 | return l.netList.Close() 141 | } 142 | 143 | // addListener is the internal version of AddListener. 
144 | func (s *Swarm) addListener(nl net.Listener) (*Listener, error) { 145 | if nl == nil { 146 | return nil, errors.New("nil listener") 147 | } 148 | 149 | s.listenerLock.Lock() 150 | defer s.listenerLock.Unlock() 151 | 152 | // first, check if we already have it... 153 | for l := range s.listeners { 154 | if l.netList == nl { 155 | return l, nil 156 | } 157 | } 158 | 159 | l := newListener(nl, s) 160 | s.listeners[l] = struct{}{} 161 | go l.accept() 162 | return l, nil 163 | } 164 | -------------------------------------------------------------------------------- /muxtest/mux_test.go: -------------------------------------------------------------------------------- 1 | package muxtest 2 | 3 | import ( 4 | "testing" 5 | 6 | multistream "gx/ipfs/QmVcmcQE9eX4HQ8QwhVXpoHt3ennG7d299NDYFq9D1Uqa1/go-smux-multistream" 7 | spdy "gx/ipfs/QmWMKNLGkYJTZ4Tq3DQ8E9j86QaKvGjKgFzvLzGYXvW69Z/go-smux-spdystream" 8 | yamux "gx/ipfs/QmYaeRqthWTco7oQF4dztuqA94P8JF36gVjd2z2eEqKfrh/go-smux-yamux" 9 | muxado "gx/ipfs/QmZJM54H2j26QwmWAYaW24L8Hwo3ojrQmiGj3gBF9Quj8d/go-smux-muxado" 10 | multiplex "gx/ipfs/Qmao31fmDJxp9gY7YNqkN1i3kGaknL6XSzKtdC1VCU7Qj8/go-smux-multiplex" 11 | ) 12 | 13 | func TestYamuxTransport(t *testing.T) { 14 | SubtestAll(t, yamux.DefaultTransport) 15 | } 16 | 17 | func TestSpdyStreamTransport(t *testing.T) { 18 | SubtestAll(t, spdy.Transport) 19 | } 20 | 21 | func TestMultiplexTransport(t *testing.T) { 22 | SubtestAll(t, multiplex.DefaultTransport) 23 | } 24 | 25 | func TestMuxadoTransport(t *testing.T) { 26 | SubtestAll(t, muxado.Transport) 27 | } 28 | 29 | func TestMultistreamTransport(t *testing.T) { 30 | tpt := multistream.NewBlankTransport() 31 | tpt.AddTransport("/yamux", yamux.DefaultTransport) 32 | tpt.AddTransport("/spdy", spdy.Transport) 33 | SubtestAll(t, tpt) 34 | } 35 | -------------------------------------------------------------------------------- /muxtest/muxt.go: -------------------------------------------------------------------------------- 1 | package 
muxtest 2 | 3 | import ( 4 | "bytes" 5 | crand "crypto/rand" 6 | "fmt" 7 | "io" 8 | mrand "math/rand" 9 | "net" 10 | "os" 11 | "reflect" 12 | "runtime" 13 | "sync" 14 | "testing" 15 | 16 | ps "github.com/jbenet/go-peerstream" 17 | 18 | smux "gx/ipfs/Qmb1US8uyZeEpMyc56wVZy2cDFdQjNFojAUYVCoo9ieTqp/go-stream-muxer" 19 | ) 20 | 21 | var randomness []byte 22 | var nextPort = 20000 23 | var verbose = false 24 | 25 | func init() { 26 | // read 1MB of randomness 27 | randomness = make([]byte, 1<<20) 28 | if _, err := crand.Read(randomness); err != nil { 29 | panic(err) 30 | } 31 | } 32 | 33 | func randBuf(size int) []byte { 34 | n := len(randomness) - size 35 | if size < 1 { 36 | panic(fmt.Errorf("requested too large buffer (%d). max is %d", size, len(randomness))) 37 | } 38 | 39 | start := mrand.Intn(n) 40 | return randomness[start : start+size] 41 | } 42 | 43 | func checkErr(t *testing.T, err error) { 44 | if err != nil { 45 | t.Fatal(err) 46 | } 47 | } 48 | 49 | func log(s string, v ...interface{}) { 50 | if verbose { 51 | fmt.Fprintf(os.Stderr, "> "+s+"\n", v...) 
52 | } 53 | } 54 | 55 | type echoSetup struct { 56 | swarm *ps.Swarm 57 | conns []*ps.Conn 58 | } 59 | 60 | func singleConn(t *testing.T, tr smux.Transport) echoSetup { 61 | swarm := ps.NewSwarm(tr) 62 | swarm.SetStreamHandler(func(s *ps.Stream) { 63 | defer s.Close() 64 | log("accepted stream") 65 | io.Copy(s, s) // echo everything 66 | log("closing stream") 67 | }) 68 | 69 | log("listening at %s", "localhost:0") 70 | l, err := net.Listen("tcp", "localhost:0") 71 | checkErr(t, err) 72 | 73 | _, err = swarm.AddListener(l) 74 | checkErr(t, err) 75 | 76 | log("dialing to %s", l.Addr()) 77 | nc1, err := net.Dial("tcp", l.Addr().String()) 78 | checkErr(t, err) 79 | 80 | c1, err := swarm.AddConn(nc1) 81 | checkErr(t, err) 82 | 83 | return echoSetup{ 84 | swarm: swarm, 85 | conns: []*ps.Conn{c1}, 86 | } 87 | } 88 | 89 | func makeSwarm(t *testing.T, tr smux.Transport, nListeners int) *ps.Swarm { 90 | swarm := ps.NewSwarm(tr) 91 | swarm.SetStreamHandler(func(s *ps.Stream) { 92 | defer s.Close() 93 | log("accepted stream") 94 | io.Copy(s, s) // echo everything 95 | log("closing stream") 96 | }) 97 | 98 | for i := 0; i < nListeners; i++ { 99 | log("%p listening at %s", swarm, "localhost:0") 100 | l, err := net.Listen("tcp", "localhost:0") 101 | checkErr(t, err) 102 | _, err = swarm.AddListener(l) 103 | checkErr(t, err) 104 | } 105 | 106 | return swarm 107 | } 108 | 109 | func makeSwarms(t *testing.T, tr smux.Transport, nSwarms, nListeners int) []*ps.Swarm { 110 | swarms := make([]*ps.Swarm, nSwarms) 111 | for i := 0; i < nSwarms; i++ { 112 | swarms[i] = makeSwarm(t, tr, nListeners) 113 | } 114 | return swarms 115 | } 116 | 117 | func SubtestConstructSwarm(t *testing.T, tr smux.Transport) { 118 | ps.NewSwarm(tr) 119 | } 120 | 121 | func SubtestSimpleWrite(t *testing.T, tr smux.Transport) { 122 | swarm := ps.NewSwarm(tr) 123 | defer swarm.Close() 124 | 125 | piper, pipew := io.Pipe() 126 | swarm.SetStreamHandler(func(s *ps.Stream) { 127 | defer s.Close() 128 | log("accepted 
stream") 129 | w := io.MultiWriter(s, pipew) 130 | io.Copy(w, s) // echo everything and write it to pipew 131 | log("closing stream") 132 | }) 133 | 134 | log("listening at %s", "localhost:0") 135 | l, err := net.Listen("tcp", "localhost:0") 136 | checkErr(t, err) 137 | 138 | _, err = swarm.AddListener(l) 139 | checkErr(t, err) 140 | 141 | log("dialing to %s", l.Addr().String()) 142 | nc1, err := net.Dial("tcp", l.Addr().String()) 143 | checkErr(t, err) 144 | 145 | c1, err := swarm.AddConn(nc1) 146 | checkErr(t, err) 147 | defer c1.Close() 148 | 149 | log("creating stream") 150 | s1, err := c1.NewStream() 151 | checkErr(t, err) 152 | defer s1.Close() 153 | 154 | buf1 := randBuf(4096) 155 | log("writing %d bytes to stream", len(buf1)) 156 | _, err = s1.Write(buf1) 157 | checkErr(t, err) 158 | 159 | buf2 := make([]byte, len(buf1)) 160 | log("reading %d bytes from stream (echoed)", len(buf2)) 161 | _, err = s1.Read(buf2) 162 | checkErr(t, err) 163 | if string(buf2) != string(buf1) { 164 | t.Error("buf1 and buf2 not equal: %s != %s", string(buf1), string(buf2)) 165 | } 166 | 167 | buf3 := make([]byte, len(buf1)) 168 | log("reading %d bytes from pipe (tee)", len(buf3)) 169 | _, err = piper.Read(buf3) 170 | checkErr(t, err) 171 | if string(buf3) != string(buf1) { 172 | t.Error("buf1 and buf3 not equal: %s != %s", string(buf1), string(buf3)) 173 | } 174 | } 175 | 176 | func SubtestSimpleWrite100msgs(t *testing.T, tr smux.Transport) { 177 | 178 | msgs := 100 179 | msgsize := 1 << 19 180 | es := singleConn(t, tr) 181 | defer es.swarm.Close() 182 | 183 | log("creating stream") 184 | stream, err := es.conns[0].NewStream() 185 | checkErr(t, err) 186 | 187 | bufs := make(chan []byte, msgs) 188 | var wg sync.WaitGroup 189 | 190 | wg.Add(1) 191 | go func() { 192 | defer wg.Done() 193 | 194 | for i := 0; i < msgs; i++ { 195 | buf := randBuf(msgsize) 196 | bufs <- buf 197 | log("writing %d bytes (message %d/%d #%x)", len(buf), i, msgs, buf[:3]) 198 | if _, err := stream.Write(buf); 
err != nil {
				t.Error(fmt.Errorf("stream.Write(buf): %s", err))
				continue
			}
		}
		close(bufs)
	}()

	// reader: verify each echoed message against the recorded original.
	wg.Add(1)
	go func() {
		defer wg.Done()

		buf2 := make([]byte, msgsize)
		i := 0
		for buf1 := range bufs {
			log("reading %d bytes (message %d/%d #%x)", len(buf1), i, msgs, buf1[:3])
			i++

			if _, err := io.ReadFull(stream, buf2); err != nil {
				t.Error(fmt.Errorf("readFull(stream, buf2): %s", err))
				continue
			}
			if !bytes.Equal(buf1, buf2) {
				t.Error(fmt.Errorf("buffers not equal (%x != %x)", buf1[:3], buf2[:3]))
			}
		}
	}()

	wg.Wait()
}

// SubtestStressNSwarmNConnNStreamNMsg stress-tests the transport with
// nSwarm swarms, nConn connections per swarm pair, nStream streams per
// connection, and nMsg echoed messages per stream. Total goroutine
// fan-out is capped by a token-bucket rate limiter.
func SubtestStressNSwarmNConnNStreamNMsg(t *testing.T, tr smux.Transport, nSwarm, nConn, nStream, nMsg int) {

	msgsize := 1 << 11

	rateLimitN := 5000
	rateLimitChan := make(chan struct{}, rateLimitN) // max of 5k funcs.
	for i := 0; i < rateLimitN; i++ {
		rateLimitChan <- struct{}{}
	}

	// rateLimit runs f once a token is available, returning it after.
	rateLimit := func(f func()) {
		<-rateLimitChan
		f()
		rateLimitChan <- struct{}{}
	}

	// writeStream writes nMsg random messages to s, recording each in bufs.
	writeStream := func(s *ps.Stream, bufs chan<- []byte) {
		log("writeStream %p, %d nMsg", s, nMsg)

		for i := 0; i < nMsg; i++ {
			buf := randBuf(msgsize)
			bufs <- buf
			log("%p writing %d bytes (message %d/%d #%x)", s, len(buf), i, nMsg, buf[:3])
			if _, err := s.Write(buf); err != nil {
				t.Error(fmt.Errorf("s.Write(buf): %s", err))
				continue
			}
		}
	}

	// readStream reads back the echo of every message recorded in bufs
	// and compares it to the original.
	readStream := func(s *ps.Stream, bufs <-chan []byte) {
		log("readStream %p, %d nMsg", s, nMsg)

		buf2 := make([]byte, msgsize)
		i := 0
		for buf1 := range bufs {
			i++
			log("%p reading %d bytes (message %d/%d #%x)", s, len(buf1), i-1, nMsg, buf1[:3])

			if _, err := io.ReadFull(s, buf2); err != nil {
				log("%p failed to read %d bytes (message %d/%d #%x)", s, len(buf1), i-1, nMsg, buf1[:3])
				t.Error(fmt.Errorf("io.ReadFull(s, buf2): %s", err))
				continue
			}
			if !bytes.Equal(buf1, buf2) {
				t.Error(fmt.Errorf("buffers not equal (%x != %x)", buf1[:3], buf2[:3]))
			}
		}
	}

	// openStreamAndRW opens one stream on c and echoes nMsg messages over it.
	openStreamAndRW := func(c *ps.Conn) {
		log("openStreamAndRW %p, %d nMsg", c, nMsg)

		s, err := c.NewStream()
		if err != nil {
			t.Error(fmt.Errorf("Failed to create NewStream: %s", err))
			return
		}

		bufs := make(chan []byte, nMsg)
		go func() {
			writeStream(s, bufs)
			close(bufs)
		}()

		readStream(s, bufs)
		s.Close()
	}

	// openConnAndRW dials from swarm a to a random listener of swarm b,
	// then runs nStream concurrent echo sessions over the connection.
	openConnAndRW := func(a, b *ps.Swarm) {
		log("openConnAndRW %p -> %p, %d nStream", a, b, nConn)

		ls := b.Listeners()
		l := ls[mrand.Intn(len(ls))]
		nl := l.NetListener()
		nla := nl.Addr()

		nc, err := net.Dial(nla.Network(), nla.String())
		if err != nil {
			t.Fatal(fmt.Errorf("net.Dial(%s, %s): %s", nla.Network(), nla.String(), err))
			return
		}

		c, err := a.AddConn(nc)
		if err != nil {
			t.Fatal(fmt.Errorf("a.AddConn(%s <--> %s): %s", nc.LocalAddr(), nc.RemoteAddr(), err))
			return
		}

		var wg sync.WaitGroup
		for i := 0; i < nStream; i++ {
			wg.Add(1)
			go rateLimit(func() {
				defer wg.Done()
				openStreamAndRW(c)
			})
		}
		wg.Wait()
		c.Close()
	}

	// openConnsAndRW opens nConn concurrent connections from a to b.
	openConnsAndRW := func(a, b *ps.Swarm) {
		log("openConnsAndRW %p -> %p, %d conns", a, b, nConn)

		var wg sync.WaitGroup
		for i := 0; i < nConn; i++ {
			wg.Add(1)
			go rateLimit(func() {
				defer wg.Done()
				openConnAndRW(a, b)
			})
		}
		wg.Wait()
	}

	// connectSwarmsAndRW connects every swarm to every swarm (self included).
	connectSwarmsAndRW := func(swarms []*ps.Swarm) {
		log("connectSwarmsAndRW %d swarms", len(swarms))

		var wg sync.WaitGroup
		for _, a := range swarms {
			for _, b := range swarms {
				wg.Add(1)
				a := a // race
				b := b // race
				go rateLimit(func() {
					defer wg.Done()
					openConnsAndRW(a, b)
				})
			}
		}
		wg.Wait()
	}

	swarms := makeSwarms(t, tr, nSwarm, 3) // 3 listeners per swarm.
	connectSwarmsAndRW(swarms)
	for _, s := range swarms {
		s.Close()
	}

}

func SubtestStress1Swarm1Conn1Stream1Msg(t *testing.T, tr smux.Transport) {
	SubtestStressNSwarmNConnNStreamNMsg(t, tr, 1, 1, 1, 1)
}

func SubtestStress1Swarm1Conn1Stream100Msg(t *testing.T, tr smux.Transport) {
	SubtestStressNSwarmNConnNStreamNMsg(t, tr, 1, 1, 1, 100)
}

func SubtestStress1Swarm1Conn100Stream100Msg(t *testing.T, tr smux.Transport) {
	SubtestStressNSwarmNConnNStreamNMsg(t, tr, 1, 1, 100, 100)
}

func SubtestStress1Swarm10Conn50Stream50Msg(t *testing.T, tr smux.Transport) {
	SubtestStressNSwarmNConnNStreamNMsg(t, tr, 1, 10, 50, 50)
}

func SubtestStress5Swarm2Conn20Stream20Msg(t *testing.T, tr smux.Transport) {
	SubtestStressNSwarmNConnNStreamNMsg(t, tr, 5, 2, 20, 20)
}

func SubtestStress10Swarm2Conn100Stream100Msg(t *testing.T, tr smux.Transport) {
	SubtestStressNSwarmNConnNStreamNMsg(t, tr, 10, 2, 100, 100)
}

// SubtestAll runs every subtest in sequence against tr.
func SubtestAll(t *testing.T, tr smux.Transport) {

	tests := []TransportTest{
		SubtestConstructSwarm,
		SubtestSimpleWrite,
		SubtestSimpleWrite100msgs,
		SubtestStress1Swarm1Conn1Stream1Msg,
		SubtestStress1Swarm1Conn1Stream100Msg,
		SubtestStress1Swarm1Conn100Stream100Msg,
		SubtestStress1Swarm10Conn50Stream50Msg,
		SubtestStress5Swarm2Conn20Stream20Msg,
		// SubtestStress10Swarm2Conn100Stream100Msg, <-- this hoses the osx network stack...
406 | } 407 | 408 | for _, f := range tests { 409 | if testing.Verbose() { 410 | fmt.Fprintf(os.Stderr, "==== RUN %s\n", GetFunctionName(f)) 411 | } 412 | f(t, tr) 413 | } 414 | } 415 | 416 | type TransportTest func(t *testing.T, tr smux.Transport) 417 | 418 | func TestNoOp(t *testing.T) {} 419 | 420 | func GetFunctionName(i interface{}) string { 421 | return runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name() 422 | } 423 | -------------------------------------------------------------------------------- /muxtest/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "author": "whyrusleeping", 3 | "bugs": { 4 | "url": "https://github.com/jbenet/go-peerstream" 5 | }, 6 | "gx": { 7 | "dvcsimport": "github.com/jbenet/go-peerstream/muxtest" 8 | }, 9 | "gxDependencies": [ 10 | { 11 | "author": "whyrusleeping", 12 | "hash": "QmZJM54H2j26QwmWAYaW24L8Hwo3ojrQmiGj3gBF9Quj8d", 13 | "name": "go-smux-muxado", 14 | "version": "1.0.0" 15 | }, 16 | { 17 | "author": "whyrusleeping", 18 | "hash": "QmYaeRqthWTco7oQF4dztuqA94P8JF36gVjd2z2eEqKfrh", 19 | "name": "go-smux-yamux", 20 | "version": "1.1.0" 21 | }, 22 | { 23 | "author": "whyrusleeping", 24 | "hash": "QmVcmcQE9eX4HQ8QwhVXpoHt3ennG7d299NDYFq9D1Uqa1", 25 | "name": "go-smux-multistream", 26 | "version": "1.0.0" 27 | }, 28 | { 29 | "author": "whyrusleeping", 30 | "hash": "Qmao31fmDJxp9gY7YNqkN1i3kGaknL6XSzKtdC1VCU7Qj8", 31 | "name": "go-smux-multiplex", 32 | "version": "1.0.0" 33 | }, 34 | { 35 | "author": "whyrusleeping", 36 | "hash": "QmWMKNLGkYJTZ4Tq3DQ8E9j86QaKvGjKgFzvLzGYXvW69Z", 37 | "name": "go-smux-spdystream", 38 | "version": "1.0.0" 39 | } 40 | ], 41 | "gxVersion": "0.7.0", 42 | "language": "go", 43 | "license": "", 44 | "name": "muxtest", 45 | "version": "0.0.0" 46 | } 47 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "author": 
"whyrusleeping", 3 | "bugs": { 4 | "url": "https://github.com/jbenet/go-peerstream" 5 | }, 6 | "gx": { 7 | "dvcsimport": "github.com/jbenet/go-peerstream" 8 | }, 9 | "gxDependencies": [ 10 | { 11 | "author": "whyrusleeping", 12 | "hash": "Qmb1US8uyZeEpMyc56wVZy2cDFdQjNFojAUYVCoo9ieTqp", 13 | "name": "go-stream-muxer", 14 | "version": "1.0.0" 15 | }, 16 | { 17 | "author": "whyrusleeping", 18 | "hash": "QmWHgLqrghM9zw77nF6gdvT9ExQ2RB9pLxkd8sDHZf1rWb", 19 | "name": "go-temp-err-catcher", 20 | "version": "0.0.0" 21 | }, 22 | { 23 | "author": "whyrusleeping", 24 | "hash": "QmZNkThpqfVXs9GNbexPrfBbXSLNYeKrE7jwFM2oqHbyqN", 25 | "name": "go-libp2p-protocol", 26 | "version": "1.0.0" 27 | } 28 | ], 29 | "gxVersion": "0.7.0", 30 | "language": "go", 31 | "license": "", 32 | "name": "go-peerstream", 33 | "version": "1.3.0" 34 | } 35 | 36 | -------------------------------------------------------------------------------- /stream.go: -------------------------------------------------------------------------------- 1 | package peerstream 2 | 3 | import ( 4 | "fmt" 5 | 6 | protocol "gx/ipfs/QmZNkThpqfVXs9GNbexPrfBbXSLNYeKrE7jwFM2oqHbyqN/go-libp2p-protocol" 7 | smux "gx/ipfs/Qmb1US8uyZeEpMyc56wVZy2cDFdQjNFojAUYVCoo9ieTqp/go-stream-muxer" 8 | ) 9 | 10 | // StreamHandler is a function which receives a Stream. It 11 | // allows clients to set a function to receive newly created 12 | // streams, and decide whether to continue adding them. 13 | // It works sort of like a http.HandleFunc. 14 | // Note: the StreamHandler is called sequentially, so spawn 15 | // goroutines or pass the Stream. See EchoHandler. 16 | type StreamHandler func(s *Stream) 17 | 18 | // Stream is an io.{Read,Write,Close}r to a remote counterpart. 
19 | // It wraps a spdystream.Stream, and links it to a Conn and groups 20 | type Stream struct { 21 | smuxStream smux.Stream 22 | 23 | conn *Conn 24 | groups groupSet 25 | protocol protocol.ID 26 | } 27 | 28 | func newStream(ss smux.Stream, c *Conn) *Stream { 29 | s := &Stream{ 30 | conn: c, 31 | smuxStream: ss, 32 | groups: groupSet{m: make(map[Group]struct{})}, 33 | } 34 | s.groups.AddSet(&c.groups) // inherit groups 35 | return s 36 | } 37 | 38 | // String returns a string representation of the Stream 39 | func (s *Stream) String() string { 40 | f := " %s>" 41 | return fmt.Sprintf(f, s.conn.NetConn().LocalAddr(), s.conn.NetConn().RemoteAddr()) 42 | } 43 | 44 | // SPDYStream returns the underlying *spdystream.Stream 45 | func (s *Stream) Stream() smux.Stream { 46 | return s.smuxStream 47 | } 48 | 49 | // Conn returns the Conn associated with this Stream 50 | func (s *Stream) Conn() *Conn { 51 | return s.conn 52 | } 53 | 54 | // Swarm returns the Swarm asociated with this Stream 55 | func (s *Stream) Swarm() *Swarm { 56 | return s.conn.swarm 57 | } 58 | 59 | // Groups returns the Groups this Stream belongs to 60 | func (s *Stream) Groups() []Group { 61 | return s.groups.Groups() 62 | } 63 | 64 | // InGroup returns whether this stream belongs to a Group 65 | func (s *Stream) InGroup(g Group) bool { 66 | return s.groups.Has(g) 67 | } 68 | 69 | // AddGroup assigns given Group to Stream 70 | func (s *Stream) AddGroup(g Group) { 71 | s.groups.Add(g) 72 | } 73 | 74 | func (s *Stream) Read(p []byte) (n int, err error) { 75 | return s.smuxStream.Read(p) 76 | } 77 | 78 | func (s *Stream) Write(p []byte) (n int, err error) { 79 | return s.smuxStream.Write(p) 80 | } 81 | 82 | func (s *Stream) Close() error { 83 | return s.conn.swarm.removeStream(s) 84 | } 85 | 86 | func (s *Stream) Protocol() protocol.ID { 87 | return s.protocol 88 | } 89 | 90 | func (s *Stream) SetProtocol(p protocol.ID) { 91 | s.protocol = p 92 | } 93 | 94 | // StreamsWithGroup narrows down a set of 
streams to those in given group. 95 | func StreamsWithGroup(g Group, streams []*Stream) []*Stream { 96 | var out []*Stream 97 | for _, s := range streams { 98 | if s.InGroup(g) { 99 | out = append(out, s) 100 | } 101 | } 102 | return out 103 | } 104 | -------------------------------------------------------------------------------- /swarm.go: -------------------------------------------------------------------------------- 1 | package peerstream 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "net" 7 | "sync" 8 | "time" 9 | 10 | smux "gx/ipfs/Qmb1US8uyZeEpMyc56wVZy2cDFdQjNFojAUYVCoo9ieTqp/go-stream-muxer" 11 | ) 12 | 13 | // fd is a (file) descriptor, unix style 14 | type fd uint32 15 | 16 | // GarbageCollectTimeout governs the periodic connection closer. 17 | var GarbageCollectTimeout = 5 * time.Second 18 | 19 | type Swarm struct { 20 | // the transport we'll use. 21 | transport smux.Transport 22 | 23 | // active streams. 24 | streams map[*Stream]struct{} 25 | streamLock sync.RWMutex 26 | 27 | // active connections. generate new Streams 28 | conns map[*Conn]struct{} 29 | connLock sync.RWMutex 30 | 31 | // active listeners. generate new Listeners 32 | listeners map[*Listener]struct{} 33 | listenerLock sync.RWMutex 34 | 35 | // these handlers should be accessed with their getter/setter 36 | // as this pointer may be changed at any time. 
37 | handlerLock sync.RWMutex // protects the functions below 38 | connHandler ConnHandler // receives Conns intiated remotely 39 | streamHandler StreamHandler // receives Streams initiated remotely 40 | selectConn SelectConn // default SelectConn function 41 | 42 | // notification listeners 43 | notifiees map[Notifiee]struct{} 44 | notifieeLock sync.RWMutex 45 | 46 | closed chan struct{} 47 | } 48 | 49 | func NewSwarm(t smux.Transport) *Swarm { 50 | s := &Swarm{ 51 | transport: t, 52 | streams: make(map[*Stream]struct{}), 53 | conns: make(map[*Conn]struct{}), 54 | listeners: make(map[*Listener]struct{}), 55 | notifiees: make(map[Notifiee]struct{}), 56 | selectConn: SelectRandomConn, 57 | streamHandler: NoOpStreamHandler, 58 | connHandler: NoOpConnHandler, 59 | closed: make(chan struct{}), 60 | } 61 | go s.connGarbageCollect() 62 | return s 63 | } 64 | 65 | // String returns a string with various internal stats 66 | func (s *Swarm) String() string { 67 | s.listenerLock.Lock() 68 | ls := len(s.listeners) 69 | s.listenerLock.Unlock() 70 | 71 | s.connLock.Lock() 72 | cs := len(s.conns) 73 | s.connLock.Unlock() 74 | 75 | s.streamLock.Lock() 76 | ss := len(s.streams) 77 | s.streamLock.Unlock() 78 | 79 | str := "" 80 | return fmt.Sprintf(str, ls, cs, ss) 81 | } 82 | 83 | // Dump returns a string with all the internal state 84 | func (s *Swarm) Dump() string { 85 | str := s.String() + "\n" 86 | 87 | s.listenerLock.Lock() 88 | for l, _ := range s.listeners { 89 | str += fmt.Sprintf("\t%s %v\n", l, l.Groups()) 90 | } 91 | s.listenerLock.Unlock() 92 | 93 | s.connLock.Lock() 94 | for c, _ := range s.conns { 95 | str += fmt.Sprintf("\t%s %v\n", c, c.Groups()) 96 | } 97 | s.connLock.Unlock() 98 | 99 | s.streamLock.Lock() 100 | for ss, _ := range s.streams { 101 | str += fmt.Sprintf("\t%s %v\n", ss, ss.Groups()) 102 | } 103 | s.streamLock.Unlock() 104 | 105 | return str 106 | } 107 | 108 | // SetStreamHandler assigns the stream handler in the swarm. 
// The handler assumes responsibility for closing the stream.
// This need not happen at the end of the handler, leaving the
// stream open (to be used and closed later) is fine.
// It is also fine to keep a pointer to the Stream.
// This is a threadsafe (atomic) operation
func (s *Swarm) SetStreamHandler(sh StreamHandler) {
	s.handlerLock.Lock()
	defer s.handlerLock.Unlock()
	s.streamHandler = sh
}

// StreamHandler returns the Swarm's current StreamHandler.
// This is a threadsafe (atomic) operation
func (s *Swarm) StreamHandler() StreamHandler {
	s.handlerLock.RLock()
	defer s.handlerLock.RUnlock()
	// fall back to the no-op handler when unset
	if s.streamHandler == nil {
		return NoOpStreamHandler
	}
	return s.streamHandler
}

// SetConnHandler assigns the conn handler in the swarm.
// Unlike the StreamHandler, the ConnHandler has less responsibility
// for the Connection. The Swarm is still its client.
// This handler is only a notification.
// This is a threadsafe (atomic) operation
func (s *Swarm) SetConnHandler(ch ConnHandler) {
	s.handlerLock.Lock()
	defer s.handlerLock.Unlock()
	s.connHandler = ch
}

// ConnHandler returns the Swarm's current ConnHandler.
// This is a threadsafe (atomic) operation
func (s *Swarm) ConnHandler() ConnHandler {
	s.handlerLock.RLock()
	defer s.handlerLock.RUnlock()
	if s.connHandler == nil {
		return NoOpConnHandler
	}
	return s.connHandler
}

// SetConnSelect assigns the connection selector in the swarm.
// If cs is nil, will use SelectRandomConn
// This is a threadsafe (atomic) operation
func (s *Swarm) SetSelectConn(cs SelectConn) {
	s.handlerLock.Lock()
	defer s.handlerLock.Unlock()
	s.selectConn = cs
}

// ConnSelect returns the Swarm's current connection selector.
// ConnSelect is used in order to select the best of a set of
// possible connections. The default chooses one at random.
// This is a threadsafe (atomic) operation
func (s *Swarm) SelectConn() SelectConn {
	s.handlerLock.RLock()
	defer s.handlerLock.RUnlock()
	if s.selectConn == nil {
		return SelectRandomConn
	}
	return s.selectConn
}

// Conns returns all the connections associated with this Swarm.
func (s *Swarm) Conns() []*Conn {
	s.connLock.RLock()
	conns := make([]*Conn, 0, len(s.conns))
	for c := range s.conns {
		conns = append(conns, c)
	}
	s.connLock.RUnlock()

	// filter out closed connections, closing them as a side effect
	open := make([]*Conn, 0, len(conns))
	for _, c := range conns {
		// TODO: unmuxed connections won't be garbage collected for now.
		// This isnt a common usecase and is only here for a few test applications
		// in the future, we will fix this
		if c.smuxConn != nil && c.smuxConn.IsClosed() {
			c.GoClose()
		} else {
			open = append(open, c)
		}
	}
	return open
}

// Listeners returns all the listeners associated with this Swarm.
func (s *Swarm) Listeners() []*Listener {
	s.listenerLock.RLock()
	out := make([]*Listener, 0, len(s.listeners))
	for c := range s.listeners {
		out = append(out, c)
	}
	s.listenerLock.RUnlock()
	return out
}

// Streams returns all the streams associated with this Swarm.
func (s *Swarm) Streams() []*Stream {
	s.streamLock.RLock()
	out := make([]*Stream, 0, len(s.streams))
	for c := range s.streams {
		out = append(out, c)
	}
	s.streamLock.RUnlock()
	return out
}

// AddListener adds net.Listener to the Swarm, and immediately begins
// accepting incoming connections.
func (s *Swarm) AddListener(l net.Listener) (*Listener, error) {
	return s.addListener(l)
}

// AddListenerWithRateLimit adds Listener to the Swarm, and immediately
// begins accepting incoming connections. The rate of connection acceptance
// depends on the RateLimit option
// func (s *Swarm) AddListenerWithRateLimit(net.Listner, RateLimit) // TODO

// AddConn gives the Swarm ownership of net.Conn. The Swarm will open a
// SPDY session and begin listening for Streams.
// Returns the resulting Swarm-associated peerstream.Conn.
// Idempotent: if the Connection has already been added, this is a no-op.
func (s *Swarm) AddConn(netConn net.Conn) (*Conn, error) {
	return s.addConn(netConn, false)
}

// NewStream opens a new Stream on the best available connection,
// as selected by current swarm.SelectConn.
func (s *Swarm) NewStream() (*Stream, error) {
	return s.NewStreamSelectConn(s.SelectConn())
}

// newStreamSelectConn applies selConn to conns and opens a stream on
// the winner; the selection must come from the given set.
func (s *Swarm) newStreamSelectConn(selConn SelectConn, conns []*Conn) (*Stream, error) {
	if selConn == nil {
		return nil, errors.New("nil SelectConn")
	}

	best := selConn(conns)
	if best == nil || !ConnInConns(best, conns) {
		return nil, ErrInvalidConnSelected
	}
	return s.NewStreamWithConn(best)
}

// NewStreamWithSelectConn opens a new Stream on a connection selected
// by selConn.
func (s *Swarm) NewStreamSelectConn(selConn SelectConn) (*Stream, error) {
	if selConn == nil {
		return nil, errors.New("nil SelectConn")
	}

	conns := s.Conns()
	if len(conns) == 0 {
		return nil, ErrNoConnections
	}
	return s.newStreamSelectConn(selConn, conns)
}

// NewStreamWithGroup opens a new Stream on an available connection in
// the given group.
Uses the current swarm.SelectConn to pick between 273 | // multiple connections. 274 | func (s *Swarm) NewStreamWithGroup(group Group) (*Stream, error) { 275 | conns := s.ConnsWithGroup(group) 276 | return s.newStreamSelectConn(s.SelectConn(), conns) 277 | } 278 | 279 | // NewStreamWithNetConn opens a new Stream on given net.Conn. 280 | // Calls s.AddConn(netConn). 281 | func (s *Swarm) NewStreamWithNetConn(netConn net.Conn) (*Stream, error) { 282 | c, err := s.AddConn(netConn) 283 | if err != nil { 284 | return nil, err 285 | } 286 | return s.NewStreamWithConn(c) 287 | } 288 | 289 | // NewStreamWithConnection opens a new Stream on given connection. 290 | func (s *Swarm) NewStreamWithConn(conn *Conn) (*Stream, error) { 291 | if conn == nil { 292 | return nil, errors.New("nil Conn") 293 | } 294 | if conn.Swarm() != s { 295 | return nil, errors.New("connection not associated with swarm") 296 | } 297 | 298 | if conn.smuxConn.IsClosed() { 299 | go conn.Close() 300 | return nil, errors.New("conn is closed") 301 | } 302 | 303 | s.connLock.RLock() 304 | if _, found := s.conns[conn]; !found { 305 | s.connLock.RUnlock() 306 | return nil, errors.New("connection not associated with swarm") 307 | } 308 | s.connLock.RUnlock() 309 | return s.createStream(conn) 310 | } 311 | 312 | // AddConnToGroup assigns given Group to conn 313 | func (s *Swarm) AddConnToGroup(conn *Conn, g Group) { 314 | conn.groups.Add(g) 315 | } 316 | 317 | // ConnsWithGroup returns all the connections with a given Group 318 | func (s *Swarm) ConnsWithGroup(g Group) []*Conn { 319 | return ConnsWithGroup(g, s.Conns()) 320 | } 321 | 322 | // StreamsWithGroup returns all the streams with a given Group 323 | func (s *Swarm) StreamsWithGroup(g Group) []*Stream { 324 | return StreamsWithGroup(g, s.Streams()) 325 | } 326 | 327 | // Close shuts down the Swarm, and it's listeners. 328 | func (s *Swarm) Close() error { 329 | defer close(s.closed) 330 | 331 | // automatically close everything new we get. 
	s.SetConnHandler(func(c *Conn) { c.Close() })
	s.SetStreamHandler(func(s *Stream) { s.Close() })

	// close all listeners in parallel, then wait for them all
	var wgl sync.WaitGroup
	for _, l := range s.Listeners() {
		wgl.Add(1)
		go func(list *Listener) {
			list.Close()
			wgl.Done()
		}(l)
	}
	wgl.Wait()

	// then close all connections in parallel, and wait
	var wgc sync.WaitGroup
	for _, c := range s.Conns() {
		wgc.Add(1)
		go func(conn *Conn) {
			conn.Close()
			wgc.Done()
		}(c)
	}
	wgc.Wait()
	return nil
}

// connGarbageCollect periodically sweeps conns to make sure
// they're still alive. if any are closed, removes them.
func (s *Swarm) connGarbageCollect() {
	for {
		select {
		case <-s.closed:
			return
		case <-time.After(GarbageCollectTimeout):
		}

		for _, c := range s.Conns() {
			if c.smuxConn != nil && c.smuxConn.IsClosed() {
				go c.Close()
			}
		}
	}
}

// Notify signs up Notifiee to receive signals when events happen
func (s *Swarm) Notify(n Notifiee) {
	s.notifieeLock.Lock()
	s.notifiees[n] = struct{}{}
	s.notifieeLock.Unlock()
}

// StopNotify unregisters Notifiee from receiving signals
func (s *Swarm) StopNotify(n Notifiee) {
	s.notifieeLock.Lock()
	delete(s.notifiees, n)
	s.notifieeLock.Unlock()
}

// notifyAll runs the notification function on all Notifiees
func (s *Swarm) notifyAll(notification func(n Notifiee)) {
	s.notifieeLock.RLock()
	for n := range s.notifiees {
		// make sure we dont block
		// and they dont block each other.
		go notification(n)
	}
	s.notifieeLock.RUnlock()
}

// Notifiee is an interface for an object wishing to receive
// notifications from a Swarm
type Notifiee interface {
	Connected(*Conn)      // called when a connection opened
	Disconnected(*Conn)   // called when a connection closed
	OpenedStream(*Stream) // called when a stream opened
	ClosedStream(*Stream) // called when a stream closed
}