├── .travis.yml ├── Makefile ├── renovate.json5 ├── bin ├── tuplespaced │ └── main.go └── tuplespace │ └── main.go ├── README.md ├── service ├── service_test.go ├── service.go ├── client.go └── cluster │ └── client.go ├── tuplespace_test.go └── tuplespace.go /.travis.yml: -------------------------------------------------------------------------------- 1 | language: go 2 | install: 3 | - go get -t -d -v ./... 4 | - go build -v ./... 5 | go: 1.3 6 | script: go test -bench=. -v ./... 7 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | service/client_rapid.go: service/service.go 2 | go run ./bin/tuplespaced/main.go --go > service/client_rapid.go~ && mv service/client_rapid.go~ service/client_rapid.go 3 | -------------------------------------------------------------------------------- /renovate.json5: -------------------------------------------------------------------------------- 1 | { 2 | $schema: "https://docs.renovatebot.com/renovate-schema.json", 3 | extends: [ 4 | "config:recommended", 5 | ":semanticCommits", 6 | ":semanticCommitTypeAll(chore)", 7 | ":semanticCommitScope(deps)", 8 | "group:allNonMajor", 9 | "schedule:earlyMondays", // Run once a week. 10 | ], 11 | } 12 | -------------------------------------------------------------------------------- /bin/tuplespaced/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "io" 5 | "net" 6 | "net/rpc" 7 | 8 | "github.com/hashicorp/yamux" 9 | 10 | "github.com/alecthomas/go-logging" 11 | "github.com/alecthomas/kingpin" 12 | "github.com/alecthomas/util" 13 | 14 | "github.com/alecthomas/tuplespace/service" 15 | ) 16 | 17 | var ( 18 | log = logging.MustGetLogger("tuplespaced") 19 | 20 | bindFlag = kingpin.Flag("bind", "Bind address for service.").Default("127.0.0.1:2619").TCP() 21 | ) 22 | 23 | func main() { 24 | util.Bootstrap(kingpin.CommandLine, util.AllModules, nil) 25 | s := service.New() 26 | err := rpc.Register(s) 27 | kingpin.FatalIfError(err, "") 28 | bind, err := net.Listen("tcp", (*bindFlag).String()) 29 | kingpin.FatalIfError(err, "") 30 | log.Infof("Started TupleSpace on %s", *bindFlag) 31 | for { 32 | conn, err := bind.Accept() 33 | kingpin.FatalIfError(err, "") 34 | log.Infof("New connection %s -> %s", conn.RemoteAddr(), conn.LocalAddr()) 35 | go func(conn net.Conn) { 36 | session, err := yamux.Server(conn, nil) 37 | if err != nil { 38 | log.Errorf("Failed to start multiplexer: %s", err) 39 | return 40 | } 41 | defer session.Close() 42 | for { 43 | stream, err := session.Accept() 44 | if err == io.EOF { 45 | log.Infof("Connection %s -> %s terminated", conn.RemoteAddr(), conn.LocalAddr()) 46 | return 47 | } 48 | log.Infof("New session on %s -> %s", conn.RemoteAddr(), conn.LocalAddr()) 49 | if err != nil { 50 | log.Errorf("Failed to accept new stream on multiplexed connection: %s", err) 51 | return 52 | } 53 | go rpc.ServeConn(stream) 54 | } 55 | }(conn) 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # A tuple space server for Go. 
[![Build Status](https://travis-ci.org/alecthomas/tuplespace.png)](https://travis-ci.org/alecthomas/tuplespace) 2 | 3 | This is an implementation of a [tuple space](http://www.mcs.anl.gov/~itf/dbpp/text/node44.html) as a [Go RPC service](http://golang.org/pkg/net/rpc/). 4 | 5 | - "Tuples" are maps. 6 | - Tuples are inherently unreliable and may disappear at any time, including before their timeout. 7 | - Tuples can be matched using arbitrary expressions, e.g. `age > 36 && id % 2 == 0`. 8 | - Senders can wait (with optional timeout) for a tuple to be processed by a receiver. 9 | - Subsequent reads may return the same tuple. 10 | - Take operations are performed inside a [reservation](#reservations). 11 | 12 | ## Example 13 | 14 | In two separate terminals, run the following commands at the same time. 15 | 16 | Send a tuple and wait for it to be processed: 17 | 18 | $ curl --data-binary '{"tuples": [{"age": 20}], "ack": true}' http://localhost:2619/tuplespaces/users/tuples 19 | {"s":201} 20 | 21 | Read a tuple (also marking it as processed): 22 | 23 | $ curl http://localhost:2619/tuplespaces/users/tuples 24 | {"s":200,"d":[{"age":20}]} 25 | 26 | ## Operations 27 | 28 | The following operations are supported: 29 | 30 | - `Read(match, timeout) -> Tuple` - Read a single tuple from the tuple space. 31 | - `ReadAll(match, timeout) -> []Tuple` - Read all matching tuples from the tuple space. 32 | - `Send(tuple, expires)` - Send a tuple. 33 | - `SendMany(tuples, expires)` - Send many tuples at once. 34 | - `SendWithAcknowledgement(tuple, expires)` - Send a tuple and wait for it to be processed. 35 | - `Take(match, timeout, reservationTimeout) -> Reservation` - Take a matching tuple under a [reservation](#reservations). 36 | 37 | `TakeAll()` is not supported. To support such an operation in a distributed environment would require server-server coordination, which is explicitly outside the scope of this implementation. 38 | 39 | ## Features 40 | 41 | ### Reservations 42 | 43 | A reservation (`Take()`) provides timed, exclusive access to a tuple. If neither `Complete()` nor `Cancel()` is called on the reservation before it times out, the tuple is returned to the tuple space. 44 | 45 | ### Scalability 46 | 47 | Due to the constraints around the provided operations, the tuple space service can be scaled by simply adding nodes. Clients discover new server nodes by periodically reading all tuples from a management space. All clients connect to all servers simultaneously. 48 | 49 | - `Read()` should be issued to multiple servers at once. One result is then used, and the remainder discarded. 50 | - `Take()` should be issued to multiple servers at once. One reservation is then used and the rest are cancelled. 51 | - `Send()` and `SendWithAcknowledgement()` should be sent round-robin to all servers.
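The cluster client in `service/cluster` implements these strategies. For reference, a minimal single-server session using the plain Go client in the `service` package might look like the following. This is a sketch only: it assumes a `tuplespaced` server is running on the default address, and error handling is omitted for brevity.

```go
package main

import (
	"fmt"
	"time"

	"github.com/alecthomas/tuplespace"
	"github.com/alecthomas/tuplespace/service"
)

func main() {
	// Connect to a single tuplespaced server and open the "users" space.
	client, _ := service.Dial("127.0.0.1:2619")
	defer client.Close()
	space, _ := client.Space("users")
	defer space.Close()

	// Send a tuple, then take it back under a reservation.
	_ = space.Send(tuplespace.Tuple{"age": 20}, time.Minute)
	r, _ := space.Take(`age > 18`, time.Second, 0)
	fmt.Println(r.Tuple())
	_ = r.Complete()
}
```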
52 | 53 | ### Installation 54 | 55 | Install the server and client with: 56 | 57 | ```bash 58 | $ go get github.com/alecthomas/tuplespace/bin/tuplespaced 59 | $ go get github.com/alecthomas/tuplespace/bin/tuplespace 60 | ``` 61 | -------------------------------------------------------------------------------- /service/service_test.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "net" 7 | "net/rpc" 8 | "runtime" 9 | "sync" 10 | "testing" 11 | "time" 12 | 13 | "github.com/alecthomas/tuplespace" 14 | "github.com/stretchrcom/testify/assert" 15 | ) 16 | 17 | var ( 18 | serverAddr string 19 | server sync.Once 20 | ) 21 | 22 | func listenTCP() (net.Listener, string) { 23 | l, e := net.Listen("tcp", "127.0.0.1:0") // any available address 24 | if e != nil { 25 | log.Fatalf("net.Listen tcp :0: %v", e) 26 | } 27 | return l, l.Addr().String() 28 | } 29 | 30 | func startServer() { 31 | runtime.GOMAXPROCS(runtime.NumCPU() * 2) 32 | rpc.Register(New()) 33 | var l net.Listener 34 | l, serverAddr = listenTCP() 35 | log.Println("Test RPC server listening on", serverAddr) 36 | go rpc.Accept(l) 37 | } 38 | 39 | func dial(space string) *ClientSpace { 40 | server.Do(startServer) 41 | conn, err := net.Dial("tcp", serverAddr) 42 | if err != nil { 43 | log.Fatalln(err) 44 | } 45 | return HijackConn(serverAddr, space, conn) 46 | } 47 | 48 | func TestServerSend(t *testing.T) { 49 | c := dial("TestServerSend") 50 | bob := tuplespace.Tuple{"name": "bob", "age": 60} 51 | err := c.Send(bob, 0) 52 | assert.NoError(t, err) 53 | err = c.Send(tuplespace.Tuple{"name": "fred", "age": 30}, 0) 54 | assert.NoError(t, err) 55 | 56 | status, err := c.Status() 57 | assert.NoError(t, err) 58 | assert.Equal(t, 2, status.Tuples.Seen) 59 | 60 | tuple, err := c.Read(`age > 50`, 0) 61 | assert.NoError(t, err) 62 | assert.Equal(t, bob, tuple) 63 | } 64 | 65 | func BenchmarkServerSend(b *testing.B) { 66 | wg := sync.WaitGroup{} 67 | n := runtime.NumCPU() 68 | for i := 0; i < n; i++ { 69 | wg.Add(1) 70 | go func() { 71 | defer wg.Done() 72 | c := dial("BenchmarkServerSend") 73 | defer c.Close() 74 | bob := tuplespace.Tuple{"name": "bob", "age": 60} 75 | for i := 0; i < b.N/n; i++ { 76 | c.Send(bob, time.Second*5) 77 | } 78 | 79 | }() 80 | } 81 | wg.Wait() 82 | } 83 | 84 | func benchmarkServerTakeN(n int, b *testing.B) { 85 | wg := sync.WaitGroup{} 86 | ncpu := runtime.NumCPU() 87 | for i := 0; i < ncpu; i++ { 88 | wg.Add(1) 89 | go func() { 90 | defer wg.Done() 91 | c := dial(fmt.Sprintf("BenchmarkServerTake%d", n)) 92 | defer c.Close() 93 | tuple := tuplespace.Tuple{"i": 0} 94 | for i := 0; i < b.N/n/ncpu; i++ { 95 | for j := 0; j < n; j++ { 96 | c.Send(tuple, 0) 97 | } 98 | for j := 0; j < n/ncpu; j++ { 99 | r, err := c.Take("", 0, 0) 100 | if err != nil { 101 | panic(err) 102 | } 103 | r.Complete() 104 | } 105 | } 106 | }() 107 | } 108 | wg.Wait() 109 | } 110 | 111 | func BenchmarkServerTake1(b *testing.B) { 112 | benchmarkServerTakeN(1, b) 113 | } 114 | 115 | func BenchmarkServerTake10(b *testing.B) { 116 | benchmarkServerTakeN(10, b) 117 | } 118 | 119 | func BenchmarkServerTake100(b *testing.B) { 120 | benchmarkServerTakeN(100, b) 121 | } 122 | 123 | func BenchmarkServerTake1000(b *testing.B) { 124 | benchmarkServerTakeN(1000, b) 125 | } 126 | -------------------------------------------------------------------------------- /bin/tuplespace/main.go: -------------------------------------------------------------------------------- 1 | package 
main 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "strconv" 7 | 8 | "github.com/alecthomas/kingpin" 9 | 10 | "github.com/alecthomas/tuplespace" 11 | "github.com/alecthomas/tuplespace/service" 12 | ) 13 | 14 | var ( 15 | tuplespaceFlag = kingpin.Flag("tuplespace", "Address of TupleSpace service.").Default("127.0.0.1:2619").TCP() 16 | spaceFlag = kingpin.Flag("space", "Default space to act on.").Default("test").String() 17 | timeoutFlag = kingpin.Flag("timeout", "Tuple timeout.").Default("0s").Duration() 18 | 19 | statusCommand = kingpin.Command("status", "Status of TupleSpace.") 20 | 21 | sendCommand = kingpin.Command("send", "Send a tuple.") 22 | sendCopiesflag = sendCommand.Flag("copies", "Number of copies of the tuple to send.").PlaceHolder("N").Int() 23 | sendtupleArg = sendCommand.Arg("tuple", "Tuple to send.").Required().StringMap() 24 | 25 | readCommand = kingpin.Command("read", "Read a tuple.") 26 | readMatchArg = readCommand.Arg("expr", "Expression to match with.").String() 27 | 28 | readallCommand = kingpin.Command("readall", "Read all matching tuples.") 29 | readallMatchArg = readallCommand.Arg("expr", "Expression to match with.").String() 30 | 31 | takeCommand = kingpin.Command("take", "Take a tuple.") 32 | takeReservationTimeoutFlag = takeCommand.Flag("reservation_timeout", "Reservation timeout.").Default("0s").Duration() 33 | takeMatchArg = takeCommand.Arg("expr", "Expression to match with.").String() 34 | ) 35 | 36 | func main() { 37 | command := kingpin.Parse() 38 | client, err := service.Dial((*tuplespaceFlag).String()) 39 | kingpin.FatalIfError(err, "") 40 | defer client.Close() 41 | space, err := client.Space(*spaceFlag) 42 | kingpin.FatalIfError(err, "") 43 | defer space.Close() 44 | 45 | switch command { 46 | case "status": 47 | status, err := space.Status() 48 | kingpin.FatalIfError(err, "") 49 | b, _ := json.Marshal(status) 50 | fmt.Printf("%s\n", b) 51 | 52 | case "send": 53 | tuple := tuplespace.Tuple{} 54 | for k, v := range *sendtupleArg { 55 | n, err := strconv.ParseFloat(v, 64) 56 | if err == nil { 57 | tuple[k] = n 58 | } else { 59 | tuple[k] = v 60 | } 61 | } 62 | if *sendCopiesflag > 0 { 63 | tuples := []tuplespace.Tuple{} 64 | for i := 0; i < *sendCopiesflag; i++ { 65 | tuples = append(tuples, tuple) 66 | } 67 | err = space.SendMany(tuples, *timeoutFlag) 68 | } else { 69 | err = space.Send(tuple, *timeoutFlag) 70 | } 71 | kingpin.FatalIfError(err, "") 72 | 73 | case "read": 74 | tuple, err := space.Read(*readMatchArg, *timeoutFlag) 75 | kingpin.FatalIfError(err, "") 76 | b, _ := json.Marshal(tuple) 77 | fmt.Printf("%s\n", b) 78 | 79 | case "readall": 80 | tuples, err := space.ReadAll(*readallMatchArg, *timeoutFlag) 81 | kingpin.FatalIfError(err, "") 82 | for _, tuple := range tuples { 83 | b, _ := json.Marshal(tuple) 84 | fmt.Printf("%s\n", b) 85 | } 86 | 87 | case "take": 88 | reservation, err := space.Take(*takeMatchArg, *timeoutFlag, *takeReservationTimeoutFlag) 89 | kingpin.FatalIfError(err, "") 90 | b, _ := json.Marshal(reservation.Tuple()) 91 | fmt.Printf("%s\n", b) 92 | err = reservation.Complete() 93 | kingpin.FatalIfError(err, "") 94 | } 95 | } 96 | -------------------------------------------------------------------------------- /service/service.go: -------------------------------------------------------------------------------- 1 | // Package service is the implementation of the TupleSpace service and its client. 
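//
// As an illustrative sketch (not from the original source), a bare net/rpc
// server for this service can be started with:
//
//	l, _ := net.Listen("tcp", "127.0.0.1:2619")
//	rpc.Register(service.New())
//	rpc.Accept(l)
//
// The real server in bin/tuplespaced additionally multiplexes each connection
// with yamux and serves RPC on every accepted stream.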
2 | package service 3 | 4 | import ( 5 | "fmt" 6 | "sync" 7 | "sync/atomic" 8 | "time" 9 | 10 | "gopkg.in/tomb.v2" 11 | 12 | "github.com/alecthomas/tuplespace" 13 | ) 14 | 15 | type TupleSpace struct { 16 | lock sync.Mutex 17 | tomb tomb.Tomb 18 | spaces map[string]*tuplespace.TupleSpace 19 | reservationCounter int64 20 | reservations map[int64]*tuplespace.Reservation 21 | } 22 | 23 | // New creates a new TupleSpace RPC service. 24 | func New() *TupleSpace { 25 | ts := &TupleSpace{ 26 | spaces: map[string]*tuplespace.TupleSpace{}, 27 | reservationCounter: time.Now().UnixNano(), 28 | reservations: map[int64]*tuplespace.Reservation{}, 29 | } 30 | ts.tomb.Go(ts.reaper) 31 | return ts 32 | } 33 | 34 | // Periodically clear out expired reservations. 35 | func (t *TupleSpace) reaper() error { 36 | for { 37 | select { 38 | case <-t.tomb.Dying(): 39 | return nil 40 | 41 | case <-time.After(time.Second * 15): 42 | t.lock.Lock() 43 | for id, reservation := range t.reservations { 44 | if reservation.Expired() { 45 | delete(t.reservations, id) 46 | } 47 | } 48 | t.lock.Unlock() 49 | } 50 | } 51 | } 52 | 53 | func (t *TupleSpace) close() error { 54 | t.tomb.Kill(nil) 55 | return t.tomb.Wait() 56 | } 57 | 58 | func (t *TupleSpace) get(space string) *tuplespace.TupleSpace { 59 | t.lock.Lock() 60 | defer t.lock.Unlock() 61 | s, ok := t.spaces[space] 62 | if !ok { 63 | s = tuplespace.New() 64 | t.spaces[space] = s 65 | } 66 | return s 67 | } 68 | 69 | type StatusRequest struct { 70 | Space string 71 | } 72 | 73 | func (t *TupleSpace) Status(req *StatusRequest, rep *tuplespace.Status) error { 74 | space := t.get(req.Space) 75 | status := space.Status() 76 | *rep = *status 77 | return nil 78 | } 79 | 80 | type SendRequest struct { 81 | Space string 82 | Tuples []tuplespace.Tuple 83 | Expires time.Duration 84 | Acknowledge bool 85 | } 86 | 87 | type SendResponse struct{} 88 | 89 | func (t *TupleSpace) Send(req *SendRequest, rep *SendResponse) error { 90 | space := t.get(req.Space) 91 | var err error 92 | if req.Acknowledge { 93 | if len(req.Tuples) != 1 { 94 | err = fmt.Errorf("expected exactly one tuple to ack") 95 | } 96 | err = space.SendWithAcknowledgement(req.Tuples[0], req.Expires) 97 | } else { 98 | err = space.SendMany(req.Tuples, req.Expires) 99 | } 100 | *rep = struct{}{} 101 | return err 102 | } 103 | 104 | type ReadRequest struct { 105 | Space string 106 | Match string 107 | Timeout time.Duration 108 | All bool 109 | } 110 | 111 | func (t *TupleSpace) Read(req *ReadRequest, rep *[]tuplespace.Tuple) error { 112 | space := t.get(req.Space) 113 | if req.All { 114 | tuples, err := space.ReadAll(req.Match, req.Timeout) 115 | *rep = tuples 116 | return err 117 | } 118 | tuple, err := space.Read(req.Match, req.Timeout) 119 | *rep = append(*rep, tuple) 120 | return err 121 | } 122 | 123 | type TakeRequest struct { 124 | Space string 125 | Match string 126 | Timeout time.Duration 127 | ReservationTimeout time.Duration 128 | } 129 | 130 | type TakeResponse struct { 131 | Reservation int64 132 | Tuple tuplespace.Tuple 133 | } 134 | 135 | func (t *TupleSpace) Take(req *TakeRequest, rep *TakeResponse) error { 136 | space := t.get(req.Space) 137 | res, err := space.Take(req.Match, req.Timeout, req.ReservationTimeout) 138 | if err != nil { 139 | return err 140 | } 141 | t.lock.Lock() 142 | defer t.lock.Unlock() 143 | id := atomic.AddInt64(&t.reservationCounter, 1) 144 | t.reservations[id] = res 145 | rep.Reservation = id 146 | rep.Tuple = res.Tuple() 147 | return nil 148 | } 149 | 150 | type EndTakeRequest 
struct { 151 | Reservation int64 152 | Cancel bool 153 | } 154 | 155 | type EndTakeResponse struct{} 156 | 157 | func (t *TupleSpace) EndTake(req *EndTakeRequest, rep *EndTakeResponse) error { 158 | t.lock.Lock() 159 | defer t.lock.Unlock() 160 | res, ok := t.reservations[req.Reservation] 161 | if !ok { 162 | return fmt.Errorf("unknown reservation") 163 | } 164 | delete(t.reservations, req.Reservation) 165 | if req.Cancel { 166 | return res.Cancel() 167 | } 168 | return res.Complete() 169 | } 170 | -------------------------------------------------------------------------------- /service/client.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "net" 5 | "net/rpc" 6 | "time" 7 | 8 | "github.com/hashicorp/yamux" 9 | 10 | "github.com/alecthomas/tuplespace" 11 | ) 12 | 13 | type Client struct { 14 | addr string 15 | session *yamux.Session 16 | } 17 | 18 | // Dial creats a new connection to a TupleSpace server. 19 | func Dial(addr string) (*Client, error) { 20 | conn, err := net.Dial("tcp", addr) 21 | if err != nil { 22 | return nil, err 23 | } 24 | session, err := yamux.Client(conn, nil) 25 | if err != nil { 26 | return nil, err 27 | } 28 | return &Client{addr: conn.RemoteAddr().String(), session: session}, nil 29 | } 30 | 31 | func (c *Client) RemoteAddr() string { 32 | return c.addr 33 | } 34 | 35 | func (c *Client) Close() error { 36 | return c.session.Close() 37 | } 38 | 39 | // Space creates a new multiplexed connection to the given space. This is a 40 | // low-overhead operation, but not without cost. 41 | func (c *Client) Space(space string) (*ClientSpace, error) { 42 | conn, err := c.session.Open() 43 | if err != nil { 44 | return nil, err 45 | } 46 | return HijackConn(c.addr, space, conn), nil 47 | } 48 | 49 | type ClientSpace struct { 50 | addr string 51 | space string 52 | rpc *rpc.Client 53 | } 54 | 55 | // HijackConn starts a new RPC session for a tuple space, on an existing net.Conn. 
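//
// For example (a sketch mirroring how the tests dial the server):
//
//	conn, _ := net.Dial("tcp", addr)
//	space := HijackConn(addr, "users", conn)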
56 | func HijackConn(addr, space string, conn net.Conn) *ClientSpace { 57 | return &ClientSpace{addr: addr, rpc: rpc.NewClient(conn), space: space} 58 | } 59 | 60 | func (c *ClientSpace) RemoteAddr() string { 61 | return c.addr 62 | } 63 | 64 | func (c *ClientSpace) Name() string { 65 | return c.space 66 | } 67 | 68 | func (c *ClientSpace) Close() error { 69 | return c.rpc.Close() 70 | } 71 | 72 | func (c *ClientSpace) Status() (*tuplespace.Status, error) { 73 | req := &StatusRequest{ 74 | Space: c.space, 75 | } 76 | rep := &tuplespace.Status{} 77 | return rep, c.rpc.Call("TupleSpace.Status", req, rep) 78 | } 79 | 80 | func (c *ClientSpace) send(ack bool, tuples []tuplespace.Tuple, expires time.Duration) error { 81 | req := &SendRequest{ 82 | Space: c.space, 83 | Tuples: tuples, 84 | Expires: expires, 85 | Acknowledge: ack, 86 | } 87 | rep := &SendResponse{} 88 | return c.rpc.Call("TupleSpace.Send", req, rep) 89 | } 90 | 91 | func (c *ClientSpace) Send(tuple tuplespace.Tuple, expires time.Duration) error { 92 | return c.send(false, []tuplespace.Tuple{tuple}, expires) 93 | } 94 | 95 | func (c *ClientSpace) SendMany(tuples []tuplespace.Tuple, expires time.Duration) error { 96 | return c.send(false, tuples, expires) 97 | } 98 | 99 | func (c *ClientSpace) SendWithAcknowledgement(tuple tuplespace.Tuple, expires time.Duration) error { 100 | return c.send(true, []tuplespace.Tuple{tuple}, expires) 101 | } 102 | 103 | func (c *ClientSpace) read(all bool, match string, timeout time.Duration) ([]tuplespace.Tuple, error) { 104 | req := &ReadRequest{ 105 | Space: c.space, 106 | Match: match, 107 | Timeout: timeout, 108 | All: all, 109 | } 110 | rep := []tuplespace.Tuple{} 111 | err := c.rpc.Call("TupleSpace.Read", req, &rep) 112 | return rep, err 113 | } 114 | 115 | func (c *ClientSpace) Read(match string, timeout time.Duration) (tuplespace.Tuple, error) { 116 | tuples, err := c.read(false, match, timeout) 117 | if err != nil { 118 | return nil, err 119 | } 120 | return tuples[0], nil 121 | } 122 | 123 | func (c *ClientSpace) ReadAll(match string, timeout time.Duration) ([]tuplespace.Tuple, error) { 124 | return c.read(true, match, timeout) 125 | } 126 | 127 | func (c *ClientSpace) Take(match string, timeout time.Duration, reservationTimeout time.Duration) (*ClientReservation, error) { 128 | req := &TakeRequest{ 129 | Space: c.space, 130 | Match: match, 131 | Timeout: timeout, 132 | ReservationTimeout: reservationTimeout, 133 | } 134 | rep := &TakeResponse{} 135 | err := c.rpc.Call("TupleSpace.Take", req, rep) 136 | if err != nil { 137 | return nil, err 138 | } 139 | return &ClientReservation{ 140 | rpc: c.rpc, 141 | tuple: rep.Tuple, 142 | id: rep.Reservation, 143 | }, nil 144 | } 145 | 146 | type ClientReservation struct { 147 | rpc *rpc.Client 148 | tuple tuplespace.Tuple 149 | id int64 150 | } 151 | 152 | func (c *ClientReservation) Tuple() tuplespace.Tuple { 153 | return c.tuple 154 | } 155 | 156 | func (c *ClientReservation) Complete() error { 157 | return c.end(false) 158 | } 159 | 160 | func (c *ClientReservation) Cancel() error { 161 | return c.end(true) 162 | } 163 | 164 | func (c *ClientReservation) end(cancel bool) error { 165 | req := &EndTakeRequest{ 166 | Reservation: c.id, 167 | Cancel: cancel, 168 | } 169 | rep := &EndTakeResponse{} 170 | return c.rpc.Call("TupleSpace.EndTake", req, rep) 171 | } 172 | -------------------------------------------------------------------------------- /tuplespace_test.go: -------------------------------------------------------------------------------- 1 | 
package tuplespace 2 | 3 | import ( 4 | "sort" 5 | "sync" 6 | "testing" 7 | "time" 8 | 9 | "github.com/stretchrcom/testify/assert" 10 | ) 11 | 12 | type tupleEntriesSlice []*tupleEntry 13 | 14 | func (t tupleEntriesSlice) Len() int { return len(t) } 15 | func (t tupleEntriesSlice) Swap(i, j int) { t[i], t[j] = t[j], t[i] } 16 | func (t tupleEntriesSlice) Less(i, j int) bool { return t[i].Expires.Before(t[j].Expires) } 17 | 18 | func makeSampleTuples() (*tupleEntries, tupleEntriesSlice) { 19 | tuples := &tupleEntries{ 20 | entries: map[*tupleEntry]struct{}{}, 21 | } 22 | expected := tupleEntriesSlice{ 23 | &tupleEntry{Expires: time.Now().Add(time.Second)}, 24 | &tupleEntry{Expires: time.Now().Add(time.Second * 2)}, 25 | &tupleEntry{Expires: time.Now().Add(time.Second * 3)}, 26 | &tupleEntry{Expires: time.Now().Add(time.Second * 4)}, 27 | } 28 | tuples.Add(expected[0]) 29 | tuples.Add(expected[1]) 30 | tuples.Add(expected[2]) 31 | tuples.Add(expected[3]) 32 | sort.Sort(expected) 33 | return tuples, expected 34 | } 35 | 36 | func TestTuplesAdd(t *testing.T) { 37 | tuples, expected := makeSampleTuples() 38 | actual := (tupleEntriesSlice)(tuples.Copy()) 39 | sort.Sort(actual) 40 | assert.Equal(t, expected, actual) 41 | } 42 | 43 | func TestTuplesRemove(t *testing.T) { 44 | tuples, expected := makeSampleTuples() 45 | i := 0 46 | for entry := range tuples.entries { 47 | if i == 1 { 48 | delete(tuples.entries, entry) 49 | break 50 | } 51 | i++ 52 | } 53 | count := tuples.Size() 54 | assert.Equal(t, len(expected)-1, count) 55 | assert.Equal(t, 3, len(tuples.entries)) 56 | } 57 | 58 | func TestTupleSpaceTake(t *testing.T) { 59 | ts := New() 60 | ts.Send(Tuple{"a": 10}, 0) 61 | assert.Equal(t, 1, len(ts.entries.entries)) 62 | reservation, err := ts.Take("a == 10", 0, 0) 63 | assert.NoError(t, err) 64 | tuple := reservation.Tuple() 65 | reservation.Complete() 66 | assert.NotNil(t, tuple) 67 | assert.Equal(t, 10, tuple["a"]) 68 | } 69 | 70 | func TestTupleSpaceTakeWaitTimeout(t *testing.T) { 71 | errors := make(chan error) 72 | ts := New() 73 | ts.Send(Tuple{"a": 9}, 0) 74 | 75 | go func() { 76 | reservation, err := ts.Take("a >= 10", time.Millisecond*10, 0) 77 | errors <- err 78 | if reservation != nil { 79 | reservation.Complete() 80 | } 81 | }() 82 | 83 | ts.Send(Tuple{"a": 9}, 0) 84 | err := <-errors 85 | assert.Equal(t, ErrTimeout, err) 86 | } 87 | 88 | func TestTupleSpaceCompleteAfterReservationTimeout(t *testing.T) { 89 | ts := New() 90 | ts.Send(Tuple{"a": 1}, 0) 91 | r, err := ts.Take("", 0, time.Millisecond*100) 92 | assert.NoError(t, err) 93 | time.Sleep(time.Millisecond * 200) 94 | assert.Equal(t, ErrReservationTimeout, r.Complete()) 95 | } 96 | 97 | func TestTupleSpaceTakeConcurrent(t *testing.T) { 98 | ts := New() 99 | go func() { 100 | for i := 0; i < 1000; i++ { 101 | ts.Send(Tuple{"i": i}, 0) 102 | } 103 | }() 104 | tuples := make(chan Tuple, 1000) 105 | wg := sync.WaitGroup{} 106 | errors := make(chan error, 10) 107 | for i := 0; i < 10; i++ { 108 | wg.Add(1) 109 | go func() { 110 | defer wg.Done() 111 | for i := 0; i < 100; i++ { 112 | reservation, err := ts.Take("", 0, 0) 113 | if err != nil { 114 | errors <- err 115 | return 116 | } 117 | reservation.Complete() 118 | tuples <- reservation.Tuple() 119 | } 120 | }() 121 | } 122 | wg.Wait() 123 | close(tuples) 124 | count := 0 125 | for _ = range tuples { 126 | count++ 127 | } 128 | assert.Equal(t, 1000, count) 129 | } 130 | 131 | func TestTupleSpaceReadAll(t *testing.T) { 132 | ts := New() 133 | for i := 0; i < 100; i++ { 134 | 
ts.Send(Tuple{"i": i}, 0) 135 | } 136 | tuples, err := ts.ReadAll("i >= 50", 0) 137 | assert.NoError(t, err) 138 | assert.Equal(t, 50, len(tuples)) 139 | } 140 | 141 | func TestTupleSpaceReadIsRandomish(t *testing.T) { 142 | ts := New() 143 | for i := 0; i < 100; i++ { 144 | ts.Send(Tuple{"i": i}, 0) 145 | } 146 | notequal := 0 147 | for i := 0; i < 100; i++ { 148 | tuple, err := ts.Read("i", 0) 149 | assert.NoError(t, err) 150 | j := tuple["i"].(int) 151 | if i != j { 152 | notequal++ 153 | } 154 | } 155 | assert.True(t, notequal > 50) 156 | } 157 | 158 | func TestSendWithAcknowledgementTimesOut(t *testing.T) { 159 | ts := New() 160 | errors := make(chan error, 1) 161 | go func() { 162 | err := ts.SendWithAcknowledgement(Tuple{"i": 10}, time.Millisecond*100) 163 | errors <- err 164 | }() 165 | assert.Equal(t, ErrTimeout, <-errors) 166 | } 167 | 168 | func TestSendWithAcknowledgement(t *testing.T) { 169 | ts := New() 170 | errors := make(chan error, 1) 171 | go func() { 172 | err := ts.SendWithAcknowledgement(Tuple{"i": 10}, 0) 173 | errors <- err 174 | }() 175 | reservation, err := ts.Take("i", 0, 0) 176 | assert.NoError(t, err) 177 | reservation.Complete() 178 | assert.Equal(t, Tuple{"i": 10}, reservation.Tuple()) 179 | assert.NoError(t, <-errors) 180 | } 181 | 182 | func TestSendWithAcknowledgementAndMultipleReads(t *testing.T) { 183 | ts := New() 184 | errors := make(chan error, 1) 185 | go func() { 186 | errors <- ts.SendWithAcknowledgement(Tuple{"i": 10}, time.Second*1) 187 | }() 188 | tuple, err := ts.Read("i", 0) 189 | assert.NoError(t, <-errors) 190 | assert.NoError(t, err) 191 | assert.Equal(t, Tuple{"i": 10}, tuple) 192 | 193 | tuple, err = ts.Read("i", 0) 194 | assert.NoError(t, err) 195 | assert.Equal(t, Tuple{"i": 10}, tuple) 196 | } 197 | 198 | func TestTakeTimeout(t *testing.T) { 199 | ts := New() 200 | ts.Send(Tuple{}, 0) 201 | r, err := ts.Take("", time.Second, time.Millisecond*50) 202 | assert.Equal(t, 0, ts.Status().Tuples.Count) 203 | assert.NoError(t, err) 204 | err = r.Wait() 205 | assert.Equal(t, ErrReservationTimeout, err) 206 | assert.Equal(t, 1, ts.Status().Tuples.Count) 207 | } 208 | 209 | func TestTakeComplete(t *testing.T) { 210 | ts := New() 211 | ts.Send(Tuple{}, 0) 212 | r, err := ts.Take("", time.Second, time.Millisecond*50) 213 | assert.Equal(t, 0, ts.Status().Tuples.Count) 214 | err = r.Complete() 215 | assert.NoError(t, err) 216 | assert.Equal(t, 0, ts.Status().Tuples.Count) 217 | time.Sleep(time.Millisecond * 100) 218 | assert.Equal(t, 0, ts.Status().Tuples.Count) 219 | } 220 | 221 | func TestTakeCancel(t *testing.T) { 222 | ts := New() 223 | ts.Send(Tuple{}, 0) 224 | r, err := ts.Take("", time.Second, time.Millisecond*50) 225 | assert.Equal(t, 0, ts.Status().Tuples.Count) 226 | err = r.Cancel() 227 | assert.NoError(t, err) 228 | assert.Equal(t, 1, ts.Status().Tuples.Count) 229 | } 230 | 231 | func TestTakeCompleteWithAcknowledgement(t *testing.T) { 232 | ts := New() 233 | errors := make(chan error) 234 | go func() { 235 | errors <- ts.SendWithAcknowledgement(Tuple{"a": 10}, 0) 236 | }() 237 | r, err := ts.Take("", 0, time.Second*10) 238 | assert.NoError(t, err) 239 | r.Complete() 240 | err = <-errors 241 | assert.NoError(t, err) 242 | } 243 | 244 | func BenchmarkTupleSend(b *testing.B) { 245 | ts := New() 246 | tuple := Tuple{"i": 0} 247 | for i := 0; i < b.N; i++ { 248 | ts.Send(tuple, 0) 249 | } 250 | } 251 | 252 | func benchTupleSendTakeN(b *testing.B, n int) { 253 | ts := New() 254 | tuple := Tuple{"i": 0} 255 | for i := 0; i < b.N/n; i++ { 256 | 
for j := 0; j < n; j++ { 257 | ts.Send(tuple, 0) 258 | } 259 | for j := 0; j < n; j++ { 260 | r, err := ts.Take("", 0, 0) 261 | if err != nil { 262 | panic(err) 263 | } 264 | r.Complete() 265 | } 266 | } 267 | 268 | } 269 | 270 | func BenchmarkTupleSendTake10(b *testing.B) { 271 | benchTupleSendTakeN(b, 10) 272 | } 273 | 274 | func BenchmarkTupleSendTake100(b *testing.B) { 275 | benchTupleSendTakeN(b, 100) 276 | } 277 | 278 | func BenchmarkTupleSendTake1000(b *testing.B) { 279 | benchTupleSendTakeN(b, 1000) 280 | } 281 | 282 | func BenchmarkTupleSendTake10000(b *testing.B) { 283 | benchTupleSendTakeN(b, 10000) 284 | } 285 | -------------------------------------------------------------------------------- /tuplespace.go: -------------------------------------------------------------------------------- 1 | package tuplespace 2 | 3 | import ( 4 | "errors" 5 | "sync" 6 | "time" 7 | 8 | "github.com/alecthomas/expr" 9 | "gopkg.in/tomb.v2" 10 | ) 11 | 12 | const ( 13 | // MaxTupleLifetime is the maximum time a tuple can be alive in the TupleSpace. 14 | MaxTupleLifetime = time.Second * 60 15 | MaxConsumeTimeout = time.Hour * 24 16 | MaxReservationTimeout = time.Second * 60 17 | ) 18 | 19 | var ( 20 | ErrTimeout = errors.New("timeout") 21 | ErrReservationTimeout = errors.New("reservation timeout") 22 | ErrTupleSpaceDeleted = errors.New("tuplespace deleted") 23 | ErrCancelled = errors.New("cancelled") 24 | ) 25 | 26 | type Tuple expr.V 27 | 28 | type ConsumeOptions struct { 29 | Match string 30 | Timeout time.Duration 31 | All bool 32 | Take bool 33 | Cancel <-chan bool // Cancel can be used to cancel a waiting consume. 34 | } 35 | 36 | type tupleEntry struct { 37 | Tuple Tuple 38 | Expires time.Time 39 | lock sync.Mutex 40 | ack chan error 41 | } 42 | 43 | func (t *tupleEntry) Processed(err error) { 44 | t.lock.Lock() 45 | defer t.lock.Unlock() 46 | if t.ack == nil { 47 | return 48 | } 49 | t.ack <- err 50 | close(t.ack) 51 | t.ack = nil 52 | } 53 | 54 | type tupleEntries struct { 55 | lock sync.Mutex 56 | entries map[*tupleEntry]struct{} 57 | } 58 | 59 | func (t *tupleEntries) Close() error { 60 | t.lock.Lock() 61 | defer t.lock.Unlock() 62 | t.entries = nil 63 | return nil 64 | } 65 | 66 | func (t *tupleEntries) Size() int { 67 | t.lock.Lock() 68 | defer t.lock.Unlock() 69 | return len(t.entries) 70 | } 71 | 72 | func (t *tupleEntries) Add(tuple *tupleEntry) { 73 | t.lock.Lock() 74 | t.entries[tuple] = struct{}{} 75 | t.lock.Unlock() 76 | } 77 | 78 | func (t *tupleEntries) Copy() []*tupleEntry { 79 | t.lock.Lock() 80 | defer t.lock.Unlock() 81 | entries := []*tupleEntry{} 82 | for entry := range t.entries { 83 | entries = append(entries, entry) 84 | } 85 | return entries 86 | } 87 | 88 | func (t *tupleEntries) ConsumeMatch(take, all bool, m *expr.Expression) (entries []*tupleEntry) { 89 | t.lock.Lock() 90 | defer t.lock.Unlock() 91 | now := time.Now() 92 | for entry := range t.entries { 93 | if entry.Expires.Before(now) { 94 | entry.Processed(ErrTimeout) 95 | delete(t.entries, entry) 96 | } else if m == nil || m.Bool(expr.V(entry.Tuple)) { 97 | entry.Processed(nil) 98 | if take { 99 | delete(t.entries, entry) 100 | } 101 | entries = append(entries, entry) 102 | if !all { 103 | return 104 | } 105 | } 106 | } 107 | return 108 | } 109 | 110 | type waiter struct { 111 | match *expr.Expression 112 | expires <-chan time.Time 113 | tuple chan *tupleEntry 114 | take bool 115 | } 116 | 117 | func (w *waiter) Close() error { 118 | close(w.tuple) 119 | return nil 120 | } 121 | 122 | type waiters struct { 123 
| lock sync.Mutex 124 | waiters map[*waiter]struct{} 125 | } 126 | 127 | func (w *waiters) add(a *waiter) int { 128 | w.lock.Lock() 129 | defer w.lock.Unlock() 130 | w.waiters[a] = struct{}{} 131 | return len(w.waiters) - 1 132 | } 133 | 134 | func (w *waiters) remove(waiter *waiter) { 135 | w.lock.Lock() 136 | defer w.lock.Unlock() 137 | delete(w.waiters, waiter) 138 | } 139 | 140 | func (w *waiters) Try(entry *tupleEntry) (taken bool, ok bool) { 141 | w.lock.Lock() 142 | defer w.lock.Unlock() 143 | 144 | for waiter := range w.waiters { 145 | if waiter.match.Bool(expr.V(entry.Tuple)) { 146 | waiter.tuple <- entry 147 | delete(w.waiters, waiter) 148 | return waiter.take, true 149 | } 150 | } 151 | return false, false 152 | } 153 | 154 | func (w *waiters) Size() int { 155 | w.lock.Lock() 156 | defer w.lock.Unlock() 157 | return len(w.waiters) 158 | } 159 | 160 | func (w *waiters) Close() error { 161 | w.lock.Lock() 162 | defer w.lock.Unlock() 163 | for waiter := range w.waiters { 164 | waiter.Close() 165 | } 166 | w.waiters = nil 167 | return nil 168 | } 169 | 170 | type TupleSpace struct { 171 | lock sync.Mutex 172 | entries tupleEntries 173 | waiters waiters 174 | status Status 175 | } 176 | 177 | func New() *TupleSpace { 178 | return &TupleSpace{ 179 | entries: tupleEntries{ 180 | entries: map[*tupleEntry]struct{}{}, 181 | }, 182 | waiters: waiters{ 183 | waiters: map[*waiter]struct{}{}, 184 | }, 185 | } 186 | } 187 | 188 | func (t *TupleSpace) Close() error { 189 | t.lock.Lock() 190 | defer t.lock.Unlock() 191 | t.entries.Close() 192 | return t.waiters.Close() 193 | } 194 | 195 | type TuplesStatus struct { 196 | Count int `json:"count"` 197 | Seen int `json:"seen"` 198 | } 199 | 200 | type WaitersStatus struct { 201 | Count int `json:"count"` 202 | Seen int `json:"seen"` 203 | } 204 | 205 | type Status struct { 206 | Tuples TuplesStatus `json:"tuples"` 207 | Waiters WaitersStatus `json:"waiters"` 208 | } 209 | 210 | func (t *TupleSpace) Status() *Status { 211 | t.lock.Lock() 212 | defer t.lock.Unlock() 213 | t.status.Tuples.Count = t.entries.Size() 214 | t.status.Waiters.Count = t.waiters.Size() 215 | return &t.status 216 | } 217 | 218 | func (t *TupleSpace) Send(tuple Tuple, expires time.Duration) error { 219 | t.sendTuple(tuple, expires, false) 220 | return nil 221 | } 222 | 223 | func (t *TupleSpace) SendMany(tuples []Tuple, expires time.Duration) error { 224 | for _, tuple := range tuples { 225 | t.sendTuple(tuple, expires, false) 226 | } 227 | return nil 228 | } 229 | 230 | // SendWithAcknowledgement sends a tuple and waits for an acknowledgement that 231 | // the tuple has been processed. 232 | func (t *TupleSpace) SendWithAcknowledgement(tuple Tuple, expires time.Duration) error { 233 | if expires == 0 { 234 | expires = MaxTupleLifetime 235 | } 236 | timeout := time.After(expires) 237 | entry := t.sendTuple(tuple, expires, true) 238 | select { 239 | case <-timeout: 240 | // FIXME: There's a race here. The tuple may still be consumed in the 241 | // window after this timeout. 
242 | return ErrTimeout 243 | 244 | case err := <-entry.ack: 245 | return err 246 | } 247 | } 248 | 249 | func (t *TupleSpace) sendTuple(tuple Tuple, expires time.Duration, ack bool) *tupleEntry { 250 | if expires == 0 || expires > MaxTupleLifetime { 251 | expires = MaxTupleLifetime 252 | } 253 | var ackch chan error 254 | if ack { 255 | ackch = make(chan error, 1) 256 | } 257 | entry := &tupleEntry{ 258 | Tuple: tuple, 259 | Expires: time.Now().Add(expires), 260 | ack: ackch, 261 | } 262 | t.lock.Lock() 263 | t.status.Tuples.Seen++ 264 | taken, ok := t.waiters.Try(entry) 265 | if !taken || !ok { 266 | t.addEntry(entry) 267 | } 268 | t.lock.Unlock() 269 | return entry 270 | } 271 | 272 | // Add an entry to the tuplespace. 273 | func (t *TupleSpace) addEntry(entry *tupleEntry) { 274 | t.entries.Add(entry) 275 | } 276 | 277 | // Read an entry from the TupleSpace. Consecutive reads can return the same tuple. 278 | func (t *TupleSpace) Read(match string, timeout time.Duration) (Tuple, error) { 279 | entries, err := t.consume(&ConsumeOptions{ 280 | Match: match, 281 | Timeout: timeout, 282 | }) 283 | if err != nil { 284 | return nil, err 285 | } 286 | entries[0].Processed(nil) 287 | return entries[0].Tuple, nil 288 | } 289 | 290 | func (t *TupleSpace) ReadAll(match string, timeout time.Duration) ([]Tuple, error) { 291 | entries, err := t.consume(&ConsumeOptions{ 292 | Match: match, 293 | Timeout: timeout, 294 | All: true, 295 | }) 296 | if err != nil { 297 | return nil, err 298 | } 299 | tuples := make([]Tuple, len(entries)) 300 | for i, entry := range entries { 301 | tuples[i] = entry.Tuple 302 | entry.Processed(nil) 303 | } 304 | return tuples, nil 305 | } 306 | 307 | func (t *TupleSpace) TakeWithCancel(match string, timeout time.Duration, reservationTimeout time.Duration, cancel <-chan bool) (*Reservation, error) { 308 | if reservationTimeout == 0 { 309 | reservationTimeout = MaxReservationTimeout 310 | } 311 | entries, err := t.consume(&ConsumeOptions{ 312 | Match: match, 313 | Timeout: timeout, 314 | Take: true, 315 | Cancel: cancel, 316 | }) 317 | if err != nil { 318 | return nil, err 319 | } 320 | r := &Reservation{ 321 | entry: entries[0], 322 | space: t, 323 | timeout: time.After(reservationTimeout), 324 | } 325 | r.tomb.Go(r.run) 326 | return r, nil 327 | } 328 | 329 | // Take a tuple within a reservation. Takes must always be performed inside a 330 | // reservation due so that in a multi-server setup, redundant taken tuples can 331 | // be returned. 
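//
// For example (a sketch; "handle" is a hypothetical consumer function):
//
//	r, err := ts.Take(`age > 18`, time.Second, 10*time.Second)
//	if err == nil {
//		handle(r.Tuple())
//		r.Complete() // or r.Cancel() to return the tuple to the space
//	}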
332 | func (t *TupleSpace) Take(match string, timeout time.Duration, reservationTimeout time.Duration) (*Reservation, error) { 333 | return t.TakeWithCancel(match, timeout, reservationTimeout, nil) 334 | } 335 | 336 | func (t *TupleSpace) consume(req *ConsumeOptions) ([]*tupleEntry, error) { 337 | m, err := expr.Compile(req.Match) 338 | if err != nil { 339 | return nil, err 340 | } 341 | 342 | // Lock around searching and configuring waiters 343 | t.lock.Lock() 344 | entries := t.entries.ConsumeMatch(req.Take, req.All, m) 345 | if len(entries) > 0 { 346 | t.lock.Unlock() 347 | return entries, nil 348 | } 349 | var expires <-chan time.Time 350 | if req.Timeout != 0 { 351 | expires = time.After(req.Timeout) 352 | } 353 | waiter := &waiter{ 354 | match: m, 355 | expires: expires, 356 | tuple: make(chan *tupleEntry), 357 | take: req.Take, 358 | } 359 | t.status.Waiters.Seen++ 360 | t.waiters.add(waiter) 361 | t.lock.Unlock() 362 | 363 | // NOTE: We don't remove() the waiter if the tuple space has been deleted, 364 | // thus no "defer t.waiters.remove(id)"" here. 365 | select { 366 | case <-req.Cancel: 367 | t.waiters.remove(waiter) 368 | return nil, ErrCancelled 369 | 370 | case <-waiter.expires: 371 | t.waiters.remove(waiter) 372 | return nil, ErrTimeout 373 | 374 | case entry := <-waiter.tuple: 375 | if entry == nil { 376 | return nil, ErrTupleSpaceDeleted 377 | } 378 | t.waiters.remove(waiter) 379 | return []*tupleEntry{entry}, nil 380 | } 381 | } 382 | 383 | // Consume provides low-level control over consume operations Read, ReadAll and Take. 384 | func (t *TupleSpace) Consume(req *ConsumeOptions) ([]Tuple, error) { 385 | entries, err := t.consume(req) 386 | if err != nil { 387 | return nil, err 388 | } 389 | tuples := make([]Tuple, len(entries)) 390 | for i, entry := range entries { 391 | tuples[i] = entry.Tuple 392 | entry.Processed(nil) 393 | } 394 | return tuples, nil 395 | } 396 | 397 | type Reservation struct { 398 | tomb tomb.Tomb 399 | lock sync.Mutex 400 | entry *tupleEntry 401 | space *TupleSpace 402 | timeout <-chan time.Time 403 | } 404 | 405 | func (r *Reservation) run() error { 406 | select { 407 | case <-r.timeout: 408 | r.lock.Lock() 409 | defer r.lock.Unlock() 410 | r.cancel() 411 | return ErrReservationTimeout 412 | 413 | case <-r.tomb.Dying(): 414 | return nil 415 | } 416 | } 417 | 418 | // Expired returns true if the reservation is no longer valid, for any reason. 
419 | func (r *Reservation) Expired() bool { 420 | return r.tomb.Err() != tomb.ErrStillAlive 421 | } 422 | 423 | func (r *Reservation) Wait() error { 424 | return r.tomb.Wait() 425 | } 426 | 427 | func (r *Reservation) Tuple() Tuple { 428 | return r.entry.Tuple 429 | } 430 | 431 | func (r *Reservation) Complete() error { 432 | r.lock.Lock() 433 | if !r.tomb.Alive() { 434 | r.lock.Unlock() 435 | return r.tomb.Err() 436 | } 437 | r.entry.Processed(nil) 438 | r.tomb.Kill(nil) 439 | r.lock.Unlock() 440 | return r.tomb.Wait() 441 | } 442 | 443 | func (r *Reservation) Cancel() error { 444 | r.lock.Lock() 445 | defer r.lock.Unlock() 446 | if !r.tomb.Alive() { 447 | return r.tomb.Err() 448 | } 449 | r.tomb.Kill(nil) 450 | r.cancel() 451 | return r.tomb.Wait() 452 | } 453 | 454 | func (r *Reservation) cancel() { 455 | r.space.addEntry(r.entry) 456 | } 457 | -------------------------------------------------------------------------------- /service/cluster/client.go: -------------------------------------------------------------------------------- 1 | package cluster 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "math/rand" 7 | "sync" 8 | "time" 9 | 10 | "github.com/alecthomas/tuplespace" 11 | 12 | "github.com/cenkalti/backoff" 13 | 14 | "gopkg.in/tomb.v2" 15 | 16 | "github.com/alecthomas/tuplespace/service" 17 | ) 18 | 19 | var ( 20 | // PeerHeartbeatPeriod is the amount of time between 21 | PeerHeartbeatPeriod = time.Second * 5 22 | PeerTimeout = time.Second * 10 23 | // OperationTimeout is the amount of time to wait for operations such as 24 | // Send() and Read() to complete. 25 | OperationTimeout = time.Second * 15 26 | 27 | ErrNoPeers = errors.New("no peers remaining") 28 | ErrTimeout = errors.New("timeout") 29 | ) 30 | 31 | type Logger interface { 32 | Infof(fmt string, args ...interface{}) error 33 | Warningf(fmt string, args ...interface{}) error 34 | Errorf(fmt string, args ...interface{}) error 35 | } 36 | 37 | // Observe adding and removal of clients to the cluster client. 38 | type clientObserver interface { 39 | // Notify observer about a client being added. If the method returns an 40 | // error the client will be closed. 41 | addClient(client *service.Client) error 42 | // Notify observer that a client has been removed. 43 | removeClient(addr string) 44 | } 45 | 46 | // Maintains client connections to the active peers in the cluster. 47 | type clients struct { 48 | lock sync.Mutex 49 | log Logger 50 | keys []string 51 | clients map[string]*service.Client 52 | backoff map[string]backoff.BackOff 53 | next map[string]time.Time 54 | observers map[clientObserver]struct{} 55 | } 56 | 57 | func newClients(log Logger) *clients { 58 | c := &clients{ 59 | clients: map[string]*service.Client{}, 60 | backoff: map[string]backoff.BackOff{}, 61 | next: map[string]time.Time{}, 62 | observers: map[clientObserver]struct{}{}, 63 | } 64 | c.addObserver(c) 65 | return c 66 | } 67 | 68 | func (c *clients) Space(space string) *ClientSpace { 69 | s := newClientSpace(space, c) 70 | c.addObserver(s) 71 | return s 72 | } 73 | 74 | func (c *clients) addClient(client *service.Client) error { 75 | go c.monitor(client) 76 | return nil 77 | } 78 | 79 | func (c *clients) removeClient(addr string) {} 80 | 81 | func (c *clients) Close() error { 82 | c.lock.Lock() 83 | defer c.lock.Unlock() 84 | for addr := range c.clients { 85 | c.delete(addr) 86 | } 87 | return nil 88 | } 89 | 90 | // Monitor the peer list on a single server, removing and adding as required. 
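// Peer tuples read from the ".cluster.peers" space are expected to carry the
// peer's address under an "addr" key, e.g. (illustrative value only):
// tuplespace.Tuple{"addr": "10.0.0.1:2619"}.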
91 | func (c *clients) monitor(client *service.Client) { 92 | space, err := client.Space(".cluster.peers") 93 | if err != nil { 94 | c.log.Errorf("Could not connect to .cluster.peers on %s, marking dead.", client.RemoteAddr()) 95 | c.Dead(client.RemoteAddr()) 96 | return 97 | } 98 | for { 99 | start := time.Now() 100 | peers, err := space.ReadAll("", PeerHeartbeatPeriod) 101 | if err != nil { 102 | c.log.Errorf("Failed to read peer list from %s, marking dead.", client.RemoteAddr()) 103 | c.Dead(client.RemoteAddr()) 104 | return 105 | } 106 | for _, peer := range peers { 107 | addr, ok := peer["addr"].(string) 108 | if !ok { 109 | c.log.Errorf("Invalid peer tuple %+v, continuing.", peer["addr"]) 110 | continue 111 | } 112 | if c.canUpdate(addr) { 113 | if err := c.update(addr); err != nil { 114 | c.log.Warningf("Failed to update peer %s, continuing.", addr) 115 | continue 116 | } 117 | } 118 | } 119 | elapsed := time.Now().Sub(start) 120 | time.Sleep(PeerHeartbeatPeriod - elapsed) 121 | } 122 | } 123 | 124 | // Update the client connection for an address. If the connection exists, do 125 | // nothing. If it does not exist, attempt to reconnect. Reconnects are rate 126 | // limited. 127 | func (c *clients) update(addr string) error { 128 | c.lock.Lock() 129 | defer c.lock.Unlock() 130 | if _, ok := c.clients[addr]; ok { 131 | return nil 132 | } 133 | boff, ok := c.backoff[addr] 134 | if !ok { 135 | boff = backoff.NewExponentialBackOff() 136 | boff.Reset() 137 | c.backoff[addr] = boff 138 | } else { 139 | if time.Now().Before(c.next[addr]) { 140 | return fmt.Errorf("Too many reconnects to %s", addr) 141 | } 142 | } 143 | c.next[addr] = time.Now().Add(boff.NextBackOff()) 144 | client, err := service.Dial(addr) 145 | if err != nil { 146 | return err 147 | } 148 | c.clients[addr] = client 149 | c.updateKeys() 150 | boff.Reset() 151 | return nil 152 | } 153 | 154 | // Returns true if the given address can be updated. This will return false if 155 | // too many reconnect attempts have been made in a short period. 156 | func (c *clients) canUpdate(addr string) bool { 157 | next, ok := c.next[addr] 158 | return !ok || time.Now().After(next) 159 | } 160 | 161 | // Mark a client as dead. 162 | func (c *clients) Dead(addr string) { 163 | c.lock.Lock() 164 | defer c.lock.Unlock() 165 | c.delete(addr) 166 | } 167 | 168 | func (c *clients) delete(addr string) { 169 | // TODO: This should deal with observers blocking... 170 | for observer := range c.observers { 171 | observer.removeClient(addr) 172 | } 173 | if client, ok := c.clients[addr]; ok { 174 | delete(c.clients, addr) 175 | c.updateKeys() 176 | client.Close() 177 | } 178 | } 179 | 180 | func (c *clients) updateKeys() { 181 | c.keys = make([]string, 0, len(c.clients)) 182 | for k := range c.clients { 183 | c.keys = append(c.keys, k) 184 | } 185 | } 186 | 187 | func (c *clients) Size() int { 188 | c.lock.Lock() 189 | defer c.lock.Unlock() 190 | return len(c.clients) 191 | } 192 | 193 | // Random returns a random client from available clients. 194 | func (c *clients) Random() (*service.Client, error) { 195 | c.lock.Lock() 196 | defer c.lock.Unlock() 197 | count := len(c.clients) 198 | if count == 0 { 199 | return nil, ErrNoPeers 200 | } 201 | n := rand.Intn(count) 202 | key := c.keys[n] 203 | return c.clients[key], nil 204 | } 205 | 206 | func (c *clients) addObserver(observer clientObserver) { 207 | c.lock.Lock() 208 | defer c.lock.Unlock() 209 | for addr, client := range c.clients { 210 | // TODO: This should deal with observers blocking... 
211 | err := observer.addClient(client) 212 | if err != nil { 213 | c.delete(addr) 214 | } 215 | } 216 | c.observers[observer] = struct{}{} 217 | } 218 | 219 | func (c *clients) removeObserver(observer clientObserver) { 220 | c.lock.Lock() 221 | defer c.lock.Unlock() 222 | delete(c.observers, observer) 223 | } 224 | 225 | type nullLogger struct{} 226 | 227 | func (nullLogger) Infof(fmt string, args ...interface{}) error { return nil } 228 | func (nullLogger) Warningf(fmt string, args ...interface{}) error { return nil } 229 | func (nullLogger) Errorf(fmt string, args ...interface{}) error { return nil } 230 | 231 | type Client struct { 232 | lock sync.Mutex 233 | log Logger 234 | clients *clients 235 | } 236 | 237 | func Dial(seeds []string) (*Client, error) { 238 | clients := newClients(nullLogger{}) 239 | for _, addr := range seeds { 240 | if clients.update(addr) != nil { 241 | continue 242 | } 243 | } 244 | if clients.Size() == 0 { 245 | return nil, ErrNoPeers 246 | } 247 | return &Client{clients: clients}, nil 248 | } 249 | 250 | func (c *Client) Close() error { 251 | c.lock.Lock() 252 | defer c.lock.Unlock() 253 | return c.clients.Close() 254 | } 255 | 256 | func (c *Client) SetLogger(log Logger) { 257 | c.lock.Lock() 258 | defer c.lock.Unlock() 259 | c.log = log 260 | } 261 | 262 | func (c *Client) Space(space string) (*ClientSpace, error) { 263 | return c.clients.Space(space), nil 264 | } 265 | 266 | // ClientSpace maintains a current list of clients to all cluster peers, for a 267 | // particular space. 268 | type ClientSpace struct { 269 | lock sync.Mutex 270 | space string 271 | clients *clients 272 | keys []string 273 | spaces map[string]*service.ClientSpace 274 | } 275 | 276 | func newClientSpace(space string, clients *clients) *ClientSpace { 277 | return &ClientSpace{ 278 | space: space, 279 | clients: clients, 280 | spaces: map[string]*service.ClientSpace{}, 281 | } 282 | } 283 | 284 | func (c *ClientSpace) addClient(client *service.Client) error { 285 | c.lock.Lock() 286 | defer c.lock.Unlock() 287 | space, err := client.Space(c.space) 288 | if err != nil { 289 | return err 290 | } 291 | c.keys = append(c.keys, client.RemoteAddr()) 292 | c.spaces[client.RemoteAddr()] = space 293 | return nil 294 | } 295 | 296 | func (c *ClientSpace) removeClient(addr string) { 297 | c.lock.Lock() 298 | defer c.lock.Unlock() 299 | delete(c.spaces, addr) 300 | // Rebuild key slice. 301 | c.keys = []string{} 302 | for key := range c.spaces { 303 | c.keys = append(c.keys, key) 304 | } 305 | } 306 | 307 | // Get a copy of all known clients. 308 | func (c *ClientSpace) allClients() map[string]*service.ClientSpace { 309 | c.lock.Lock() 310 | defer c.lock.Unlock() 311 | clients := make(map[string]*service.ClientSpace, len(c.spaces)) 312 | for addr, client := range c.spaces { 313 | clients[addr] = client 314 | } 315 | return clients 316 | } 317 | 318 | func (c *ClientSpace) randomClient() (string, *service.ClientSpace) { 319 | c.lock.Lock() 320 | defer c.lock.Unlock() 321 | n := rand.Intn(len(c.keys)) 322 | key := c.keys[n] 323 | return key, c.spaces[key] 324 | } 325 | 326 | func (c *ClientSpace) Close() error { 327 | c.lock.Lock() 328 | defer c.lock.Unlock() 329 | c.clients.removeObserver(c) 330 | for _, client := range c.spaces { 331 | client.Close() 332 | } 333 | return nil 334 | } 335 | 336 | func (c *ClientSpace) Status() (*tuplespace.Status, error) { 337 | // Collect status from all clients into statuses. 
338 | lock := sync.Mutex{} 339 | statuses := []*tuplespace.Status{} 340 | action := func(c *service.ClientSpace) error { 341 | status, err := c.Status() 342 | if err == nil { 343 | lock.Lock() 344 | statuses = append(statuses, status) 345 | lock.Unlock() 346 | } 347 | return err 348 | } 349 | 350 | if err := newBatch(c).All(action).Wait(); err != nil { 351 | return nil, err 352 | } 353 | status := &tuplespace.Status{} 354 | for _, s := range statuses { 355 | status.Tuples.Count += s.Tuples.Count 356 | status.Tuples.Seen += s.Tuples.Seen 357 | status.Waiters.Count += s.Waiters.Count 358 | status.Waiters.Seen += s.Waiters.Seen 359 | } 360 | return status, nil 361 | } 362 | 363 | func (c *ClientSpace) Send(tuple tuplespace.Tuple, expires time.Duration) error { 364 | return c.SendMany([]tuplespace.Tuple{tuple}, expires) 365 | } 366 | 367 | func (c *ClientSpace) SendMany(tuples []tuplespace.Tuple, expires time.Duration) error { 368 | batch := newBatch(c) 369 | for _, tuple := range tuples { 370 | batch.Random(func(s *service.ClientSpace) error { 371 | return s.Send(tuple, expires) 372 | }) 373 | } 374 | return batch.Wait() 375 | } 376 | 377 | func (c *ClientSpace) SendWithAcknowledgement(tuple tuplespace.Tuple, expires time.Duration) error { 378 | return newBatch(c). 379 | Random(func(s *service.ClientSpace) error { return s.SendWithAcknowledgement(tuple, expires) }). 380 | Wait() 381 | } 382 | 383 | func (c *ClientSpace) Read(match string, timeout time.Duration) (out tuplespace.Tuple, err error) { 384 | err = newBatch(c). 385 | Random(func(c *service.ClientSpace) error { 386 | return nil 387 | }). 388 | Wait() 389 | return 390 | } 391 | 392 | func (c *ClientSpace) ReadAll(match string, timeout time.Duration) ([]tuplespace.Tuple, error) { 393 | b := newBatch(c) 394 | tuples := make(chan []tuplespace.Tuple, b.Size()) 395 | b.All(func(c *service.ClientSpace) error { 396 | t, err := c.ReadAll(match, timeout) 397 | if err != nil { 398 | return err 399 | } 400 | tuples <- t 401 | return nil 402 | }) 403 | if err := b.Wait(); err != nil { 404 | return nil, err 405 | } 406 | out := []tuplespace.Tuple{} 407 | for t := range tuples { 408 | out = append(out, t...) 409 | } 410 | return out, nil 411 | } 412 | 413 | func (c *ClientSpace) Take(match string, timeout time.Duration, reservationTimeout time.Duration) (reservation *service.ClientReservation, err error) { 414 | reservations := make(chan *service.ClientReservation) 415 | err = newBatch(c).First(func(cancel cancelAction, c *service.ClientSpace) error { 416 | errors := make(chan error) 417 | // reservations := make(chan *service.ClientReservation) 418 | go func() { 419 | reservation, err := c.Take(match, timeout, reservationTimeout) 420 | if err != nil { 421 | errors <- err 422 | } else { 423 | reservations <- reservation 424 | } 425 | }() 426 | select { 427 | case err := <-errors: 428 | return err 429 | case reservation = <-reservations: 430 | close(cancel) 431 | case <-cancel: 432 | c.Close() 433 | } 434 | return err 435 | }).Wait() 436 | return 437 | } 438 | 439 | // A batch operation on a space. 440 | type batch struct { 441 | tomb tomb.Tomb 442 | c *ClientSpace 443 | } 444 | 445 | func newBatch(c *ClientSpace) *batch { 446 | return &batch{c: c} 447 | } 448 | 449 | func (b *batch) Wait() error { 450 | return b.tomb.Wait() 451 | } 452 | 453 | func (b *batch) Size() int { 454 | return b.c.clients.Size() 455 | } 456 | 457 | // Run action on all clients in parallel. If all clients fail, the batch will 458 | // fail. 
A single success results in success. 459 | func (b *batch) All(action func(*service.ClientSpace) error) *batch { 460 | clients := b.c.allClients() 461 | errors := make(chan error) 462 | 463 | // Goroutine to manage errors. 464 | b.tomb.Go(func() error { 465 | var lastError error 466 | expected := len(clients) 467 | failed := 0 468 | for i := 0; i < expected; i++ { 469 | err := <-errors 470 | if err != nil { 471 | failed++ 472 | lastError = err 473 | } 474 | } 475 | if failed == len(clients) { 476 | return lastError 477 | } 478 | return nil 479 | }) 480 | 481 | for addr, client := range clients { 482 | b.tomb.Go(func() error { 483 | err := action(client) 484 | if err != nil { 485 | b.c.clients.Dead(addr) 486 | } 487 | errors <- err 488 | return nil 489 | }) 490 | } 491 | return b 492 | } 493 | 494 | type cancelAction chan struct{} 495 | 496 | func (c cancelAction) Cancel() { 497 | close(c) 498 | } 499 | 500 | // Issue action to all clients in parallel, and accept the first. 501 | func (b *batch) First(action func(cancelAction, *service.ClientSpace) error) *batch { 502 | cancel := make(cancelAction) 503 | b.All(func(c *service.ClientSpace) error { 504 | errors := make(chan error) 505 | go func() { errors <- action(cancel, c) }() 506 | select { 507 | case err := <-errors: 508 | if err != nil { 509 | cancel.Cancel() 510 | } 511 | return err 512 | 513 | case <-cancel: 514 | c.Close() 515 | // FIXME: Don't close the TCP connection here. Instead, close and 516 | // reopen the muxed connection. 517 | b.c.clients.Dead(c.RemoteAddr()) 518 | return nil 519 | } 520 | }) 521 | return b 522 | } 523 | 524 | // Run action repeatedly until it succeeds or it reaches the maximum number of 525 | // retries. 526 | func (b *batch) Random(action func(*service.ClientSpace) error) *batch { 527 | boff := backoff.NewExponentialBackOff() 528 | boff.Reset() 529 | work := func() error { 530 | errors := make(chan error) 531 | for { 532 | key, client := b.c.randomClient() 533 | go func() { errors <- action(client) }() 534 | select { 535 | case err := <-errors: 536 | if err == nil { 537 | return nil 538 | } 539 | b.c.clients.Dead(key) 540 | d := boff.NextBackOff() 541 | if d == backoff.Stop { 542 | return err 543 | } 544 | time.Sleep(d) 545 | 546 | case <-b.tomb.Dying(): 547 | client.Close() 548 | return nil 549 | } 550 | } 551 | } 552 | b.tomb.Go(work) 553 | return b 554 | } 555 | --------------------------------------------------------------------------------