├── CONTRIBUTORS.md
├── LICENSE.md
├── errors.go
├── logger.go
├── limits.go
├── common_test.go
├── topic.go
├── iris_test.go
├── service.go
├── events.go
├── tunnel_test.go
├── tunnel.go
├── broadcast_test.go
├── connection.go
├── doc.go
├── pubsub_test.go
├── reqrep_test.go
├── README.md
└── proto.go
/CONTRIBUTORS.md:
--------------------------------------------------------------------------------
1 | The current language binding is an official support library of the Iris cloud messaging framework, and as such, the list of contributors can be found merged with all others in the master repository at https://github.com/project-iris/iris/blob/master/CONTRIBUTORS.md.
2 |
--------------------------------------------------------------------------------
/LICENSE.md:
--------------------------------------------------------------------------------
1 | Copyright (c) 2013 Project Iris. All rights reserved.
2 |
3 | The current language binding is an official support library of the Iris
4 | cloud messaging framework, and as such, the same licensing terms apply.
5 | For details please see http://iris.karalabe.com/downloads#License
6 |
--------------------------------------------------------------------------------
/errors.go:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2014 Project Iris. All rights reserved.
2 | //
3 | // The current language binding is an official support library of the Iris
4 | // cloud messaging framework, and as such, the same licensing terms apply.
5 | // For details please see http://iris.karalabe.com/downloads#License
6 |
7 | package iris
8 |
9 | import "errors"
10 |
11 | // Returned whenever a time-limited operation expires.
12 | var ErrTimeout = errors.New("operation timed out")
13 |
14 | // Returned if an operation is requested on a closed entity.
15 | var ErrClosed = errors.New("entity closed")
16 |
17 | // Wrapper to differentiate between local and remote errors.
18 | type RemoteError struct {
19 | error
20 | }
21 |
--------------------------------------------------------------------------------
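A caller-side sketch of how RemoteError separates remote failures from local ones. Connection.Request is the binding's public request entry point living in connection.go (not shown in this listing), so its exact signature is an assumption here, as is the import path:

package example

import (
	"fmt"
	"time"

	iris "gopkg.in/project-iris/iris-go.v1" // assumed import path
)

func requestEcho(conn *iris.Connection) {
	reply, err := conn.Request("echo", []byte("ping"), time.Second)
	switch {
	case err == nil:
		fmt.Printf("reply: %s\n", reply)
	case err == iris.ErrTimeout:
		fmt.Println("no reply within the deadline")
	default:
		// A *RemoteError means the failure came from the remote handler
		if _, ok := err.(*iris.RemoteError); ok {
			fmt.Println("remote handler failed:", err)
		}
	}
}
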
/logger.go:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2014 Project Iris. All rights reserved.
2 | //
3 | // The current language binding is an official support library of the Iris
4 | // cloud messaging framework, and as such, the same licensing terms apply.
5 | // For details please see http://iris.karalabe.com/downloads#License
6 |
7 | // Contains the user-configurable logger instance.
8 |
9 | package iris
10 |
11 | import (
12 | "fmt"
13 | "time"
14 |
15 | "gopkg.in/inconshreveable/log15.v2"
16 | )
17 |
18 | // User-configurable leveled logger.
19 | var Log = log15.New()
20 |
21 | // Disables logging by default.
22 | func init() {
23 | Log.SetHandler(log15.DiscardHandler())
24 |
25 | //Log.SetHandler(log15.LvlFilterHandler(log15.LvlCrit, log15.StderrHandler))
26 | //Log.SetHandler(log15.LvlFilterHandler(log15.LvlError, log15.StderrHandler))
27 | //Log.SetHandler(log15.LvlFilterHandler(log15.LvlInfo, log15.StderrHandler))
28 | //Log.SetHandler(log15.LvlFilterHandler(log15.LvlDebug, log15.StderrHandler))
29 | }
30 |
31 | // Creates a lazy value that flattens and truncates a data blob for logging.
32 | func logLazyBlob(data []byte) log15.Lazy {
33 | return log15.Lazy{func() string {
34 | if len(data) > 256 {
35 | return fmt.Sprintf("%v ...", data[:256])
36 | }
37 | return fmt.Sprintf("%v", data)
38 | }}
39 | }
40 |
41 | // Creates a lazy value that flattens a timeout into a string, rendering a
42 | // zero timeout (i.e. block forever) as "infinity".
43 | func logLazyTimeout(timeout time.Duration) log15.Lazy {
44 | return log15.Lazy{func() string {
45 | if timeout == 0 {
46 | return "infinity"
47 | }
48 | return fmt.Sprintf("%v", timeout)
49 | }}
50 | }
51 |
--------------------------------------------------------------------------------
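Because Log is an exported package variable, an application can re-route the binding's logs at startup instead of leaving them discarded. A minimal sketch, with the gopkg.in import path assumed:

package main

import (
	"gopkg.in/inconshreveable/log15.v2"

	iris "gopkg.in/project-iris/iris-go.v1" // assumed import path
)

func main() {
	// Route every record at or above debug level to stderr
	iris.Log.SetHandler(log15.LvlFilterHandler(log15.LvlDebug, log15.StderrHandler))
	iris.Log.Info("binding logs enabled")
}
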
/limits.go:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2014 Project Iris. All rights reserved.
2 | //
3 | // The current language binding is an official support library of the Iris
4 | // cloud messaging framework, and as such, the same licensing terms apply.
5 | // For details please see http://iris.karalabe.com/downloads#License
6 |
7 | // Contains the default service, topic and tunnel limits used by the binding.
8 |
9 | package iris
10 |
11 | import "runtime"
12 |
13 | // User limits of the threading and memory usage of a registered service.
14 | type ServiceLimits struct {
15 | BroadcastThreads int // Broadcast handlers to execute concurrently
16 | BroadcastMemory int // Memory allowance for pending broadcasts
17 | RequestThreads int // Request handlers to execute concurrently
18 | RequestMemory int // Memory allowance for pending requests
19 | }
20 |
21 | // User limits of the threading and memory usage of a subscription.
22 | type TopicLimits struct {
23 | EventThreads int // Event handlers to execute concurrently
24 | EventMemory int // Memory allowance for pending events
25 | }
26 |
27 | // Default limits of the threading and memory usage of a registered service.
28 | var defaultServiceLimits = ServiceLimits{
29 | BroadcastThreads: 4 * runtime.NumCPU(),
30 | BroadcastMemory: 64 * 1024 * 1024,
31 | RequestThreads: 4 * runtime.NumCPU(),
32 | RequestMemory: 64 * 1024 * 1024,
33 | }
34 |
35 | // Default limits of the threading and memory usage of a subscription.
36 | var defaultTopicLimits = TopicLimits{
37 | EventThreads: 4 * runtime.NumCPU(),
38 | EventMemory: 64 * 1024 * 1024,
39 | }
40 |
41 | // Size of a tunnel's input buffer.
42 | var defaultTunnelBuffer = 64 * 1024 * 1024
43 |
--------------------------------------------------------------------------------
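A zero value in any limit field means "use the default": registration merges user limits field by field (see finalizeServiceLimits in service.go and finalizeTopicLimits in topic.go). A sketch of a partially specified limit set, with the iris import alias assumed as above:

// Serialize request handling only; the zero-valued fields fall back to the
// defaults (4*NumCPU threads, 64MB allowances) during registration.
func registerSerial(handler iris.ServiceHandler) (*iris.Service, error) {
	limits := &iris.ServiceLimits{RequestThreads: 1}
	return iris.Register(55555, "search", handler, limits)
}
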
/common_test.go:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2014 Project Iris. All rights reserved.
2 | //
3 | // The current language binding is an official support library of the Iris
4 | // cloud messaging framework, and as such, the same licensing terms apply.
5 | // For details please see http://iris.karalabe.com/downloads#License
6 |
7 | package iris
8 |
9 | import "sync"
10 |
11 | // Configuration values shared by all the tests.
12 | var config = struct {
13 | relay int
14 | cluster string
15 | topic string
16 | }{
17 | relay: 55555,
18 | cluster: "go-binding-test-cluster",
19 | topic: "go-binding-test-topic",
20 | }
21 |
22 | // Simple barrier to support synchronizing a batch of goroutines.
23 | type barrier struct {
24 | pend sync.WaitGroup
25 | hold sync.WaitGroup
26 | pass sync.WaitGroup
27 | cont sync.WaitGroup
28 | errc chan error
29 | }
30 |
31 | func newBarrier(size int) *barrier {
32 | b := &barrier{
33 | errc: make(chan error, size),
34 | }
35 | b.pend.Add(size)
36 | b.hold.Add(1)
37 | return b
38 | }
39 |
40 | // Syncs the goroutines up to begin the next phase.
41 | func (b *barrier) Sync() {
42 | b.pass.Add(1)
43 | b.pend.Done()
44 | b.hold.Wait()
45 | b.pend.Add(1)
46 | b.pass.Done()
47 | b.cont.Wait()
48 | }
49 |
50 | // Removes one goroutine from the barrier after the phase.
51 | func (b *barrier) Exit(err error) {
52 | if err != nil {
53 | b.errc <- err
54 | }
55 | b.pass.Add(1)
56 | b.pend.Done()
57 | b.hold.Wait()
58 | b.pass.Done()
59 | }
60 |
61 | // Waits for all goroutines to reach the barrier and permits continuation.
62 | func (b *barrier) Wait() []error {
63 | // Wait for all the goroutines to arrive
64 | b.pend.Wait()
65 | b.cont.Add(1)
66 |
67 | // Collect all the errors that occurred
68 | errs := []error{}
69 | for done := false; !done; {
70 | select {
71 | case err := <-b.errc:
72 | errs = append(errs, err)
73 | default:
74 | done = true
75 | }
76 | }
77 | // Permit all goroutines to continue
78 | b.hold.Done()
79 | b.pass.Wait()
80 | b.hold.Add(1)
81 | b.cont.Done()
82 |
83 | // Report the results
84 | return errs
85 | }
86 |
--------------------------------------------------------------------------------
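The tests below all follow the shape this barrier implies: each worker calls Sync at the end of every intermediate phase and Exit at the end of the final one, while the coordinator issues exactly one Wait per phase. A distilled sketch of the two-phase pattern:

func runTwoPhases(workers int) (setupErrs, workErrs []error) {
	b := newBarrier(workers)
	for i := 0; i < workers; i++ {
		go func() {
			// Phase one: set up, then rendezvous with the other workers
			b.Sync()
			// Phase two: do the real work, then report the outcome
			b.Exit(nil)
		}()
	}
	// One Wait per phase, each returning the errors collected in that phase
	setupErrs = b.Wait()
	workErrs = b.Wait()
	return setupErrs, workErrs
}
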
/topic.go:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2014 Project Iris. All rights reserved.
2 | //
3 | // The current language binding is an official support library of the Iris
4 | // cloud messaging framework, and as such, the same licensing terms apply.
5 | // For details please see http://iris.karalabe.com/downloads#License
6 |
7 | package iris
8 |
9 | import (
10 | "sync/atomic"
11 |
12 | "github.com/project-iris/iris/pool"
13 | "gopkg.in/inconshreveable/log15.v2"
14 | )
15 |
16 | // Callback interface for processing events from a single subscribed topic.
17 | type TopicHandler interface {
18 | // Callback invoked whenever an event is published to the topic subscribed to
19 | // by this particular handler.
20 | HandleEvent(event []byte)
21 | }
22 |
23 | // Topic subscription, responsible for enforcing the quality of service limits.
24 | type topic struct {
25 | // Application layer fields
26 | handler TopicHandler // Handler for topic events
27 |
28 | // Quality of service fields
29 | limits *TopicLimits // Limits on the inbound message processing
30 |
31 | eventIdx uint64 // Index to assign to inbound events for logging purposes
32 | eventPool *pool.ThreadPool // Queue and concurrency limiter for the event handlers
33 | eventUsed int32 // Actual memory usage of the event queue
34 |
35 | // Bookkeeping fields
36 | logger log15.Logger
37 | }
38 |
39 | // Creates a new topic subscription.
40 | func newTopic(handler TopicHandler, limits *TopicLimits, logger log15.Logger) *topic {
41 | top := &topic{
42 | // Application layer
43 | handler: handler,
44 |
45 | // Quality of service
46 | limits: limits,
47 | eventPool: pool.NewThreadPool(limits.EventThreads),
48 |
49 | // Bookkeeping
50 | logger: logger,
51 | }
52 | // Start the event processing and return
53 | top.eventPool.Start()
54 | return top
55 | }
56 |
57 | // Merges the user-requested limits with the defaults.
58 | func finalizeTopicLimits(user *TopicLimits) *TopicLimits {
59 | // If the user didn't specify anything, load the full default set
60 | if user == nil {
61 | return &defaultTopicLimits
62 | }
63 | // Check each field and merge only non-specified ones
64 | limits := new(TopicLimits)
65 | *limits = *user
66 |
67 | if user.EventThreads == 0 {
68 | limits.EventThreads = defaultTopicLimits.EventThreads
69 | }
70 | if user.EventMemory == 0 {
71 | limits.EventMemory = defaultTopicLimits.EventMemory
72 | }
73 | return limits
74 | }
75 |
76 | // Schedules a topic event for the subscription handler to process.
77 | func (t *topic) handlePublish(event []byte) {
78 | id := int(atomic.AddUint64(&t.eventIdx, 1))
79 | t.logger.Debug("scheduling arrived event", "event", id, "data", logLazyBlob(event))
80 |
81 | // Make sure there is enough memory for the event
82 | used := int(atomic.LoadInt32(&t.eventUsed)) // Safe, since only 1 thread increments!
83 | if used+len(event) <= t.limits.EventMemory {
84 | // Increment the memory usage of the queue and schedule the event
85 | atomic.AddInt32(&t.eventUsed, int32(len(event)))
86 | t.eventPool.Schedule(func() {
87 | // Start the processing by decrementing the memory usage
88 | atomic.AddInt32(&t.eventUsed, -int32(len(event)))
89 | t.logger.Debug("handling scheduled event", "event", id)
90 | t.handler.HandleEvent(event)
91 | })
92 | return
93 | }
94 | // Not enough memory in the event queue
95 | t.logger.Error("event exceeded memory allowance", "event", id, "limit", t.limits.EventMemory, "used", used, "size", len(event))
96 | }
97 |
98 | // Terminates a topic subscription's internal processing pool.
99 | func (t *topic) terminate() {
100 | // Wait for queued events to finish running
101 | t.eventPool.Terminate(false)
102 | }
103 |
--------------------------------------------------------------------------------
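A minimal TopicHandler implementation. The Subscribe call is the binding's public topic entry point living in connection.go (not shown in this listing), so its exact signature is an assumption here:

package example

import (
	"fmt"

	iris "gopkg.in/project-iris/iris-go.v1" // assumed import path
)

// printHandler dumps every event published to the watched topic.
type printHandler struct{}

func (printHandler) HandleEvent(event []byte) {
	fmt.Printf("event: %s\n", event)
}

func watchNews(conn *iris.Connection) error {
	// Assumed signature; nil limits fall back to defaultTopicLimits
	return conn.Subscribe("news", printHandler{}, nil)
}
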
/iris_test.go:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2013 Project Iris. All rights reserved.
2 | //
3 | // The current language binding is an official support library of the Iris
4 | // cloud messaging framework, and as such, the same licensing terms apply.
5 | // For details please see http://iris.karalabe.com/downloads#License
6 |
7 | package iris
8 |
9 | import (
10 | "fmt"
11 | "testing"
12 | )
13 |
14 | // Tests multiple concurrent client connections.
15 | func TestConnect(t *testing.T) {
16 | // Test specific configurations
17 | conf := struct {
18 | clients int
19 | }{100}
20 |
21 | // Start a batch of parallel connections
22 | barrier := newBarrier(conf.clients)
23 | for i := 0; i < conf.clients; i++ {
24 | go func() {
25 | // Connect to the local relay
26 | conn, err := Connect(config.relay)
27 | if err != nil {
28 | barrier.Exit(fmt.Errorf("connection failed: %v", err))
29 | return
30 | }
31 | barrier.Sync()
32 |
33 | // Disconnect from the local relay
34 | if err := conn.Close(); err != nil {
35 | barrier.Exit(fmt.Errorf("connection close failed: %v", err))
36 | return
37 | }
38 | barrier.Exit(nil)
39 | }()
40 | }
41 | // Schedule the parallel operations
42 | if errs := barrier.Wait(); len(errs) != 0 {
43 | t.Fatalf("connection phase failed: %v.", errs)
44 | }
45 | if errs := barrier.Wait(); len(errs) != 0 {
46 | t.Fatalf("termination phase failed: %v.", errs)
47 | }
48 | }
49 |
50 | // Service handler for the registration tests.
51 | type registerTestHandler struct{}
52 |
53 | func (r *registerTestHandler) Init(conn *Connection) error { return nil }
54 | func (r *registerTestHandler) HandleBroadcast(msg []byte) { panic("not implemented") }
55 | func (r *registerTestHandler) HandleRequest(req []byte) ([]byte, error) { panic("not implemented") }
56 | func (r *registerTestHandler) HandleTunnel(tun *Tunnel) { panic("not implemented") }
57 | func (r *registerTestHandler) HandleDrop(reason error) { panic("not implemented") }
58 |
59 | // Tests multiple concurrent service registrations.
60 | func TestRegister(t *testing.T) {
61 | // Test specific configurations
62 | conf := struct {
63 | services int
64 | }{100}
65 |
66 | // Start a batch of parallel connections
67 | barrier := newBarrier(conf.services)
68 | for i := 0; i < conf.services; i++ {
69 | go func() {
70 | // Register a new service to the relay
71 | serv, err := Register(config.relay, config.cluster, new(registerTestHandler), nil)
72 | if err != nil {
73 | barrier.Exit(fmt.Errorf("registration failed: %v", err))
74 | return
75 | }
76 | barrier.Sync()
77 |
78 | // Unregister the service
79 | if err := serv.Unregister(); err != nil {
80 | barrier.Exit(fmt.Errorf("unregistration failed: %v", err))
81 | return
82 | }
83 | barrier.Exit(nil)
84 | }()
85 | }
86 | // Schedule the parallel operations
87 | if errs := barrier.Wait(); len(errs) != 0 {
88 | t.Fatalf("registration phase failed: %v.", errs)
89 | }
90 | if errs := barrier.Wait(); len(errs) != 0 {
91 | t.Fatalf("unregistration phase failed: %v.", errs)
92 | }
93 | }
94 |
95 | // Benchmarks client connection.
96 | func BenchmarkConnect(b *testing.B) {
97 | for i := 0; i < b.N; i++ {
98 | if conn, err := Connect(config.relay); err != nil {
99 | b.Fatalf("iteration %d: connection failed: %v.", i, err)
100 | } else {
101 | defer conn.Close()
102 | }
103 | }
104 | // Stop the timer (don't measure deferred cleanup)
105 | b.StopTimer()
106 | }
107 |
108 | // Benchmarks service registration.
109 | func BenchmarkRegister(b *testing.B) {
110 | for i := 0; i < b.N; i++ {
111 | if serv, err := Register(config.relay, config.cluster, new(registerTestHandler), nil); err != nil {
112 | b.Fatalf("iteration %d: register failed: %v.", i, err)
113 | } else {
114 | defer serv.Unregister()
115 | }
116 | }
117 | // Stop the timer (don't measure deferred cleanup)
118 | b.StopTimer()
119 | }
120 |
--------------------------------------------------------------------------------
/service.go:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2014 Project Iris. All rights reserved.
2 | //
3 | // The current language binding is an official support library of the Iris
4 | // cloud messaging framework, and as such, the same licensing terms apply.
5 | // For details please see http://iris.karalabe.com/downloads#License
6 |
7 | package iris
8 |
9 | import (
10 | "errors"
11 | "fmt"
12 | "sync/atomic"
13 |
14 | "gopkg.in/inconshreveable/log15.v2"
15 | )
16 |
17 | // Callback interface for processing inbound messages designated to a particular
18 | // service instance.
19 | type ServiceHandler interface {
20 | // Called once after the service is registered in the Iris network, but before
21 | // any handlers are activated. Its goal is to initialize any internal state
22 | // dependent on the connection.
23 | Init(conn *Connection) error
24 |
25 | // Callback invoked whenever a broadcast message arrives that is designated
26 | // to the cluster of which this particular service instance is a member.
27 | HandleBroadcast(message []byte)
28 |
29 | // Callback invoked whenever a request designated to the service's cluster is
30 | // load-balanced to this particular service instance.
31 | //
32 | // The method should service the request and return either a reply or the
33 | // error encountered, which will be delivered to the request originator.
34 | //
35 | // Returning nil for both results, or for neither, will result in a panic. Also,
36 | // since the requests cross language boundaries, only the error string gets
37 | // delivered remotely (any associated type information is effectively lost).
38 | HandleRequest(request []byte) ([]byte, error)
39 |
40 | // Callback invoked whenever a tunnel designated to the service's cluster is
41 | // constructed from a remote node to this particular instance.
42 | HandleTunnel(tunnel *Tunnel)
43 |
44 | // Callback notifying the service that the local relay dropped its connection.
45 | HandleDrop(reason error)
46 | }
47 |
48 | // Service instance belonging to a particular cluster in the network.
49 | type Service struct {
50 | conn *Connection // Network connection to the local Iris relay
51 | Log log15.Logger // Logger with service id injected
52 | }
53 |
54 | // Id to assign to the next service (used for logging purposes).
55 | var nextServId uint64
56 |
57 | // Connects to the Iris network and registers a new service instance as a member
58 | // of the specified service cluster.
59 | func Register(port int, cluster string, handler ServiceHandler, limits *ServiceLimits) (*Service, error) {
60 | // Sanity check on the arguments
61 | if len(cluster) == 0 {
62 | return nil, errors.New("empty cluster identifier")
63 | }
64 | if handler == nil {
65 | return nil, errors.New("nil service handler")
66 | }
67 | // Make sure the service limits have valid values
68 | limits = finalizeServiceLimits(limits)
69 |
70 | logger := Log.New("service", atomic.AddUint64(&nextServId, 1))
71 | logger.Info("registering new service", "relay_port", port, "cluster", cluster,
72 | "broadcast_limits", log15.Lazy{func() string {
73 | return fmt.Sprintf("%dT|%dB", limits.BroadcastThreads, limits.BroadcastMemory)
74 | }},
75 | "request_limits", log15.Lazy{func() string {
76 | return fmt.Sprintf("%dT|%dB", limits.RequestThreads, limits.RequestMemory)
77 | }})
78 |
79 | // Connect to the Iris relay as a service
80 | conn, err := newConnection(port, cluster, handler, limits, logger)
81 | if err != nil {
82 | logger.Warn("failed to register new service", "reason", err)
83 | return nil, err
84 | }
85 | // Assemble the service object and initialize it
86 | serv := &Service{
87 | conn: conn,
88 | Log: logger,
89 | }
90 | if err := handler.Init(conn); err != nil {
91 | logger.Warn("user failed to initialize service", "reason", err)
92 | conn.Close()
93 | return nil, err
94 | }
95 | logger.Info("service registration completed")
96 |
97 | // Start the handler pools
98 | conn.bcastPool.Start()
99 | conn.reqPool.Start()
100 |
101 | return serv, nil
102 | }
103 |
104 | // Merges the user-requested limits with the defaults.
105 | func finalizeServiceLimits(user *ServiceLimits) *ServiceLimits {
106 | // If the user didn't specify anything, load the full default set
107 | if user == nil {
108 | return &defaultServiceLimits
109 | }
110 | // Check each field and merge only non-specified ones
111 | limits := new(ServiceLimits)
112 | *limits = *user
113 |
114 | if user.BroadcastThreads == 0 {
115 | limits.BroadcastThreads = defaultServiceLimits.BroadcastThreads
116 | }
117 | if user.BroadcastMemory == 0 {
118 | limits.BroadcastMemory = defaultServiceLimits.BroadcastMemory
119 | }
120 | if user.RequestThreads == 0 {
121 | limits.RequestThreads = defaultServiceLimits.RequestThreads
122 | }
123 | if user.RequestMemory == 0 {
124 | limits.RequestMemory = defaultServiceLimits.RequestMemory
125 | }
126 | return limits
127 | }
128 |
129 | // Unregisters the service instance from the Iris network, removing all
130 | // subscriptions and closing all active tunnels.
131 | //
132 | // The call blocks until the tear-down is confirmed by the Iris node.
133 | func (s *Service) Unregister() error {
134 | // Tear-down the connection
135 | err := s.conn.Close()
136 |
137 | // Stop all the thread pools (drop unprocessed messages)
138 | s.conn.reqPool.Terminate(true)
139 | s.conn.bcastPool.Terminate(true)
140 |
141 | // Return the result of the connection close
142 | return err
143 | }
144 |
--------------------------------------------------------------------------------
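Tying the interface and Register together, a minimal echo service: every ServiceHandler callback is implemented, but only HandleRequest does real work. A sketch assuming a relay on port 55555 and the gopkg.in import path:

package main

import iris "gopkg.in/project-iris/iris-go.v1" // assumed import path

// echoHandler implements iris.ServiceHandler by mirroring requests back.
type echoHandler struct{}

func (echoHandler) Init(conn *iris.Connection) error             { return nil }
func (echoHandler) HandleBroadcast(message []byte)               {}
func (echoHandler) HandleRequest(request []byte) ([]byte, error) { return request, nil }
func (echoHandler) HandleTunnel(tunnel *iris.Tunnel)             {}
func (echoHandler) HandleDrop(reason error)                      {}

func main() {
	service, err := iris.Register(55555, "echo", echoHandler{}, nil)
	if err != nil {
		panic(err)
	}
	defer service.Unregister()
	select {} // serve until the process is killed
}
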
/events.go:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2013 Project Iris. All rights reserved.
2 | //
3 | // The current language binding is an official support library of the Iris
4 | // cloud messaging framework, and as such, the same licensing terms apply.
5 | // For details please see http://iris.karalabe.com/downloads#License
6 |
7 | // Event handlers for relay-side messages.
8 |
9 | package iris
10 |
11 | import (
12 | "errors"
13 | "sync/atomic"
14 | "time"
15 | )
16 |
17 | // Schedules an application broadcast message for the service handler to process.
18 | func (c *Connection) handleBroadcast(message []byte) {
19 | id := int(atomic.AddUint64(&c.bcastIdx, 1))
20 | c.Log.Debug("scheduling arrived broadcast", "broadcast", id, "data", logLazyBlob(message))
21 |
22 | // Make sure there is enough memory for the message
23 | used := int(atomic.LoadInt32(&c.bcastUsed)) // Safe, since only 1 thread increments!
24 | if used+len(message) <= c.limits.BroadcastMemory {
25 | // Increment the memory usage of the queue and schedule the broadcast
26 | atomic.AddInt32(&c.bcastUsed, int32(len(message)))
27 | c.bcastPool.Schedule(func() {
28 | // Start the processing by decrementing the memory usage
29 | atomic.AddInt32(&c.bcastUsed, -int32(len(message)))
30 | c.Log.Debug("handling scheduled broadcast", "broadcast", id)
31 | c.handler.HandleBroadcast(message)
32 | })
33 | return
34 | }
35 | // Not enough memory in the broadcast queue
36 | c.Log.Error("broadcast exceeded memory allowance", "broadcast", id, "limit", c.limits.BroadcastMemory, "used", used, "size", len(message))
37 | }
38 |
39 | // Schedules an application request for the service handler to process.
40 | func (c *Connection) handleRequest(id uint64, request []byte, timeout time.Duration) {
41 | logger := c.Log.New("remote_request", id)
42 | logger.Debug("scheduling arrived request", "data", logLazyBlob(request), "timeout", timeout)
43 |
44 | // Make sure there is enough memory for the request
45 | used := int(atomic.LoadInt32(&c.reqUsed)) // Safe, since only 1 thread increments!
46 | if used+len(request) <= c.limits.RequestMemory {
47 | // Increment the memory usage of the queue
48 | atomic.AddInt32(&c.reqUsed, int32(len(request)))
49 |
50 | // Create the expiration timer and schedule the request
51 | expiration := time.After(timeout)
52 | c.reqPool.Schedule(func() {
53 | // Start the processing by decrementing the memory usage
54 | atomic.AddInt32(&c.reqUsed, -int32(len(request)))
55 |
56 | // Make sure the request didn't expire while enqueued
57 | select {
58 | case expired := <-expiration:
59 | exp := time.Since(expired)
60 | logger.Error("dumping expired scheduled request", "scheduled", exp+timeout, "timeout", timeout, "expired", exp)
61 | return
62 | default:
63 | // All ok, continue
64 | }
65 | // Handle the request and return a reply
66 | logger.Debug("handling scheduled request")
67 | reply, err := c.handler.HandleRequest(request)
68 | fault := ""
69 | if err != nil {
70 | fault = err.Error()
71 | }
72 | logger.Debug("replying to handled request", "data", logLazyBlob(reply), "error", err)
73 | if err := c.sendReply(id, reply, fault); err != nil {
74 | logger.Error("failed to send reply", "reason", err)
75 | }
76 | })
77 | return
78 | }
79 | // Not enough memory in the request queue
80 | logger.Error("request exceeded memory allowance", "limit", c.limits.RequestMemory, "used", used, "size", len(request))
81 | }
82 |
83 | // Looks up a pending request and delivers the result.
84 | func (c *Connection) handleReply(id uint64, reply []byte, fault string) {
85 | c.reqLock.RLock()
86 | defer c.reqLock.RUnlock()
87 |
88 | if reply == nil && len(fault) == 0 {
89 | c.reqErrs[id] <- ErrTimeout
90 | } else if reply == nil {
91 | c.reqErrs[id] <- &RemoteError{errors.New(fault)}
92 | } else {
93 | c.reqReps[id] <- reply
94 | }
95 | }
96 |
97 | // Forwards a topic publish event to the topic subscription.
98 | func (c *Connection) handlePublish(topic string, event []byte) {
99 | // Fetch the handler and release the lock fast
100 | c.subLock.RLock()
101 | top, ok := c.subLive[topic]
102 | c.subLock.RUnlock()
103 |
104 | // Make sure the subscription is still live
105 | if ok {
106 | top.handlePublish(event)
107 | } else {
108 | c.Log.Warn("stale publish arrived", "topic", topic)
109 | }
110 | }
111 |
112 | // Notifies the application of the relay link going down.
113 | func (c *Connection) handleClose(reason error) {
114 | // Notify the client of the drop if premature
115 | if reason != nil {
116 | c.Log.Crit("connection dropped", "reason", reason)
117 |
118 | // Only server connections have registered handlers
119 | if c.handler != nil {
120 | c.handler.HandleDrop(reason)
121 | }
122 | }
123 | // Close all open tunnels
124 | c.tunLock.Lock()
125 | for _, tun := range c.tunLive {
126 | tun.handleClose("connection dropped")
127 | }
128 | c.tunLive = nil
129 | c.tunLock.Unlock()
130 | }
131 |
132 | // Opens a new local tunnel endpoint and binds it to the remote side.
133 | func (c *Connection) handleTunnelInit(id uint64, chunkLimit int) {
134 | go func() {
135 | if tun, err := c.acceptTunnel(id, chunkLimit); err == nil {
136 | c.handler.HandleTunnel(tun)
137 | }
138 | // Else: failure already logged by the acceptor
139 | }()
140 | }
141 |
142 | // Forwards the tunnel construction result to the requesting tunnel.
143 | func (c *Connection) handleTunnelResult(id uint64, chunkLimit int) {
144 | // Retrieve the tunnel and finalize its initialization if it is still live
145 | c.tunLock.RLock()
146 | tun, ok := c.tunLive[id]
147 | c.tunLock.RUnlock()
148 | if ok {
149 | tun.handleInitResult(chunkLimit)
150 | }
151 | }
152 |
153 | // Forwards a tunnel data allowance to the target tunnel.
154 | func (c *Connection) handleTunnelAllowance(id uint64, space int) {
155 | // Retrieve the tunnel
156 | c.tunLock.RLock()
157 | tun, ok := c.tunLive[id]
158 | c.tunLock.RUnlock()
159 |
160 | // Notify it of the granted data allowance
161 | if ok {
162 | tun.handleAllowance(space)
163 | }
164 | }
165 |
166 | // Forwards a message chunk transfer to the target tunnel.
167 | func (c *Connection) handleTunnelTransfer(id uint64, size int, chunk []byte) {
168 | // Retrieve the tunnel
169 | c.tunLock.RLock()
170 | tun, ok := c.tunLive[id]
171 | c.tunLock.RUnlock()
172 |
173 | // Notify it of the arrived message chunk
174 | if ok {
175 | tun.handleTransfer(size, chunk)
176 | }
177 | }
178 |
179 | // Terminates a tunnel, stopping all data transfers.
180 | func (c *Connection) handleTunnelClose(id uint64, reason string) {
181 | c.tunLock.Lock()
182 | defer c.tunLock.Unlock()
183 |
184 | // Make sure the tunnel is still alive
185 | if tun, ok := c.tunLive[id]; ok {
186 | tun.handleClose(reason)
187 | delete(c.tunLive, id)
188 | }
189 | }
190 |
--------------------------------------------------------------------------------
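handleBroadcast, handleRequest and the topic's handlePublish all apply the same admission-control pattern: check the inbound size against a byte allowance, charge the allowance before queuing, and refund it the moment the handler starts. A generic, binding-independent sketch of that pattern:

package example

import "sync/atomic"

// limiter sketches the byte-allowance admission control used above.
type limiter struct {
	used  int32 // bytes currently queued (updated atomically)
	limit int   // maximum bytes allowed to queue up
}

// admit returns a task to schedule for msg, or false if the queue is full.
// As in the binding, only a single scheduler goroutine may call admit.
func (l *limiter) admit(msg []byte, handle func([]byte)) (func(), bool) {
	used := int(atomic.LoadInt32(&l.used)) // safe: only admit increments
	if used+len(msg) > l.limit {
		return nil, false // over the allowance, drop the message
	}
	atomic.AddInt32(&l.used, int32(len(msg)))
	return func() {
		atomic.AddInt32(&l.used, -int32(len(msg))) // refund on start
		handle(msg)
	}, true
}
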
/tunnel_test.go:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2013 Project Iris. All rights reserved.
2 | //
3 | // The current language binding is an official support library of the Iris
4 | // cloud messaging framework, and as such, the same licensing terms apply.
5 | // For details please see http://iris.karalabe.com/downloads#License
6 |
7 | package iris
8 |
9 | import (
10 | "bytes"
11 | "fmt"
12 | "sync"
13 | "testing"
14 | "time"
15 | )
16 |
17 | // Service handler for the tunnel tests.
18 | type tunnelTestHandler struct {
19 | conn *Connection
20 | }
21 |
22 | func (t *tunnelTestHandler) Init(conn *Connection) error { t.conn = conn; return nil }
23 | func (t *tunnelTestHandler) HandleBroadcast(msg []byte) { panic("not implemented") }
24 | func (t *tunnelTestHandler) HandleRequest(req []byte) ([]byte, error) { panic("not implemented") }
25 | func (t *tunnelTestHandler) HandleDrop(reason error) { panic("not implemented") }
26 |
27 | func (t *tunnelTestHandler) HandleTunnel(tun *Tunnel) {
28 | defer tun.Close()
29 |
30 | for {
31 | msg, err := tun.Recv(0)
32 | switch {
33 | case err == ErrClosed:
34 | return
35 | case err == nil:
36 | if err := tun.Send(msg, 0); err != nil {
37 | panic(fmt.Sprintf("tunnel send failed: %v", err))
38 | }
39 | default:
40 | panic(fmt.Sprintf("tunnel receive failed: %v", err))
41 | }
42 | }
43 | }
44 |
45 | // Tests multiple concurrent client and service tunnels.
46 | func TestTunnel(t *testing.T) {
47 | // Test specific configurations
48 | conf := struct {
49 | clients int
50 | servers int
51 | tunnels int
52 | exchanges int
53 | }{7, 7, 7, 7}
54 |
55 | barrier := newBarrier(conf.clients + conf.servers)
56 | shutdown := new(sync.WaitGroup)
57 |
58 | // Start up the concurrent tunneling clients
59 | for i := 0; i < conf.clients; i++ {
60 | shutdown.Add(1)
61 | go func(client int) {
62 | defer shutdown.Done()
63 |
64 | // Connect to the local relay
65 | conn, err := Connect(config.relay)
66 | if err != nil {
67 | barrier.Exit(fmt.Errorf("connection failed: %v", err))
68 | return
69 | }
70 | defer conn.Close()
71 | barrier.Sync()
72 |
73 | // Execute the tunnel construction, message exchange and verification
74 | id := fmt.Sprintf("client #%d", client)
75 | if err := tunnelBuildExchangeVerify(id, conn, conf.tunnels, conf.exchanges); err != nil {
76 | barrier.Exit(fmt.Errorf("exchanges failed: %v", err))
77 | return
78 | }
79 | barrier.Exit(nil)
80 | }(i)
81 | }
82 | // Start up the concurrent tunneling services
83 | for i := 0; i < conf.servers; i++ {
84 | shutdown.Add(1)
85 | go func(server int) {
86 | defer shutdown.Done()
87 |
88 | // Create the service handler
89 | handler := new(tunnelTestHandler)
90 |
91 | // Register a new service to the relay
92 | serv, err := Register(config.relay, config.cluster, handler, nil)
93 | if err != nil {
94 | barrier.Exit(fmt.Errorf("registration failed: %v", err))
95 | return
96 | }
97 | defer serv.Unregister()
98 | barrier.Sync()
99 |
100 | // Execute the tunnel construction, message exchange and verification
101 | id := fmt.Sprintf("server #%d", server)
102 | if err := tunnelBuildExchangeVerify(id, handler.conn, conf.tunnels, conf.exchanges); err != nil {
103 | barrier.Exit(fmt.Errorf("exchanges failed: %v", err))
104 | return
105 | }
106 | barrier.Exit(nil)
107 | }(i)
108 | }
109 | // Schedule the parallel operations
110 | if errs := barrier.Wait(); len(errs) != 0 {
111 | t.Fatalf("startup phase failed: %v.", errs)
112 | }
113 | if errs := barrier.Wait(); len(errs) != 0 {
114 | t.Fatalf("tunneling phase failed: %v.", errs)
115 | }
116 | // Make sure all children terminated
117 | shutdown.Wait()
118 | }
119 |
120 | // Opens a batch of concurrent tunnels, and executes a data exchange.
121 | func tunnelBuildExchangeVerify(id string, conn *Connection, tunnels, exchanges int) error {
122 | barrier := newBarrier(tunnels)
123 | for i := 0; i < tunnels; i++ {
124 | go func(tunnel int) {
125 | // Open a tunnel to the service cluster
126 | tun, err := conn.Tunnel(config.cluster, time.Second)
127 | if err != nil {
128 | barrier.Exit(fmt.Errorf("tunnel construction failed: %v", err))
129 | return
130 | }
131 | defer tun.Close()
132 |
133 | // Serialize a batch of messages
134 | for i := 0; i < exchanges; i++ {
135 | message := fmt.Sprintf("%s, tunnel #%d, message #%d", id, tunnel, i)
136 | if err := tun.Send([]byte(message), time.Second); err != nil {
137 | barrier.Exit(fmt.Errorf("tunnel send failed: %v", err))
138 | return
139 | }
140 | }
141 | // Read back the echo stream and verify
142 | for i := 0; i < exchanges; i++ {
143 | message, err := tun.Recv(time.Second)
144 | if err != nil {
145 | barrier.Exit(fmt.Errorf("tunnel receive failed: %v", err))
146 | return
147 | }
148 | original := fmt.Sprintf("%s, tunnel #%d, message #%d", id, tunnel, i)
149 | if string(message) != original {
150 | barrier.Exit(fmt.Errorf("message mismatch: have %v, want %v", string(message), original))
151 | return
152 | }
153 | }
154 | barrier.Exit(nil)
155 | }(i)
156 | }
157 | if errs := barrier.Wait(); len(errs) != 0 {
158 | return fmt.Errorf("%v", errs)
159 | }
160 | return nil
161 | }
162 |
163 | // Tests that unanswered tunnels time out correctly.
164 | func TestTunnelTimeout(t *testing.T) {
165 | // Connect to the local relay
166 | conn, err := Connect(config.relay)
167 | if err != nil {
168 | t.Fatalf("connection failed: %v", err)
169 | }
170 | defer conn.Close()
171 |
172 | // Open a new tunnel to a non-existent server
173 | if tun, err := conn.Tunnel(config.cluster, 100*time.Millisecond); err != ErrTimeout {
174 | t.Fatalf("mismatching tunneling result: have %v/%v, want %v/%v", tun, err, nil, ErrTimeout)
175 | }
176 | }
177 |
178 | // Tests that large messages get delivered properly.
179 | func TestTunnelChunking(t *testing.T) {
180 | // Create the service handler
181 | handler := new(tunnelTestHandler)
182 |
183 | // Register a new service to the relay
184 | serv, err := Register(config.relay, config.cluster, handler, nil)
185 | if err != nil {
186 | t.Fatalf("registration failed: %v.", err)
187 | }
188 | defer serv.Unregister()
189 |
190 | // Construct the tunnel
191 | tunnel, err := handler.conn.Tunnel(config.cluster, time.Second)
192 | if err != nil {
193 | t.Fatalf("tunnel construction failed: %v.", err)
194 | }
195 | defer tunnel.Close()
196 |
197 | // Create and transfer a huge message
198 | blob := make([]byte, 16*1024*1024)
199 | for i := 0; i < len(blob); i++ {
200 | blob[i] = byte(i)
201 | }
202 | if err := tunnel.Send(blob, 10*time.Second); err != nil {
203 | t.Fatalf("failed to send blob: %v.", err)
204 | }
205 | back, err := tunnel.Recv(10 * time.Second)
206 | if err != nil {
207 | t.Fatalf("failed to retrieve blob: %v.", err)
208 | }
209 | // Verify that they indeed match
210 | if !bytes.Equal(back, blob) {
211 | t.Fatalf("data blob mismatch")
212 | }
213 | }
214 |
215 | // Tests that a tunnel remains operational even after overloads (partially
216 | // transferred huge messages timing out).
217 | func TestTunnelOverload(t *testing.T) {
218 | // Create the service handler
219 | handler := new(tunnelTestHandler)
220 |
221 | // Register a new service to the relay
222 | serv, err := Register(config.relay, config.cluster, handler, nil)
223 | if err != nil {
224 | t.Fatalf("registration failed: %v.", err)
225 | }
226 | defer serv.Unregister()
227 |
228 | // Construct the tunnel
229 | tunnel, err := handler.conn.Tunnel(config.cluster, time.Second)
230 | if err != nil {
231 | t.Fatalf("tunnel construction failed: %v.", err)
232 | }
233 | defer tunnel.Close()
234 |
235 | // Overload the tunnel by partially transferring huge messages
236 | blob := make([]byte, 64*1024*1024)
237 | for i := 0; i < 10; i++ {
238 | if err := tunnel.Send(blob, 10*time.Millisecond); err != ErrTimeout {
239 | t.Fatalf("unexpected send result: have %v, want %v.", err, ErrTimeout)
240 | }
241 | }
242 | // Verify that the tunnel is still operational
243 | data := []byte{0x00, 0x01, 0x00, 0x02, 0x00, 0x03, 0x00, 0x04}
244 | for i := 0; i < 10; i++ { // Iterating matters: the first send always crosses (its allowance is ignored)
245 | if err := tunnel.Send(data, time.Second); err != nil {
246 | t.Fatalf("failed to send data: %v.", err)
247 | }
248 | back, err := tunnel.Recv(time.Second)
249 | if err != nil {
250 | t.Fatalf("failed to retrieve data: %v.", err)
251 | }
252 | // Verify that they indeed match
253 | if !bytes.Equal(back, data) {
254 | t.Fatalf("data mismatch: have %v, want %v.", back, data)
255 | }
256 | }
257 | }
258 |
259 | // Benchmarks the latency of the tunnel data transfer (the exchange is
260 | // two-way, so only b.N/2 round trips are measured).
261 | func BenchmarkTunnelLatency(b *testing.B) {
262 | // Create the service handler
263 | handler := new(tunnelTestHandler)
264 |
265 | // Register a new service to the relay
266 | serv, err := Register(config.relay, config.cluster, handler, nil)
267 | if err != nil {
268 | b.Fatalf("registration failed: %v.", err)
269 | }
270 | defer serv.Unregister()
271 |
272 | // Construct the tunnel
273 | tunnel, err := handler.conn.Tunnel(config.cluster, time.Second)
274 | if err != nil {
275 | b.Fatalf("tunnel construction failed: %v.", err)
276 | }
277 | defer tunnel.Close()
278 |
279 | // Reset the timer and measure the latency
280 | b.ResetTimer()
281 | for i := 0; i < b.N/2; i++ {
282 | if err := tunnel.Send([]byte{byte(i)}, time.Second); err != nil {
283 | b.Fatalf("tunnel send failed: %v.", err)
284 | }
285 | if _, err := tunnel.Recv(time.Second); err != nil {
286 | b.Fatalf("tunnel receive failed: %v.", err)
287 | }
288 | }
289 | // Stop the timer (don't measure deferred cleanup)
290 | b.StopTimer()
291 | }
292 |
293 | // Measures the throughput of the tunnel data transfer (the exchange is
294 | // two-way, so only b.N/2 messages are sent each way).
295 | func BenchmarkTunnelThroughput(b *testing.B) {
296 | // Create the service handler
297 | handler := new(tunnelTestHandler)
298 |
299 | // Register a new service to the relay
300 | serv, err := Register(config.relay, config.cluster, handler, nil)
301 | if err != nil {
302 | b.Fatalf("registration failed: %v.", err)
303 | }
304 | defer serv.Unregister()
305 |
306 | // Construct the tunnel
307 | tunnel, err := handler.conn.Tunnel(config.cluster, time.Second)
308 | if err != nil {
309 | b.Fatalf("tunnel construction failed: %v.", err)
310 | }
311 | defer tunnel.Close()
312 |
313 | // Reset the timer and measure the throughput
314 | b.ResetTimer()
315 | go func() {
316 | for i := 0; i < b.N/2; i++ {
317 | if err := tunnel.Send([]byte{byte(i)}, time.Second); err != nil {
318 | b.Errorf("tunnel send failed: %v.", err) // Fatalf may only be called from the benchmark goroutine
319 | }
320 | }
321 | }()
322 | for i := 0; i < b.N/2; i++ {
323 | if _, err := tunnel.Recv(time.Second); err != nil {
324 | b.Fatalf("tunnel receive failed: %v.", err)
325 | }
326 | }
327 | // Stop the timer (don't measure deferred cleanup)
328 | b.StopTimer()
329 | }
330 |
--------------------------------------------------------------------------------
/tunnel.go:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2013 Project Iris. All rights reserved.
2 | //
3 | // The current language binding is an official support library of the Iris
4 | // cloud messaging framework, and as such, the same licensing terms apply.
5 | // For details please see http://iris.karalabe.com/downloads#License
6 |
7 | package iris
8 |
9 | import (
10 | "errors"
11 | "fmt"
12 | "sync"
13 | "time"
14 |
15 | "github.com/project-iris/iris/container/queue"
16 | "gopkg.in/inconshreveable/log15.v2"
17 | )
18 |
19 | // Communication stream between the local application and a remote endpoint. The
20 | // ordered delivery of messages is guaranteed and the message flow between the
21 | // peers is throttled.
22 | type Tunnel struct {
23 | id uint64 // Tunnel identifier for de/multiplexing
24 | conn *Connection // Connection to the local relay
25 |
26 | // Chunking fields
27 | chunkLimit int // Maximum length of a data payload
28 | chunkBuf []byte // Current message being assembled
29 |
30 | // Quality of service fields
31 | itoaBuf *queue.Queue // Iris to application message buffer
32 | itoaSign chan struct{} // Message arrival signaler
33 | itoaLock sync.Mutex // Protects the buffer and signaler
34 |
35 | atoiSpace int // Application to Iris space allowance
36 | atoiSign chan struct{} // Allowance grant signaler
37 | atoiLock sync.Mutex // Protects the allowance and signaler
38 |
39 | // Bookkeeping fields
40 | init chan bool // Initialization channel for outbound tunnels
41 | term chan struct{} // Channel to signal termination to blocked go-routines
42 | stat error // Failure reason, if any received
43 |
44 | Log log15.Logger // Logger with connection and tunnel ids injected
45 | }
46 |
47 | func (c *Connection) newTunnel() (*Tunnel, error) {
48 | c.tunLock.Lock()
49 | defer c.tunLock.Unlock()
50 |
51 | // Make sure the connection is still up
52 | if c.tunLive == nil {
53 | return nil, ErrClosed
54 | }
55 | // Assign a new locally unique id to the tunnel
56 | tunId := c.tunIdx
57 | c.tunIdx++
58 |
59 | // Assemble and store the live tunnel
60 | tun := &Tunnel{
61 | id: tunId,
62 | conn: c,
63 |
64 | itoaBuf: queue.New(),
65 | itoaSign: make(chan struct{}, 1),
66 | atoiSign: make(chan struct{}, 1),
67 |
68 | init: make(chan bool),
69 | term: make(chan struct{}),
70 |
71 | Log: c.Log.New("tunnel", tunId),
72 | }
73 | c.tunLive[tunId] = tun
74 |
75 | return tun, nil
76 | }
77 |
78 | // Initiates a new tunnel to a remote cluster.
79 | func (c *Connection) initTunnel(cluster string, timeout time.Duration) (*Tunnel, error) {
80 | // Sanity check on the arguments
81 | if len(cluster) == 0 {
82 | return nil, errors.New("empty cluster identifier")
83 | }
84 | timeoutms := int(timeout.Nanoseconds() / 1000000)
85 | if timeoutms < 1 {
86 | return nil, fmt.Errorf("invalid timeout %v < 1ms", timeout)
87 | }
88 | // Create a potential tunnel
89 | tun, err := c.newTunnel()
90 | if err != nil {
91 | return nil, err
92 | }
93 | tun.Log.Info("constructing outbound tunnel", "cluster", cluster, "timeout", timeout)
94 |
95 | // Try and construct the tunnel
96 | err = c.sendTunnelInit(tun.id, cluster, timeoutms)
97 | if err == nil {
98 | // Wait for tunneling completion or a timeout
99 | select {
100 | case init := <-tun.init:
101 | if init {
102 | // Send the data allowance
103 | if err = c.sendTunnelAllowance(tun.id, defaultTunnelBuffer); err == nil {
104 | tun.Log.Info("tunnel construction completed", "chunk_limit", tun.chunkLimit)
105 | return tun, nil
106 | }
107 | } else {
108 | err = ErrTimeout
109 | }
110 | case <-c.term:
111 | err = ErrClosed
112 | }
113 | }
114 | // Clean up and return the failure
115 | c.tunLock.Lock()
116 | delete(c.tunLive, tun.id)
117 | c.tunLock.Unlock()
118 |
119 | tun.Log.Warn("tunnel construction failed", "reason", err)
120 | return nil, err
121 | }
122 |
123 | // Accepts an incoming tunneling request and confirms its local id.
124 | func (c *Connection) acceptTunnel(initId uint64, chunkLimit int) (*Tunnel, error) {
125 | // Create the local tunnel endpoint
126 | tun, err := c.newTunnel()
127 | if err != nil {
128 | return nil, err
129 | }
130 | tun.chunkLimit = chunkLimit
131 | tun.Log.Info("accepting inbound tunnel", "chunk_limit", chunkLimit)
132 |
133 | // Confirm the tunnel creation to the relay node
134 | err = c.sendTunnelConfirm(initId, tun.id)
135 | if err == nil {
136 | // Send the data allowance
137 | err = c.sendTunnelAllowance(tun.id, defaultTunnelBuffer)
138 | if err == nil {
139 | tun.Log.Info("tunnel acceptance completed")
140 | return tun, nil
141 | }
142 | }
143 | c.tunLock.Lock()
144 | delete(c.tunLive, tun.id)
145 | c.tunLock.Unlock()
146 |
147 | tun.Log.Warn("tunnel acceptance failed", "reason", err)
148 | return nil, err
149 | }
150 |
151 | // Sends a message over the tunnel to the remote pair, blocking until the local
152 | // Iris node receives the message or the operation times out.
153 | //
154 | // Infinite blocking is supported by setting the timeout to zero (0).
155 | func (t *Tunnel) Send(message []byte, timeout time.Duration) error {
156 | t.Log.Debug("sending message", "data", logLazyBlob(message), "timeout", logLazyTimeout(timeout))
157 |
158 | // Sanity check on the arguments
159 | if len(message) == 0 { // covers both nil and empty messages
160 | return errors.New("nil or empty message")
161 | }
162 | // Create timeout signaler
163 | var deadline <-chan time.Time
164 | if timeout != 0 {
165 | deadline = time.After(timeout)
166 | }
167 | // Split the original message into bounded chunks
168 | for pos := 0; pos < len(message); pos += t.chunkLimit {
169 | end := pos + t.chunkLimit
170 | if end > len(message) {
171 | end = len(message)
172 | }
173 | sizeOrCont := len(message)
174 | if pos != 0 {
175 | sizeOrCont = 0
176 | }
177 | if err := t.sendChunk(message[pos:end], sizeOrCont, deadline); err != nil {
178 | return err
179 | }
180 | }
181 | return nil
182 | }
183 |
184 | // Sends a single message chunk to the remote endpoint.
185 | func (t *Tunnel) sendChunk(chunk []byte, sizeOrCont int, deadline <-chan time.Time) error {
186 | for {
187 | // Short circuit if there's enough space allowance already
188 | if t.drainAllowance(len(chunk)) {
189 | return t.conn.sendTunnelTransfer(t.id, sizeOrCont, chunk)
190 | }
191 | // Query for a send allowance
192 | select {
193 | case <-t.term:
194 | return ErrClosed
195 | case <-deadline:
196 | return ErrTimeout
197 | case <-t.atoiSign:
198 | // Potentially enough space allowance, retry
199 | continue
200 | }
201 | }
202 | }
203 |
204 | // Checks whether there is enough space allowance available to send a message.
205 | // If yes, the allowance is reduced accordingly.
206 | func (t *Tunnel) drainAllowance(need int) bool {
207 | t.atoiLock.Lock()
208 | defer t.atoiLock.Unlock()
209 |
210 | if t.atoiSpace >= need {
211 | t.atoiSpace -= need
212 | return true
213 | }
214 | // Not enough, reset allowance grant flag
215 | select {
216 | case <-t.atoiSign:
217 | default:
218 | }
219 | return false
220 | }
221 |
222 | // Retrieves a message from the tunnel, blocking until one is available or the
223 | // operation times out.
224 | //
225 | // Infinite blocking is supported by setting the timeout to zero (0).
226 | func (t *Tunnel) Recv(timeout time.Duration) ([]byte, error) {
227 | // Short circuit if there's a message already buffered
228 | if msg := t.fetchMessage(); msg != nil {
229 | return msg, nil
230 | }
231 | // Create the timeout signaler
232 | var after <-chan time.Time
233 | if timeout != 0 {
234 | after = time.After(timeout)
235 | }
236 | // Wait for a message to arrive
237 | select {
238 | case <-t.term:
239 | return nil, ErrClosed
240 | case <-after:
241 | return nil, ErrTimeout
242 | case <-t.itoaSign:
243 | if msg := t.fetchMessage(); msg != nil {
244 | return msg, nil
245 | }
246 | panic("signal raised but message unavailable")
247 | }
248 | }
249 |
250 | // Fetches the next buffered message, or nil if none is available. If a message
251 | // was available, grants the remote side the space allowance just consumed.
252 | func (t *Tunnel) fetchMessage() []byte {
253 | t.itoaLock.Lock()
254 | defer t.itoaLock.Unlock()
255 |
256 | if !t.itoaBuf.Empty() {
257 | message := t.itoaBuf.Pop().([]byte)
258 | go t.conn.sendTunnelAllowance(t.id, len(message))
259 |
260 | t.Log.Debug("fetching queued message", "data", logLazyBlob(message))
261 | return message
262 | }
263 | // No message, reset arrival flag
264 | select {
265 | case <-t.itoaSign:
266 | default:
267 | }
268 | return nil
269 | }
270 |
271 | // Closes the tunnel between the pair. Any blocked read or write operations will
272 | // terminate with a failure.
273 | //
274 | // The method blocks until the local relay node acknowledges the tear-down.
275 | func (t *Tunnel) Close() error {
276 | // Short circuit if remote end already closed
277 | select {
278 | case <-t.term:
279 | return t.stat
280 | default:
281 | }
282 | // Signal the relay and wait for closure
283 | t.Log.Info("closing tunnel")
284 | if err := t.conn.sendTunnelClose(t.id); err != nil {
285 | return err
286 | }
287 | <-t.term
288 | return t.stat
289 | }
290 |
291 | // Finalizes the tunnel construction.
292 | func (t *Tunnel) handleInitResult(chunkLimit int) {
293 | if chunkLimit > 0 {
294 | t.chunkLimit = chunkLimit
295 | }
296 | t.init <- (chunkLimit > 0)
297 | }
298 |
299 | // Increases the data allowance granted by the remote endpoint.
300 | func (t *Tunnel) handleAllowance(space int) {
301 | t.atoiLock.Lock()
302 | defer t.atoiLock.Unlock()
303 |
304 | t.atoiSpace += space
305 | select {
306 | case t.atoiSign <- struct{}{}:
307 | default:
308 | }
309 | }
310 |
311 | // Adds the chunk to the message currently being assembled and delivers it
312 | // upon completion. If a new message starts, the old one is discarded.
313 | func (t *Tunnel) handleTransfer(size int, chunk []byte) {
314 | // If a new message is arriving, dump anything stored before
315 | if size != 0 {
316 | if t.chunkBuf != nil {
317 | t.Log.Warn("incomplete message discarded", "size", cap(t.chunkBuf), "arrived", len(t.chunkBuf))
318 |
319 | // A large transfer timed out, new started, grant the partials allowance
320 | go t.conn.sendTunnelAllowance(t.id, len(t.chunkBuf))
321 | }
322 | t.chunkBuf = make([]byte, 0, size)
323 | }
324 | // Append the new chunk and check completion
325 | t.chunkBuf = append(t.chunkBuf, chunk...)
326 | if len(t.chunkBuf) == cap(t.chunkBuf) {
327 | t.itoaLock.Lock()
328 | defer t.itoaLock.Unlock()
329 |
330 | t.Log.Debug("queuing arrived message", "data", logLazyBlob(t.chunkBuf))
331 | t.itoaBuf.Push(t.chunkBuf)
332 | t.chunkBuf = nil
333 |
334 | select {
335 | case t.itoaSign <- struct{}{}:
336 | default:
337 | }
338 | }
339 | }
340 |
341 | // Handles the remote closure of the tunnel, whether graceful or failed.
342 | func (t *Tunnel) handleClose(reason string) {
343 | if reason != "" {
344 | t.Log.Warn("tunnel dropped", "reason", reason)
345 | t.stat = fmt.Errorf("remote error: %s", reason)
346 | } else {
347 | t.Log.Info("tunnel closed gracefully")
348 | }
349 | close(t.term)
350 | }
351 |
--------------------------------------------------------------------------------
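Putting the tunnel API together from the caller's side, mirroring what the tests earlier in this listing exercise: construct, exchange under explicit deadlines, close. A sketch with conn an established *Connection and the usual iris and time imports assumed:

func pingPong(conn *iris.Connection) ([]byte, error) {
	// Construct the tunnel; ErrTimeout if no cluster member answers in time
	tun, err := conn.Tunnel("echo", time.Second)
	if err != nil {
		return nil, err
	}
	defer tun.Close()

	// Send under a deadline; chunking of large payloads is transparent
	if err := tun.Send([]byte("ping"), time.Second); err != nil {
		return nil, err
	}
	// A zero timeout would block until a message arrives or the tunnel closes
	return tun.Recv(time.Second)
}
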
/broadcast_test.go:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2013 Project Iris. All rights reserved.
2 | //
3 | // The current language binding is an official support library of the Iris
4 | // cloud messaging framework, and as such, the same licensing terms apply.
5 | // For details please see http://iris.karalabe.com/downloads#License
6 |
7 | package iris
8 |
9 | import (
10 | "errors"
11 | "fmt"
12 | "sync"
13 | "testing"
14 | "time"
15 |
16 | "github.com/project-iris/iris/pool"
17 | )
18 |
19 | // Service handler for the broadcast tests.
20 | type broadcastTestHandler struct {
21 | conn *Connection
22 | delivers chan []byte
23 | }
24 |
25 | func (b *broadcastTestHandler) Init(conn *Connection) error { b.conn = conn; return nil }
26 | func (b *broadcastTestHandler) HandleBroadcast(msg []byte) { b.delivers <- msg }
27 | func (b *broadcastTestHandler) HandleRequest(req []byte) ([]byte, error) { panic("not implemented") }
28 | func (b *broadcastTestHandler) HandleTunnel(tun *Tunnel) { panic("not implemented") }
29 | func (b *broadcastTestHandler) HandleDrop(reason error) { panic("not implemented") }
30 |
31 | // Tests multiple concurrent client and service broadcasts.
32 | func TestBroadcast(t *testing.T) {
33 | // Test specific configurations
34 | conf := struct {
35 | clients int
36 | servers int
37 | messages int
38 | }{25, 25, 25}
39 |
40 | barrier := newBarrier(conf.clients + conf.servers)
41 | shutdown := new(sync.WaitGroup)
42 |
43 | // Start up the concurrent broadcasting clients
44 | for i := 0; i < conf.clients; i++ {
45 | shutdown.Add(1)
46 | go func(client int) {
47 | defer shutdown.Done()
48 |
49 | // Connect to the local relay
50 | conn, err := Connect(config.relay)
51 | if err != nil {
52 | barrier.Exit(fmt.Errorf("connection failed: %v", err))
53 | return
54 | }
55 | defer conn.Close()
56 | barrier.Sync()
57 |
58 | // Broadcast to the whole service cluster
59 | for i := 0; i < conf.messages; i++ {
60 | message := fmt.Sprintf("client #%d, broadcast %d", client, i)
61 | if err := conn.Broadcast(config.cluster, []byte(message)); err != nil {
62 | barrier.Exit(fmt.Errorf("client broadcast failed: %v", err))
63 | return
64 | }
65 | }
66 | barrier.Exit(nil)
67 | }(i)
68 | }
69 | // Start up the concurrent broadcast services
70 | for i := 0; i < conf.servers; i++ {
71 | shutdown.Add(1)
72 | go func(server int) {
73 | defer shutdown.Done()
74 |
75 | // Create the service handler
76 | handler := &broadcastTestHandler{
77 | delivers: make(chan []byte, (conf.clients+conf.servers)*conf.messages),
78 | }
79 | // Register a new service to the relay
80 | serv, err := Register(config.relay, config.cluster, handler, nil)
81 | if err != nil {
82 | barrier.Exit(fmt.Errorf("registration failed: %v", err))
83 | return
84 | }
85 | defer serv.Unregister()
86 | barrier.Sync()
87 |
88 | // Broadcast to the whole service cluster
89 | for i := 0; i < conf.messages; i++ {
90 | message := fmt.Sprintf("server #%d, broadcast %d", server, i)
91 | if err := handler.conn.Broadcast(config.cluster, []byte(message)); err != nil {
92 | barrier.Exit(fmt.Errorf("server broadcast failed: %v", err))
93 | return
94 | }
95 | }
96 | barrier.Sync()
97 |
98 | // Retrieve all the arrived broadcasts
99 | messages := make(map[string]struct{})
100 | for i := 0; i < (conf.clients+conf.servers)*conf.messages; i++ {
101 | select {
102 | case msg := <-handler.delivers:
103 | messages[string(msg)] = struct{}{}
104 | case <-time.After(time.Second):
105 | barrier.Exit(errors.New("broadcast receive timeout"))
106 | return
107 | }
108 | }
109 | // Verify all the individual broadcasts
110 | for i := 0; i < conf.clients; i++ {
111 | for j := 0; j < conf.messages; j++ {
112 | msg := fmt.Sprintf("client #%d, broadcast %d", i, j)
113 | if _, ok := messages[msg]; !ok {
114 | barrier.Exit(fmt.Errorf("broadcast not found: %s", msg))
115 | return
116 | }
117 | delete(messages, msg)
118 | }
119 | }
120 | for i := 0; i < conf.servers; i++ {
121 | for j := 0; j < conf.messages; j++ {
122 | msg := fmt.Sprintf("server #%d, broadcast %d", i, j)
123 | if _, ok := messages[msg]; !ok {
124 | barrier.Exit(fmt.Errorf("broadcast not found: %s", msg))
125 | return
126 | }
127 | delete(messages, msg)
128 | }
129 | }
130 | barrier.Exit(nil)
131 | }(i)
132 | }
133 | // Schedule the parallel operations
134 | if errs := barrier.Wait(); len(errs) != 0 {
135 | t.Fatalf("startup phase failed: %v.", errs)
136 | }
137 | if errs := barrier.Wait(); len(errs) != 0 {
138 | t.Fatalf("broadcasting phase failed: %v.", errs)
139 | }
140 | if errs := barrier.Wait(); len(errs) != 0 {
141 | t.Fatalf("verification phase failed: %v.", errs)
142 | }
143 | // Make sure all children terminated
144 | shutdown.Wait()
145 | }
146 |
147 | // Service handler for the broadcast limit tests.
148 | type broadcastLimitTestHandler struct {
149 | conn *Connection
150 | delivers chan []byte
151 | sleep time.Duration
152 | }
153 |
154 | func (b *broadcastLimitTestHandler) Init(conn *Connection) error { b.conn = conn; return nil }
155 | func (b *broadcastLimitTestHandler) HandleRequest([]byte) ([]byte, error) { panic("not implemented") }
156 | func (b *broadcastLimitTestHandler) HandleTunnel(tun *Tunnel) { panic("not implemented") }
157 | func (b *broadcastLimitTestHandler) HandleDrop(reason error) { panic("not implemented") }
158 |
159 | func (b *broadcastLimitTestHandler) HandleBroadcast(msg []byte) {
160 | time.Sleep(b.sleep)
161 | b.delivers <- msg
162 | }
163 |
164 | // Tests the broadcast thread limitation.
165 | func TestBroadcastThreadLimit(t *testing.T) {
166 | // Test specific configurations
167 | conf := struct {
168 | messages int
169 | sleep time.Duration
170 | }{4, 100 * time.Millisecond}
171 |
172 | // Create the service handler and limiter
173 | handler := &broadcastLimitTestHandler{
174 | delivers: make(chan []byte, conf.messages),
175 | sleep: conf.sleep,
176 | }
177 | limits := &ServiceLimits{BroadcastThreads: 1}
178 |
179 | // Register a new service to the relay
180 | serv, err := Register(config.relay, config.cluster, handler, limits)
181 | if err != nil {
182 | t.Fatalf("registration failed: %v.", err)
183 | }
184 | defer serv.Unregister()
185 |
186 | // Send a few broadcasts
187 | for i := 0; i < conf.messages; i++ {
188 | if err := handler.conn.Broadcast(config.cluster, []byte{byte(i)}); err != nil {
189 | t.Fatalf("broadcast failed: %v.", err)
190 | }
191 | }
192 | 	// Wait for half of the total time and verify that only half was processed
193 | time.Sleep(time.Duration(conf.messages/2)*conf.sleep + conf.sleep/2)
194 | for i := 0; i < conf.messages/2; i++ {
195 | select {
196 | case <-handler.delivers:
197 | default:
198 | t.Errorf("broadcast #%d not received", i)
199 | }
200 | }
201 | select {
202 | case <-handler.delivers:
203 | t.Errorf("additional broadcast received")
204 | default:
205 | }
206 | }
207 |
208 | // Tests the broadcast memory limitation.
209 | func TestBroadcastMemoryLimit(t *testing.T) {
210 | // Create the service handler and limiter
211 | handler := &broadcastTestHandler{
212 | delivers: make(chan []byte, 1),
213 | }
214 | limits := &ServiceLimits{BroadcastMemory: 1}
215 |
216 | // Register a new service to the relay
217 | serv, err := Register(config.relay, config.cluster, handler, limits)
218 | if err != nil {
219 | t.Fatalf("registration failed: %v.", err)
220 | }
221 | defer serv.Unregister()
222 |
223 | // Check that a 1 byte broadcast passes
224 | if err := handler.conn.Broadcast(config.cluster, []byte{0x00}); err != nil {
225 | t.Fatalf("small broadcast failed: %v.", err)
226 | }
227 | time.Sleep(time.Millisecond)
228 | select {
229 | case <-handler.delivers:
230 | default:
231 | t.Fatalf("small broadcast not received.")
232 | }
233 | // Check that a 2 byte broadcast is dropped
234 | if err := handler.conn.Broadcast(config.cluster, []byte{0x00, 0x00}); err != nil {
235 | t.Fatalf("large broadcast failed: %v.", err)
236 | }
237 | time.Sleep(time.Millisecond)
238 | select {
239 | case <-handler.delivers:
240 | t.Fatalf("large broadcast received.")
241 | default:
242 | }
243 | // Check that space freed gets replenished
244 | if err := handler.conn.Broadcast(config.cluster, []byte{0x00}); err != nil {
245 | t.Fatalf("second small broadcast failed: %v.", err)
246 | }
247 | time.Sleep(time.Millisecond)
248 | select {
249 | case <-handler.delivers:
250 | default:
251 | t.Fatalf("second small broadcast not received.")
252 | }
253 | }
254 |
255 | // Benchmarks the latency of a single broadcast operation.
256 | func BenchmarkBroadcastLatency(b *testing.B) {
257 | // Create the service handler
258 | handler := &broadcastTestHandler{
259 | delivers: make(chan []byte, b.N),
260 | }
261 | // Register a new service to the relay
262 | serv, err := Register(config.relay, config.cluster, handler, nil)
263 | if err != nil {
264 | b.Fatalf("registration failed: %v.", err)
265 | }
266 | defer serv.Unregister()
267 |
268 | // Reset timer and benchmark the message transfer
269 | b.ResetTimer()
270 | for i := 0; i < b.N; i++ {
271 | handler.conn.Broadcast(config.cluster, []byte{byte(i)})
272 | <-handler.delivers
273 | }
274 | // Stop the timer (don't measure deferred cleanup)
275 | b.StopTimer()
276 | }
277 |
278 | // Benchmarks the throughput of a stream of concurrent broadcasts.
279 | func BenchmarkBroadcastThroughput1Threads(b *testing.B) {
280 | benchmarkBroadcastThroughput(1, b)
281 | }
282 |
283 | func BenchmarkBroadcastThroughput2Threads(b *testing.B) {
284 | benchmarkBroadcastThroughput(2, b)
285 | }
286 |
287 | func BenchmarkBroadcastThroughput4Threads(b *testing.B) {
288 | benchmarkBroadcastThroughput(4, b)
289 | }
290 |
291 | func BenchmarkBroadcastThroughput8Threads(b *testing.B) {
292 | benchmarkBroadcastThroughput(8, b)
293 | }
294 |
295 | func BenchmarkBroadcastThroughput16Threads(b *testing.B) {
296 | benchmarkBroadcastThroughput(16, b)
297 | }
298 |
299 | func BenchmarkBroadcastThroughput32Threads(b *testing.B) {
300 | benchmarkBroadcastThroughput(32, b)
301 | }
302 |
303 | func BenchmarkBroadcastThroughput64Threads(b *testing.B) {
304 | benchmarkBroadcastThroughput(64, b)
305 | }
306 |
307 | func BenchmarkBroadcastThroughput128Threads(b *testing.B) {
308 | benchmarkBroadcastThroughput(128, b)
309 | }
310 |
311 | func benchmarkBroadcastThroughput(threads int, b *testing.B) {
312 | // Create the service handler
313 | handler := &broadcastTestHandler{
314 | delivers: make(chan []byte, b.N),
315 | }
316 | // Register a new service to the relay
317 | serv, err := Register(config.relay, config.cluster, handler, nil)
318 | if err != nil {
319 | b.Fatalf("registration failed: %v.", err)
320 | }
321 | defer serv.Unregister()
322 |
323 | // Create the thread pool with the concurrent broadcasts
324 | workers := pool.NewThreadPool(threads)
325 | 	for i := 0; i < b.N; i++ {
326 | 		i := i // capture the loop variable by value, otherwise every closure shares the final i
327 | 		workers.Schedule(func() {
328 | 			if err := handler.conn.Broadcast(config.cluster, []byte{byte(i)}); err != nil {
329 | 				b.Errorf("broadcast failed: %v.", err) // Errorf: Fatalf may not be called from a worker goroutine
330 | 			}
331 | 		})
332 | 	}
333 | 	// Reset timer and benchmark the message transfer
334 | 	b.ResetTimer()
335 | 	workers.Start()
336 | 	for i := 0; i < b.N; i++ {
337 | 		<-handler.delivers
338 | 	}
339 | 	workers.Terminate(false)
340 | 
341 | 	// Stop the timer (don't measure deferred cleanup)
342 | 	b.StopTimer()
343 | }
344 | 
--------------------------------------------------------------------------------
/connection.go:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2013 Project Iris. All rights reserved.
2 | //
3 | // The current language binding is an official support library of the Iris
4 | // cloud messaging framework, and as such, the same licensing terms apply.
5 | // For details please see http://iris.karalabe.com/downloads#License
6 |
7 | package iris
8 |
9 | import (
10 | "bufio"
11 | "errors"
12 | "fmt"
13 | "net"
14 | "sync"
15 | "sync/atomic"
16 | "time"
17 |
18 | "github.com/project-iris/iris/pool"
19 | "gopkg.in/inconshreveable/log15.v2"
20 | )
21 |
22 | // Client connection to the Iris network.
23 | type Connection struct {
24 | // Application layer fields
25 | handler ServiceHandler // Handler for connection events
26 |
27 | reqIdx uint64 // Index to assign the next request
28 | reqReps map[uint64]chan []byte // Reply channels for active requests
29 | reqErrs map[uint64]chan error // Error channels for active requests
30 | reqLock sync.RWMutex // Mutex to protect the result channel maps
31 |
32 | subIdx uint64 // Index to assign the next subscription (logging purposes)
33 | subLive map[string]*topic // Active subscriptions
34 | subLock sync.RWMutex // Mutex to protect the subscription map
35 |
36 | tunIdx uint64 // Index to assign the next tunnel
37 | tunLive map[uint64]*Tunnel // Active tunnels
38 | tunLock sync.RWMutex // Mutex to protect the tunnel map
39 |
40 | // Quality of service fields
41 | limits *ServiceLimits // Limits on the inbound message processing
42 |
43 | bcastIdx uint64 // Index to assign the next inbound broadcast (logging purposes)
44 | bcastPool *pool.ThreadPool // Queue and concurrency limiter for the broadcast handlers
45 | bcastUsed int32 // Actual memory usage of the broadcast queue
46 |
47 | reqPool *pool.ThreadPool // Queue and concurrency limiter for the request handlers
48 | reqUsed int32 // Actual memory usage of the request queue
49 |
50 | // Network layer fields
51 | sock net.Conn // Network connection to the iris node
52 | sockBuf *bufio.ReadWriter // Buffered access to the network socket
53 | sockLock sync.Mutex // Mutex to atomize message sending
54 | sockWait int32 // Counter for the pending writes (batch before flush)
55 |
56 | // Bookkeeping fields
57 | init chan struct{} // Init channel to receive a success signal
58 | quit chan chan error // Quit channel to synchronize receiver termination
59 | term chan struct{} // Channel to signal termination to blocked go-routines
60 |
61 | Log log15.Logger // Logger with connection id injected
62 | }
63 |
64 | // Id to assign to the next connection (used for logging purposes).
65 | var nextConnId uint64
66 |
67 | // Connects to the Iris network as a simple client.
68 | func Connect(port int) (*Connection, error) {
69 | logger := Log.New("client", atomic.AddUint64(&nextConnId, 1))
70 | logger.Info("connecting new client", "relay_port", port)
71 |
72 | conn, err := newConnection(port, "", nil, nil, logger)
73 | if err != nil {
74 | logger.Warn("failed to connect new client", "reason", err)
75 | } else {
76 | logger.Info("client connection established")
77 | }
78 | return conn, err
79 | }
80 |
81 | // Connects to a local relay endpoint on port and registers as cluster.
82 | func newConnection(port int, cluster string, handler ServiceHandler, limits *ServiceLimits, logger log15.Logger) (*Connection, error) {
83 | // Connect to the iris relay node
84 | addr, err := net.ResolveTCPAddr("tcp", fmt.Sprintf("localhost:%d", port))
85 | if err != nil {
86 | return nil, err
87 | }
88 | sock, err := net.DialTCP("tcp", nil, addr)
89 | if err != nil {
90 | return nil, err
91 | }
92 | // Create the relay object
93 | conn := &Connection{
94 | // Application layer
95 | handler: handler,
96 |
97 | reqReps: make(map[uint64]chan []byte),
98 | reqErrs: make(map[uint64]chan error),
99 | subLive: make(map[string]*topic),
100 | tunLive: make(map[uint64]*Tunnel),
101 |
102 | // Network layer
103 | sock: sock,
104 | sockBuf: bufio.NewReadWriter(bufio.NewReader(sock), bufio.NewWriter(sock)),
105 |
106 | // Bookkeeping
107 | quit: make(chan chan error),
108 | term: make(chan struct{}),
109 |
110 | Log: logger,
111 | }
112 | // Initialize service QoS fields
113 | if cluster != "" {
114 | conn.limits = limits
115 | conn.bcastPool = pool.NewThreadPool(limits.BroadcastThreads)
116 | conn.reqPool = pool.NewThreadPool(limits.RequestThreads)
117 | }
118 | // Initialize the connection and wait for a confirmation
119 | if err := conn.sendInit(cluster); err != nil {
120 | return nil, err
121 | }
122 | if _, err := conn.procInit(); err != nil {
123 | return nil, err
124 | }
125 | // Start the network receiver and return
126 | go conn.process()
127 | return conn, nil
128 | }
129 |
130 | // Broadcasts a message to all members of a cluster. No guarantees are made that
131 | // all recipients receive the message (best effort).
132 | //
133 | // The call blocks until the message is forwarded to the local Iris node.
134 | func (c *Connection) Broadcast(cluster string, message []byte) error {
135 | // Sanity check on the arguments
136 | if len(cluster) == 0 {
137 | return errors.New("empty cluster identifier")
138 | }
139 | 	if len(message) == 0 {
140 | return errors.New("nil or empty message")
141 | }
142 | // Broadcast and return
143 | c.Log.Debug("sending new broadcast", "cluster", cluster, "data", logLazyBlob(message))
144 | return c.sendBroadcast(cluster, message)
145 | }
146 |
147 | // Executes a synchronous request to be serviced by a member of the specified
148 | // cluster, load-balanced between all participants, returning the received reply.
149 | //
150 | // The timeout is measured in milliseconds; anything below one millisecond will fail with an error.
151 | func (c *Connection) Request(cluster string, request []byte, timeout time.Duration) ([]byte, error) {
152 | // Sanity check on the arguments
153 | if len(cluster) == 0 {
154 | return nil, errors.New("empty cluster identifier")
155 | }
156 | 	if len(request) == 0 {
157 | return nil, errors.New("nil or empty request")
158 | }
159 | timeoutms := int(timeout.Nanoseconds() / 1000000)
160 | if timeoutms < 1 {
161 | return nil, fmt.Errorf("invalid timeout %v < 1ms", timeout)
162 | }
163 | // Create a reply and error channel for the results
164 | repc := make(chan []byte, 1)
165 | errc := make(chan error, 1)
166 |
167 | c.reqLock.Lock()
168 | reqId := c.reqIdx
169 | c.reqIdx++
170 | c.reqReps[reqId] = repc
171 | c.reqErrs[reqId] = errc
172 | c.reqLock.Unlock()
173 |
174 | // Make sure the result channels are cleaned up
175 | defer func() {
176 | c.reqLock.Lock()
177 | delete(c.reqReps, reqId)
178 | delete(c.reqErrs, reqId)
179 | close(repc)
180 | close(errc)
181 | c.reqLock.Unlock()
182 | }()
183 | // Send the request
184 | c.Log.Debug("sending new request", "local_request", reqId, "cluster", cluster, "data", logLazyBlob(request), "timeout", timeout)
185 | if err := c.sendRequest(reqId, cluster, request, timeoutms); err != nil {
186 | return nil, err
187 | }
188 | // Retrieve the results or fail if terminating
189 | var reply []byte
190 | var err error
191 |
192 | select {
193 | case <-c.term:
194 | err = ErrClosed
195 | case reply = <-repc:
196 | case err = <-errc:
197 | }
198 | c.Log.Debug("request completed", "local_request", reqId, "data", logLazyBlob(reply), "error", err)
199 | return reply, err
200 | }
201 |
202 | // Subscribes to a topic, using handler as the callback for arriving events.
203 | //
204 | // The method blocks until the subscription is forwarded to the relay. There
205 | // might be a small delay between subscription completion and start of event
206 | // delivery. This is caused by subscription propagation through the network.
207 | func (c *Connection) Subscribe(topic string, handler TopicHandler, limits *TopicLimits) error {
208 | // Sanity check on the arguments
209 | if len(topic) == 0 {
210 | return errors.New("empty topic identifier")
211 | }
212 | if handler == nil {
213 | return errors.New("nil subscription handler")
214 | }
215 | // Make sure the subscription limits have valid values
216 | limits = finalizeTopicLimits(limits)
217 |
218 | // Subscribe locally
219 | c.subLock.Lock()
220 | if _, ok := c.subLive[topic]; ok {
221 | c.subLock.Unlock()
222 | return errors.New("already subscribed")
223 | }
224 | logger := c.Log.New("topic", atomic.AddUint64(&c.subIdx, 1))
225 | logger.Info("subscribing to new topic", "name", topic,
226 | "limits", log15.Lazy{func() string {
227 | return fmt.Sprintf("%dT|%dB", limits.EventThreads, limits.EventMemory)
228 | }})
229 |
230 | c.subLive[topic] = newTopic(handler, limits, logger)
231 | c.subLock.Unlock()
232 |
233 | // Send the subscription request
234 | err := c.sendSubscribe(topic)
235 | if err != nil {
236 | c.subLock.Lock()
237 | if top, ok := c.subLive[topic]; ok {
238 | top.terminate()
239 | delete(c.subLive, topic)
240 | }
241 | c.subLock.Unlock()
242 | }
243 | return err
244 | }
245 |
246 | // Publishes an event asynchronously to topic. No guarantees are made that all
247 | // subscribers receive the message (best effort).
248 | //
249 | // The method blocks until the message is forwarded to the local Iris node.
250 | func (c *Connection) Publish(topic string, event []byte) error {
251 | // Sanity check on the arguments
252 | if len(topic) == 0 {
253 | return errors.New("empty topic identifier")
254 | }
255 | 	if len(event) == 0 {
256 | return errors.New("nil or empty event")
257 | }
258 | // Publish and return
259 | c.Log.Debug("publishing new event", "topic", topic, "data", logLazyBlob(event))
260 | return c.sendPublish(topic, event)
261 | }
262 |
263 | // Unsubscribes from topic, receiving no more event notifications for it.
264 | //
265 | // The method blocks until the unsubscription is forwarded to the local Iris node.
266 | func (c *Connection) Unsubscribe(topic string) error {
267 | // Sanity check on the arguments
268 | if len(topic) == 0 {
269 | return errors.New("empty topic identifier")
270 | }
271 | // Log the unsubscription request
272 | c.subLock.RLock()
273 | if top, ok := c.subLive[topic]; ok {
274 | top.logger.Info("unsubscribing from topic")
275 | }
276 | c.subLock.RUnlock()
277 |
278 | // Unsubscribe through the relay and remove if successful
279 | err := c.sendUnsubscribe(topic)
280 | if err == nil {
281 | c.subLock.Lock()
282 | defer c.subLock.Unlock()
283 |
284 | 		top, ok := c.subLive[topic]
285 | 		if !ok {
286 | 			return errors.New("not subscribed")
287 | 		}
288 | 		top.terminate()
289 | 		delete(c.subLive, topic)
290 | }
291 | return err
292 | }
293 |
294 | // Opens a direct tunnel to a member of a remote cluster, allowing pairwise-
295 | // exclusive, order-guaranteed and throttled message passing between them.
296 | //
297 | // The method blocks until the newly created tunnel is set up, or the time
298 | // limit is reached.
299 | //
300 | // The timeout is measured in milliseconds; anything below one millisecond will fail with an error.
301 | func (c *Connection) Tunnel(cluster string, timeout time.Duration) (*Tunnel, error) {
302 | // Simple call indirection to move into the tunnel source file
303 | return c.initTunnel(cluster, timeout)
304 | }
305 |
306 | // Gracefully terminates the connection, removing all subscriptions and closing
307 | // all active tunnels.
308 | //
309 | // The call blocks until the connection tear-down is confirmed by the Iris node.
310 | func (c *Connection) Close() error {
311 | c.Log.Info("detaching from relay")
312 |
313 | // Send a graceful close to the relay node
314 | if err := c.sendClose(); err != nil {
315 | return err
316 | }
317 | // Wait till the close syncs and return
318 | errc := make(chan error, 1)
319 | c.quit <- errc
320 |
321 | // Terminate all running subscription handlers
322 | c.subLock.Lock()
323 | for _, topic := range c.subLive {
324 | topic.logger.Warn("forcefully terminating subscription")
325 | topic.terminate()
326 | }
327 | c.subLock.Unlock()
328 |
329 | return <-errc
330 | }
331 |
--------------------------------------------------------------------------------
/doc.go:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2014 Project Iris. All rights reserved.
2 | //
3 | // The current language binding is an official support library of the Iris
4 | // cloud messaging framework, and as such, the same licensing terms apply.
5 | // For details please see http://iris.karalabe.com/downloads#License
6 |
7 | /*
8 | Package iris contains the Go binding to the Iris cloud messaging framework.
9 |
10 | Note, since GoDoc doesn't support any markup, a significant amount of visual
11 | polish on the documentation was lost. For the fully marked up version of
12 | the introductory docs, please consult the package readme file.
13 |
14 | https://github.com/project-iris/iris-go/blob/v1/README.md
15 |
16 | Background
17 |
18 | Iris is an attempt at bringing the simplicity and elegance of cloud computing to
19 | the application layer. Consumer clouds provide unlimited virtual machines at the
20 | click of a button, but leave it to the developer to wire them together. Iris ensures
21 | that you can forget about networking challenges and instead focus on solving your
22 | own domain problems.
23 |
24 | It is a completely decentralized messaging solution for simplifying the design
25 | and implementation of cloud services. Among others, Iris features zero-
26 | configuration (i.e. start it up and it will do its magic), semantic addressing
27 | (i.e. applications use textual names to address each other), clusters as units
28 | (i.e. automatic load balancing between apps of the same name) and perfect secrecy
29 | (i.e. all network traffic is encrypted).
30 |
31 | You can find further information on the Iris website [http://iris.karalabe.com] and
32 | details of the above features in the core concepts [http://iris.karalabe.com/book/core_concepts]
33 | section of the book of Iris [http://iris.karalabe.com/book]. For the scientifically
34 | inclined, a small collection of papers [http://iris.karalabe.com/papers] featuring
35 | Iris is also available. Slides and videos of previously given public presentations
36 | are published in the talks [http://iris.karalabe.com/talks] page.
37 |
38 | There is a growing community on Twitter @iriscmf [https://twitter.com/iriscmf],
39 | Google groups project-iris [https://groups.google.com/group/project-iris] and
40 | GitHub project-iris [https://github.com/project-iris].
41 |
42 | Installation
43 |
44 | To get the package, execute:
45 |
46 | go get gopkg.in/project-iris/iris-go.v1
47 |
48 | To import this package, add the following line to your code:
49 |
50 | import "gopkg.in/project-iris/iris-go.v1"
51 |
52 | Refer to it as iris.
53 |
54 | Quickstart
55 |
56 | Iris uses a relaying architecture, where client applications do not communicate
57 | directly with one another, but instead delegate all messaging operations to a
58 | local relay process responsible for transferring the messages to the correct
59 | destinations. Hence, the first step to using Iris through any binding is setting
60 | up the local relay node [http://iris.karalabe.com/downloads]. You can find detailed
61 | information in the Run, Forrest, Run [http://iris.karalabe.com/book/run_forrest_run]
62 | section of the book of Iris [http://iris.karalabe.com/book], but a very simple
63 | way would be to start a developer node.
64 |
65 | > iris -dev
66 | Entering developer mode
67 | Generating random RSA key... done.
68 | Generating random network name... done.
69 |
70 | 2014/06/13 18:13:47 main: booting iris overlay...
71 | 2014/06/13 18:13:47 scribe: booting with id 369650985814.
72 | 2014/06/13 18:13:57 main: iris overlay converged with 0 remote connections.
73 | 2014/06/13 18:13:57 main: booting relay service...
74 | 2014/06/13 18:13:57 main: iris successfully booted, listening on port 55555.
75 |
76 | Since it generates random credentials, a developer node will not be able to
77 | connect with other remote nodes in the network. However, it provides a quick
78 | solution to start developing without needing to configure a network name and
79 | associated access key. Should you wish to interconnect multiple nodes, please
80 | provide the -net and -rsa flags.
81 |
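An illustrative invocation might look like the line below; it assumes -net
takes the network name and -rsa the path to the RSA private key, so please
check the relay's own help output for the authoritative flag syntax.

	> iris -net mynetwork -rsa ./mynetwork.rsa
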
82 | Attaching to the relay
83 |
84 | After successfully booting, the relay opens a local TCP endpoint (port 55555 by
85 | default, configurable using -port) through which arbitrarily many entities may
86 | attach. Each connecting entity may also decide whether it becomes a simple client
87 | only consuming the services provided by other participants, or a full-fledged
88 | service, also making functionality available to others for consumption.
89 |
90 | Connecting as a client can be done trivially by invoking iris.Connect with the
91 | port number of the local relay's client endpoint. After the attachment is
92 | completed, an iris.Connection instance is returned through which messaging can
93 | begin. A client cannot accept inbound requests, broadcasts or tunnels; it can
94 | only initiate them.
95 |
96 | conn, err := iris.Connect(55555)
97 | if err != nil {
98 | log.Fatalf("failed to connect to the Iris relay: %v.", err)
99 | }
100 | defer conn.Close()
101 |
102 | To provide functionality for consumption, an entity needs to register as a
103 | service. This is slightly more involved, as besides initiating a registration
104 | request, it also needs to specify a callback handler to process inbound events.
105 | First, the callback handler needs to implement the iris.ServiceHandler interface.
106 | After creating the handler, registration can commence by invoking iris.Register
107 | with the port number of the local relay's client endpoint; the sub-service cluster
108 | this entity will join as a member; the handler itself to process inbound messages;
109 | and an optional resource cap.
110 |
111 | type EchoHandler struct {}
112 |
113 | 	func (b *EchoHandler) Init(conn *iris.Connection) error { return nil }
114 | 	func (b *EchoHandler) HandleBroadcast(msg []byte) { }
115 | 	func (b *EchoHandler) HandleRequest(req []byte) ([]byte, error) { return req, nil }
116 | 	func (b *EchoHandler) HandleTunnel(tun *iris.Tunnel) { }
117 | 	func (b *EchoHandler) HandleDrop(reason error) { }
118 |
119 | func main() {
120 | service, err := iris.Register(55555, "echo", new(EchoHandler), nil)
121 | if err != nil {
122 | log.Fatalf("failed to register to the Iris relay: %v.", err)
123 | }
124 | defer service.Unregister()
125 | }
126 |
127 | Upon successful registration, Iris invokes the handler's Init method with the
128 | live iris.Connection object - the service's client connection - through which
129 | the service itself can initiate outbound requests. Init is called only once and
130 | is synchronized before any other handler method is invoked.
131 |
132 | Messaging through Iris
133 |
134 | Iris supports four messaging schemes: request/reply, broadcast, tunnel and
135 | publish/subscribe. The first three schemes always target a specific cluster:
136 | send a request to one member of a cluster and wait for the reply; broadcast a
137 | message to all members of a cluster; open a streamed, ordered and throttled
138 | communication tunnel to one member of a cluster. The publish/subscribe scheme is
139 | similar to broadcast, but any member of the network may subscribe to the same
140 | topic, hence breaking cluster boundaries.
141 |
142 | Schemes overview figure: https://dl.dropboxusercontent.com/u/10435909/Iris/messaging_schemes.png
143 |
144 | Presenting each primitive is out of scope, but for illustrative purposes the
145 | request/reply was included. Given the echo service registered above, we can send
146 | it requests and wait for replies through any client connection. Iris will
147 | automatically locate, route and load balance between all services registered
148 | under the addressed name.
149 |
150 | request := []byte("some request binary")
151 | if reply, err := conn.Request("echo", request, time.Second); err != nil {
152 | log.Printf("failed to execute request: %v.", err)
153 | } else {
154 | fmt.Printf("reply arrived: %v.", string(reply))
155 | }
156 |
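For symmetry, a minimal publish/subscribe sketch is included as well; the topic
name "events", the EventPrinter type and the payload below are illustrative
only, while broadcast usage is analogous through conn.Broadcast.

	// Topic handler printing every event delivered to the subscription
	type EventPrinter struct{}

	func (e *EventPrinter) HandleEvent(event []byte) {
		fmt.Printf("event arrived: %v.", string(event))
	}

	// Subscribe with default limits and give the subscription a moment to propagate
	if err := conn.Subscribe("events", new(EventPrinter), nil); err != nil {
		log.Fatalf("failed to subscribe: %v.", err)
	}
	defer conn.Unsubscribe("events")
	time.Sleep(100 * time.Millisecond)

	// Publish a best effort event to all current subscribers
	if err := conn.Publish("events", []byte("some event binary")); err != nil {
		log.Printf("failed to publish: %v.", err)
	}
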
157 | An expanded summary of the supported messaging schemes can be found in the core
158 | concepts [http://iris.karalabe.com/book/core_concepts] section of the book of
159 | Iris [http://iris.karalabe.com/book]. A detailed presentation and analysis of
160 | each individual primitive will be added soon.
161 |
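Until then, a hedged tunnel sketch is shown below; conn.Tunnel is part of the
documented API, whereas the Send, Recv and Close signatures are assumptions
based on this binding's tunnel implementation and should be verified against
tunnel.go.

	// Open a tunnel to a member of the echo cluster (one second setup limit)
	tun, err := conn.Tunnel("echo", time.Second)
	if err != nil {
		log.Fatalf("failed to open tunnel: %v.", err)
	}
	defer tun.Close() // assumed signature: Close() error

	// Send a message and wait for the ordered reply (assumed signatures)
	if err := tun.Send([]byte("some tunnel binary"), time.Second); err != nil {
		log.Fatalf("failed to send message: %v.", err)
	}
	if msg, err := tun.Recv(time.Second); err == nil {
		fmt.Printf("message arrived: %v.", string(msg))
	}
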
162 | Error handling
163 |
164 | The binding uses the idiomatic Go error handling mechanisms of returning error
165 | instances whenever a failure occurs. However, there are a few common cases that
166 | need to be individually checkable, hence a few special error values and types
167 | have been introduced.
168 |
169 | Many operations - such as requests and tunnels - can time out. To allow checking
170 | for this particular failure, Iris returns iris.ErrTimeout in such scenarios.
171 | Similarly, connections, services and tunnels may fail, in the case of which all
172 | pending operations terminate with iris.ErrClosed.
173 |
174 | Additionally, the request/reply pattern supports sending back an error instead of
175 | a reply to the caller. To enable the originating node to check whether a request
176 | failed locally or remotely, all remote errors are wrapped in an iris.RemoteError
177 | type.
178 |
179 | _, err := conn.Request("cluster", request, timeout)
180 | switch err {
181 | case nil:
182 | // Request completed successfully
183 | case iris.ErrTimeout:
184 | // Request timed out
185 | case iris.ErrClosed:
186 | // Connection terminated
187 | default:
188 | if _, ok := err.(*iris.RemoteError); ok {
189 | // Request failed remotely
190 | } else {
191 | // Requesting failed locally
192 | }
193 | }
194 |
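On the service side, producing such a remote error only requires returning a
non-nil error from the request handler. A failing variant of the earlier echo
handler's HandleRequest might look as follows (the error text is illustrative):

	// The returned error reaches the caller wrapped as an iris.RemoteError
	func (b *EchoHandler) HandleRequest(req []byte) ([]byte, error) {
		return nil, errors.New("echo disabled")
	}
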
195 | Resource capping
196 |
197 | To prevent the network from overwhelming an attached process, the binding places
198 | thread and memory limits on the broadcasts/requests inbound to a registered
199 | service as well as on the events received by a topic subscription. The thread
200 | limit defines the concurrent processing allowance, whereas the memory limit caps
201 | the maximal length of the pending queue.
202 |
203 | The default values - listed below - can be overridden during service registration
204 | and topic subscription via iris.ServiceLimits and iris.TopicLimits. Any unset
205 | fields (i.e. value of zero) will default to the preset ones.
206 |
207 | // Default limits of the threading and memory usage of a registered service.
208 | var defaultServiceLimits = ServiceLimits{
209 | BroadcastThreads: 4 * runtime.NumCPU(),
210 | BroadcastMemory: 64 * 1024 * 1024,
211 | RequestThreads: 4 * runtime.NumCPU(),
212 | RequestMemory: 64 * 1024 * 1024,
213 | }
214 |
215 | // Default limits of the threading and memory usage of a subscription.
216 | var defaultTopicLimits = TopicLimits{
217 | EventThreads: 4 * runtime.NumCPU(),
218 | EventMemory: 64 * 1024 * 1024,
219 | }
220 |
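As an illustration, below is a sketch of overriding a subset of the limits at
registration and subscription time; any field left unset falls back to the
defaults above, and the concrete values are arbitrary.

	// Allow only a single concurrent request handler, keep everything else default
	limits := &iris.ServiceLimits{RequestThreads: 1}
	service, err := iris.Register(55555, "echo", new(EchoHandler), limits)

	// Cap the pending event queue of a subscription at one megabyte
	topLimits := &iris.TopicLimits{EventMemory: 1024 * 1024}
	err = conn.Subscribe("events", new(EventPrinter), topLimits)
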
221 | There is also a sanity limit on the input buffer of a tunnel, but it is not
222 | exposed through the API as tunnels are meant as structural primitives, not
223 | sensitive to load. This may change in the future.
224 |
225 | Logging
226 |
227 | For logging purposes, the Go binding uses inconshreveable's [https://github.com/inconshreveable]
228 | log15 [https://github.com/inconshreveable/log15] library (version v2). By default,
229 | INFO level logs are collected and printed to stderr. This level allows tracking
230 | life-cycle events such as client and service attachments, topic subscriptions and
231 | tunnel establishments. Further log entries can be requested by lowering the level
232 | to DEBUG, effectively printing all messages passing through the binding.
233 |
234 | The binding's logger can be fine-tuned through the iris.Log variable. Below are
235 | a few common configurations.
236 |
237 | // Discard all log entries
238 | iris.Log.SetHandler(log15.DiscardHandler())
239 |
240 | // Log DEBUG level entries to STDERR
241 | iris.Log.SetHandler(log15.LvlFilterHandler(log15.LvlDebug, log15.StderrHandler))
242 |
243 | Each iris.Connection, iris.Service and iris.Tunnel has an embedded logger, through
244 | which contextual log entries may be printed (i.e. tagged with the specific id of
245 | the attached entity).
246 |
247 | conn, _ := iris.Connect(55555)
248 | defer conn.Close()
249 |
250 | conn.Log.Debug("debug entry, hidden by default")
251 | conn.Log.Info("info entry, client context included")
252 | conn.Log.Warn("warning entry", "extra", "some value")
253 | conn.Log.Crit("critical entry", "bool", false, "int", 1, "string", "two")
254 |
255 | As you can see below, all log entries have been automatically tagged with the
256 | 'client' attribute, set to the id of the current connection. Since the default
257 | log level is INFO, the conn.Log.Debug invocation has no effect. Additionally,
258 | arbitrarily many key-value pairs may be included in the entry.
259 |
260 | INFO[06-22|18:39:49] connecting new client client=1 relay_port=55555
261 | INFO[06-22|18:39:49] client connection established client=1
262 | INFO[06-22|18:39:49] info entry, client context included client=1
263 | WARN[06-22|18:39:49] warning entry client=1 extra="some value"
264 | CRIT[06-22|18:39:49] critical entry client=1 bool=false int=1 string=two
265 | INFO[06-22|18:39:49] detaching from relay client=1
266 |
267 | For further capabilities, configurations and details about the logger, please
268 | consult the log15 docs [https://godoc.org/github.com/inconshreveable/log15].
269 |
270 | Additional goodies
271 |
272 | You can find a teaser presentation, touching on all the key features of the
273 | library through a handful of challenges and their solutions. The recommended
274 | version is the playground [http://play.iris.karalabe.com/talks/binds/go.slide],
275 | containing modifiable and executable code snippets, but a read-only version
276 | [http://iris.karalabe.com/talks/binds/go.slide] is also available.
277 |
278 | */
279 | package iris
280 |
--------------------------------------------------------------------------------
/pubsub_test.go:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2013 Project Iris. All rights reserved.
2 | //
3 | // The current language binding is an official support library of the Iris
4 | // cloud messaging framework, and as such, the same licensing terms apply.
5 | // For details please see http://iris.karalabe.com/downloads#License
6 |
7 | package iris
8 |
9 | import (
10 | "errors"
11 | "fmt"
12 | "sync"
13 | "testing"
14 | "time"
15 |
16 | "github.com/project-iris/iris/pool"
17 | )
18 |
19 | // Service handler for the publish/subscribe tests.
20 | type publishTestServiceHandler struct {
21 | conn *Connection
22 | }
23 |
24 | func (p *publishTestServiceHandler) Init(conn *Connection) error { p.conn = conn; return nil }
25 | func (p *publishTestServiceHandler) HandleBroadcast(msg []byte) { panic("not implemented") }
26 | func (p *publishTestServiceHandler) HandleRequest(req []byte) ([]byte, error) {
27 | panic("not implemented")
28 | }
29 | func (p *publishTestServiceHandler) HandleTunnel(tun *Tunnel) { panic("not implemented") }
30 | func (p *publishTestServiceHandler) HandleDrop(reason error) { panic("not implemented") }
31 |
32 | // Topic handler for the publish/subscribe tests.
33 | type publishTestTopicHandler struct {
34 | delivers chan []byte
35 | }
36 |
37 | func (p *publishTestTopicHandler) HandleEvent(event []byte) { p.delivers <- event }
38 |
39 | // Multiple connections subscribe to the same batch of topics and publish to all.
40 | func TestPublish(t *testing.T) {
41 | // Test specific configurations
42 | conf := struct {
43 | clients int
44 | servers int
45 | topics int
46 | events int
47 | }{5, 5, 7, 15}
48 |
49 | // Pre-generate the topic names
50 | topics := make([]string, conf.topics)
51 | for i := 0; i < conf.topics; i++ {
52 | topics[i] = fmt.Sprintf("%s-%d", config.topic, i)
53 | }
54 |
55 | barrier := newBarrier(conf.clients + conf.servers)
56 | shutdown := new(sync.WaitGroup)
57 |
58 | // Start up the concurrent publishing clients
59 | for i := 0; i < conf.clients; i++ {
60 | shutdown.Add(1)
61 | go func(client int) {
62 | defer shutdown.Done()
63 |
64 | // Connect to the local relay
65 | conn, err := Connect(config.relay)
66 | if err != nil {
67 | barrier.Exit(fmt.Errorf("connection failed: %v", err))
68 | return
69 | }
70 | defer conn.Close()
71 |
72 | // Subscribe to the batch of topics
73 | hands := []*publishTestTopicHandler{}
74 | for _, topic := range topics {
75 | hand := &publishTestTopicHandler{
76 | delivers: make(chan []byte, (conf.clients+conf.servers)*conf.events),
77 | }
78 | if err := conn.Subscribe(topic, hand, nil); err != nil {
79 | barrier.Exit(fmt.Errorf("client subscription failed: %v", err))
80 | return
81 | }
82 | hands = append(hands, hand)
83 | defer func(topic string) {
84 | conn.Unsubscribe(topic)
85 | time.Sleep(100 * time.Millisecond)
86 | }(topic)
87 | }
88 | time.Sleep(100 * time.Millisecond)
89 | barrier.Sync()
90 |
91 | // Publish to all subscribers
92 | for i := 0; i < conf.events; i++ {
93 | event := fmt.Sprintf("client #%d, event %d", client, i)
94 | for _, topic := range topics {
95 | if err := conn.Publish(topic, []byte(event)); err != nil {
96 | barrier.Exit(fmt.Errorf("client publish failed: %v", err))
97 | return
98 | }
99 | }
100 | }
101 | barrier.Sync()
102 |
103 | // Verify all the topic deliveries
104 | if err := publishVerifyEvents(conf.clients, conf.servers, conf.events, hands); err != nil {
105 | barrier.Exit(err)
106 | return
107 | }
108 | barrier.Exit(nil)
109 | }(i)
110 | }
111 | // Start up the concurrent publishing services
112 | for i := 0; i < conf.servers; i++ {
113 | shutdown.Add(1)
114 | go func(server int) {
115 | defer shutdown.Done()
116 |
117 | // Create the service handler
118 | handler := new(publishTestServiceHandler)
119 |
120 | // Register a new service to the relay
121 | serv, err := Register(config.relay, config.cluster, handler, nil)
122 | if err != nil {
123 | barrier.Exit(fmt.Errorf("registration failed: %v", err))
124 | return
125 | }
126 | defer serv.Unregister()
127 |
128 | // Subscribe to the batch of topics
129 | hands := []*publishTestTopicHandler{}
130 | for _, topic := range topics {
131 | hand := &publishTestTopicHandler{
132 | delivers: make(chan []byte, (conf.clients+conf.servers)*conf.events),
133 | }
134 | if err := handler.conn.Subscribe(topic, hand, nil); err != nil {
135 | barrier.Exit(fmt.Errorf("service subscription failed: %v", err))
136 | return
137 | }
138 | hands = append(hands, hand)
139 | defer func(topic string) {
140 | handler.conn.Unsubscribe(topic)
141 | time.Sleep(100 * time.Millisecond)
142 | }(topic)
143 | }
144 | time.Sleep(100 * time.Millisecond)
145 | barrier.Sync()
146 |
147 | // Publish to all subscribers
148 | for i := 0; i < conf.events; i++ {
149 | event := fmt.Sprintf("server #%d, event %d", server, i)
150 | for _, topic := range topics {
151 | if err := handler.conn.Publish(topic, []byte(event)); err != nil {
152 | barrier.Exit(fmt.Errorf("server publish failed: %v", err))
153 | return
154 | }
155 | }
156 | }
157 | barrier.Sync()
158 |
159 | // Verify all the topic deliveries
160 | if err := publishVerifyEvents(conf.clients, conf.servers, conf.events, hands); err != nil {
161 | barrier.Exit(err)
162 | return
163 | }
164 | barrier.Exit(nil)
165 | }(i)
166 | }
167 | // Schedule the parallel operations
168 | if errs := barrier.Wait(); len(errs) != 0 {
169 | t.Fatalf("startup phase failed: %v.", errs)
170 | }
171 | if errs := barrier.Wait(); len(errs) != 0 {
172 | t.Fatalf("publishing phase failed: %v.", errs)
173 | }
174 | if errs := barrier.Wait(); len(errs) != 0 {
175 | t.Fatalf("verification phase failed: %v.", errs)
176 | }
177 | // Make sure all children terminated
178 | shutdown.Wait()
179 | }
180 |
181 | // Verifies the delivered topic events.
182 | func publishVerifyEvents(clients, servers, publishes int, hands []*publishTestTopicHandler) error {
183 | // Verify each topic handler separately
184 | for i := 0; i < len(hands); i++ {
185 | // Retrieve all the published events
186 | events := make(map[string]struct{})
187 | for j := 0; j < (clients+servers)*publishes; j++ {
188 | select {
189 | case event := <-hands[i].delivers:
190 | events[string(event)] = struct{}{}
191 | case <-time.After(time.Second):
192 | return errors.New("event retrieve timeout")
193 | }
194 | }
195 | // Verify all the individual events
196 | for j := 0; j < clients; j++ {
197 | for k := 0; k < publishes; k++ {
198 | msg := fmt.Sprintf("client #%d, event %d", j, k)
199 | if _, ok := events[msg]; !ok {
200 | return fmt.Errorf("event not found: %s", msg)
201 | }
202 | delete(events, msg)
203 | }
204 | }
205 | for j := 0; j < servers; j++ {
206 | for k := 0; k < publishes; k++ {
207 | msg := fmt.Sprintf("server #%d, event %d", j, k)
208 | if _, ok := events[msg]; !ok {
209 | return fmt.Errorf("event not found: %s", msg)
210 | }
211 | delete(events, msg)
212 | }
213 | }
214 | }
215 | return nil
216 | }
217 |
218 | // Topic handler for the publish/subscribe limit tests.
219 | type publishLimitTestTopicHandler struct {
220 | delivers chan []byte
221 | sleep time.Duration
222 | }
223 |
224 | func (p *publishLimitTestTopicHandler) HandleEvent(event []byte) {
225 | time.Sleep(p.sleep)
226 | p.delivers <- event
227 | }
228 |
229 | // Tests the topic subscription thread limitation.
230 | func TestPublishThreadLimit(t *testing.T) {
231 | // Test specific configurations
232 | conf := struct {
233 | messages int
234 | sleep time.Duration
235 | }{4, 100 * time.Millisecond}
236 |
237 | // Connect to the local relay
238 | conn, err := Connect(config.relay)
239 | if err != nil {
240 | t.Fatalf("connection failed: %v", err)
241 | }
242 | defer conn.Close()
243 |
244 | // Subscribe to a topic and wait for state propagation
245 | handler := &publishLimitTestTopicHandler{
246 | delivers: make(chan []byte, conf.messages),
247 | sleep: conf.sleep,
248 | }
249 | limits := &TopicLimits{EventThreads: 1}
250 |
251 | if err := conn.Subscribe(config.topic, handler, limits); err != nil {
252 | t.Fatalf("subscription failed: %v", err)
253 | }
254 | defer conn.Unsubscribe(config.topic)
255 | time.Sleep(100 * time.Millisecond)
256 |
257 | // Send a few publishes
258 | for i := 0; i < conf.messages; i++ {
259 | if err := conn.Publish(config.topic, []byte{byte(i)}); err != nil {
260 | t.Fatalf("event publish failed: %v.", err)
261 | }
262 | }
263 | 	// Wait for half of the total time and verify that only half was processed
264 | time.Sleep(time.Duration(conf.messages/2)*conf.sleep + conf.sleep/2)
265 | for i := 0; i < conf.messages/2; i++ {
266 | select {
267 | case <-handler.delivers:
268 | default:
269 | t.Errorf("event #%d not received", i)
270 | }
271 | }
272 | select {
273 | case <-handler.delivers:
274 | t.Errorf("additional event received")
275 | default:
276 | }
277 | }
278 |
279 | // Tests the subscription memory limitation.
280 | func TestPublishMemoryLimit(t *testing.T) {
281 | // Test specific configurations
282 | conf := struct {
283 | messages int
284 | sleep time.Duration
285 | }{4, 100 * time.Millisecond}
286 |
287 | // Connect to the local relay
288 | conn, err := Connect(config.relay)
289 | if err != nil {
290 | t.Fatalf("connection failed: %v", err)
291 | }
292 | defer conn.Close()
293 |
294 | // Subscribe to a topic and wait for state propagation
295 | handler := &publishTestTopicHandler{
296 | delivers: make(chan []byte, conf.messages),
297 | }
298 | limits := &TopicLimits{EventMemory: 1}
299 |
300 | if err := conn.Subscribe(config.topic, handler, limits); err != nil {
301 | t.Fatalf("subscription failed: %v", err)
302 | }
303 | defer conn.Unsubscribe(config.topic)
304 | time.Sleep(100 * time.Millisecond)
305 |
306 | // Check that a 1 byte publish passes
307 | if err := conn.Publish(config.topic, []byte{0x00}); err != nil {
308 | t.Fatalf("small publish failed: %v.", err)
309 | }
310 | time.Sleep(time.Millisecond)
311 | select {
312 | case <-handler.delivers:
313 | default:
314 | t.Fatalf("small publish not received.")
315 | }
316 | // Check that a 2 byte publish is dropped
317 | if err := conn.Publish(config.topic, []byte{0x00, 0x00}); err != nil {
318 | t.Fatalf("large publish failed: %v.", err)
319 | }
320 | time.Sleep(time.Millisecond)
321 | select {
322 | case <-handler.delivers:
323 | t.Fatalf("large publish received.")
324 | default:
325 | }
326 | // Check that space freed gets replenished
327 | if err := conn.Publish(config.topic, []byte{0x00}); err != nil {
328 | t.Fatalf("second small publish failed: %v.", err)
329 | }
330 | time.Sleep(time.Millisecond)
331 | select {
332 | case <-handler.delivers:
333 | default:
334 | t.Fatalf("second small publish not received.")
335 | }
336 | }
337 |
338 | // Benchmarks the latency of a single publish operation.
339 | func BenchmarkPublishLatency(b *testing.B) {
340 | // Connect to the local relay
341 | conn, err := Connect(config.relay)
342 | if err != nil {
343 | b.Fatalf("connection failed: %v", err)
344 | }
345 | defer conn.Close()
346 |
347 | // Subscribe to a topic and wait for state propagation
348 | handler := &publishTestTopicHandler{
349 | delivers: make(chan []byte, b.N),
350 | }
351 | if err := conn.Subscribe(config.topic, handler, nil); err != nil {
352 | b.Fatalf("subscription failed: %v", err)
353 | }
354 | defer conn.Unsubscribe(config.topic)
355 | time.Sleep(100 * time.Millisecond)
356 |
357 | // Reset timer and time sync publish
358 | b.ResetTimer()
359 | for i := 0; i < b.N; i++ {
360 | if err := conn.Publish(config.topic, []byte{byte(i)}); err != nil {
361 | b.Fatalf("failed to publish: %v.", err)
362 | }
363 | <-handler.delivers
364 | }
365 | // Stop the timer (don't measure deferred cleanup)
366 | b.StopTimer()
367 | }
368 |
369 | // Benchmarks the throughput of a stream of concurrent publishes.
370 | func BenchmarkPublishThroughput1Threads(b *testing.B) {
371 | benchmarkPublishThroughput(1, b)
372 | }
373 |
374 | func BenchmarkPublishThroughput2Threads(b *testing.B) {
375 | benchmarkPublishThroughput(2, b)
376 | }
377 |
378 | func BenchmarkPublishThroughput4Threads(b *testing.B) {
379 | benchmarkPublishThroughput(4, b)
380 | }
381 |
382 | func BenchmarkPublishThroughput8Threads(b *testing.B) {
383 | benchmarkPublishThroughput(8, b)
384 | }
385 |
386 | func BenchmarkPublishThroughput16Threads(b *testing.B) {
387 | benchmarkPublishThroughput(16, b)
388 | }
389 |
390 | func BenchmarkPublishThroughput32Threads(b *testing.B) {
391 | benchmarkPublishThroughput(32, b)
392 | }
393 |
394 | func BenchmarkPublishThroughput64Threads(b *testing.B) {
395 | benchmarkPublishThroughput(64, b)
396 | }
397 |
398 | func BenchmarkPublishThroughput128Threads(b *testing.B) {
399 | benchmarkPublishThroughput(128, b)
400 | }
401 |
402 | func benchmarkPublishThroughput(threads int, b *testing.B) {
403 | // Connect to the local relay
404 | conn, err := Connect(config.relay)
405 | if err != nil {
406 | b.Fatalf("connection failed: %v", err)
407 | }
408 | defer conn.Close()
409 |
410 | // Subscribe to a topic and wait for state propagation
411 | handler := &publishTestTopicHandler{
412 | delivers: make(chan []byte, b.N),
413 | }
414 | if err := conn.Subscribe(config.topic, handler, nil); err != nil {
415 | b.Fatalf("subscription failed: %v", err)
416 | }
417 | defer conn.Unsubscribe(config.topic)
418 | time.Sleep(100 * time.Millisecond)
419 |
420 | // Create the thread pool with the concurrent publishes
421 | workers := pool.NewThreadPool(threads)
422 | 	for i := 0; i < b.N; i++ {
423 | 		i := i // capture the loop variable by value, otherwise every closure shares the final i
424 | 		workers.Schedule(func() {
425 | 			if err := conn.Publish(config.topic, []byte{byte(i)}); err != nil {
426 | 				b.Errorf("failed to publish: %v.", err) // Errorf: Fatalf may not be called from a worker goroutine
427 | 			}
428 | 		})
429 | 	}
430 | 	// Reset timer and benchmark the message transfer
431 | 	b.ResetTimer()
432 | 	workers.Start()
433 | 	for i := 0; i < b.N; i++ {
434 | 		<-handler.delivers
435 | 	}
436 | 	workers.Terminate(false)
437 | 
438 | 	// Stop the timer (don't measure deferred cleanup)
439 | 	b.StopTimer()
440 | }
441 | 
--------------------------------------------------------------------------------
/reqrep_test.go:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2013 Project Iris. All rights reserved.
2 | //
3 | // The current language binding is an official support library of the Iris
4 | // cloud messaging framework, and as such, the same licensing terms apply.
5 | // For details please see http://iris.karalabe.com/downloads#License
6 |
7 | package iris
8 |
9 | import (
10 | "errors"
11 | "fmt"
12 | "sync"
13 | "sync/atomic"
14 | "testing"
15 | "time"
16 |
17 | "github.com/project-iris/iris/pool"
18 | )
19 |
20 | // Service handler for the request/reply tests.
21 | type requestTestHandler struct {
22 | conn *Connection
23 | }
24 |
25 | func (r *requestTestHandler) Init(conn *Connection) error { r.conn = conn; return nil }
26 | func (r *requestTestHandler) HandleBroadcast(msg []byte) { panic("not implemented") }
27 | func (r *requestTestHandler) HandleRequest(req []byte) ([]byte, error) { return req, nil }
28 | func (r *requestTestHandler) HandleTunnel(tun *Tunnel) { panic("not implemented") }
29 | func (r *requestTestHandler) HandleDrop(reason error) { panic("not implemented") }
30 |
31 | // Service handler for the request/reply failure tests.
32 | type requestFailTestHandler struct {
33 | conn *Connection
34 | }
35 |
36 | func (r *requestFailTestHandler) Init(conn *Connection) error { r.conn = conn; return nil }
37 | func (r *requestFailTestHandler) HandleBroadcast(msg []byte) { panic("not implemented") }
38 | func (r *requestFailTestHandler) HandleTunnel(tun *Tunnel) { panic("not implemented") }
39 | func (r *requestFailTestHandler) HandleDrop(reason error) { panic("not implemented") }
40 |
41 | func (r *requestFailTestHandler) HandleRequest(req []byte) ([]byte, error) {
42 | return nil, errors.New(string(req))
43 | }
44 |
45 | // Tests multiple concurrent client and service requests.
46 | func TestRequest(t *testing.T) {
47 | // Test specific configurations
48 | conf := struct {
49 | clients int
50 | servers int
51 | requests int
52 | }{25, 25, 25}
53 |
54 | barrier := newBarrier(conf.clients + conf.servers)
55 | shutdown := new(sync.WaitGroup)
56 |
57 | // Start up the concurrent requesting clients
58 | for i := 0; i < conf.clients; i++ {
59 | shutdown.Add(1)
60 | go func(client int) {
61 | defer shutdown.Done()
62 |
63 | // Connect to the local relay
64 | conn, err := Connect(config.relay)
65 | if err != nil {
66 | barrier.Exit(fmt.Errorf("connection failed: %v", err))
67 | return
68 | }
69 | defer conn.Close()
70 | barrier.Sync()
71 |
72 | // Request from the service cluster
73 | for i := 0; i < conf.requests; i++ {
74 | request := fmt.Sprintf("client #%d, request %d", client, i)
75 | if reply, err := conn.Request(config.cluster, []byte(request), time.Second); err != nil {
76 | barrier.Exit(fmt.Errorf("client request failed: %v", err))
77 | return
78 | } else if string(reply) != request {
79 | barrier.Exit(fmt.Errorf("client invalid reply: have %v, want %v", string(reply), request))
80 | return
81 | }
82 | }
83 | barrier.Exit(nil)
84 | }(i)
85 | }
86 | // Start up the concurrent request services
87 | for i := 0; i < conf.servers; i++ {
88 | shutdown.Add(1)
89 | go func(server int) {
90 | defer shutdown.Done()
91 |
92 | // Create the service handler
93 | handler := new(requestTestHandler)
94 |
95 | // Register a new service to the relay
96 | serv, err := Register(config.relay, config.cluster, handler, nil)
97 | if err != nil {
98 | barrier.Exit(fmt.Errorf("registration failed: %v", err))
99 | return
100 | }
101 | defer serv.Unregister()
102 | barrier.Sync()
103 |
104 | // Request from the service cluster
105 | for i := 0; i < conf.requests; i++ {
106 | request := fmt.Sprintf("server #%d, request %d", server, i)
107 | if reply, err := handler.conn.Request(config.cluster, []byte(request), time.Second); err != nil {
108 | barrier.Exit(fmt.Errorf("server request failed: %v", err))
109 | return
110 | } else if string(reply) != request {
111 | barrier.Exit(fmt.Errorf("server invalid reply: have %v, want %v", string(reply), request))
112 | return
113 | }
114 | }
115 | barrier.Exit(nil)
116 | }(i)
117 | }
118 | // Schedule the parallel operations
119 | if errs := barrier.Wait(); len(errs) != 0 {
120 | t.Fatalf("startup phase failed: %v.", errs)
121 | }
122 | if errs := barrier.Wait(); len(errs) != 0 {
123 | t.Fatalf("request phase failed: %v.", errs)
124 | }
125 | // Make sure all children terminated
126 | shutdown.Wait()
127 | }
128 |
129 | // Tests request failure forwarding.
130 | func TestRequestFail(t *testing.T) {
131 | // Test specific configurations
132 | conf := struct {
133 | requests int
134 | }{125}
135 |
136 | // Create the service handler
137 | handler := new(requestFailTestHandler)
138 |
139 | // Register a new service to the relay
140 | serv, err := Register(config.relay, config.cluster, handler, nil)
141 | if err != nil {
142 | t.Fatalf("registration failed: %v", err)
143 | }
144 | defer serv.Unregister()
145 |
146 | // Request from the failing service cluster
147 | for i := 0; i < conf.requests; i++ {
148 | request := fmt.Sprintf("failure %d", i)
149 | reply, err := handler.conn.Request(config.cluster, []byte(request), time.Second)
150 | if err == nil {
151 | t.Fatalf("request didn't fail: %v.", reply)
152 | } else if _, ok := err.(*RemoteError); !ok {
153 | t.Fatalf("request didn't fail remotely: %v.", err)
154 | } else if err.Error() != request {
155 | t.Fatalf("error message mismatch: have %v, want %v.", err, request)
156 | }
157 | }
158 | }
159 |
160 | // Service handler for the request/reply limit tests.
161 | type requestTestTimedHandler struct {
162 | conn *Connection
163 | sleep time.Duration
164 | }
165 |
166 | func (r *requestTestTimedHandler) Init(conn *Connection) error { r.conn = conn; return nil }
167 | func (r *requestTestTimedHandler) HandleBroadcast(msg []byte) { panic("not implemented") }
168 | func (r *requestTestTimedHandler) HandleTunnel(tun *Tunnel) { panic("not implemented") }
169 | func (r *requestTestTimedHandler) HandleDrop(reason error) { panic("not implemented") }
170 |
171 | func (r *requestTestTimedHandler) HandleRequest(req []byte) ([]byte, error) {
172 | time.Sleep(r.sleep)
173 | return req, nil
174 | }
175 |
176 | // Tests the request timeouts.
177 | func TestRequestTimeout(t *testing.T) {
178 | // Test specific configurations
179 | conf := struct {
180 | sleep time.Duration
181 | }{25 * time.Millisecond}
182 |
183 | // Create the service handler
184 | handler := &requestTestTimedHandler{
185 | sleep: conf.sleep,
186 | }
187 | // Register a new service to the relay
188 | serv, err := Register(config.relay, config.cluster, handler, nil)
189 | if err != nil {
190 | t.Fatalf("registration failed: %v.", err)
191 | }
192 | defer serv.Unregister()
193 |
194 | // Check that the timeouts are complied with.
195 | if _, err := handler.conn.Request(config.cluster, []byte{0x00}, conf.sleep*2); err != nil {
196 | t.Fatalf("longer timeout failed: %v.", err)
197 | }
198 | if rep, err := handler.conn.Request(config.cluster, []byte{0x00}, conf.sleep/2); err == nil {
199 | t.Fatalf("shorter timeout succeeded: %v.", rep)
200 | }
201 | }
202 |
203 | // Tests the request thread limitation.
204 | func TestRequestThreadLimit(t *testing.T) {
205 | // Test specific configurations
206 | conf := struct {
207 | requests int
208 | sleep time.Duration
209 | }{4, 25 * time.Millisecond}
210 |
211 | // Create the service handler and limiter
212 | handler := &requestTestTimedHandler{
213 | sleep: conf.sleep,
214 | }
215 | limits := &ServiceLimits{RequestThreads: 1}
216 |
217 | // Register a new service to the relay
218 | serv, err := Register(config.relay, config.cluster, handler, limits)
219 | if err != nil {
220 | t.Fatalf("registration failed: %v.", err)
221 | }
222 | defer serv.Unregister()
223 |
224 | // Start a batch of requesters
225 | done := make(chan struct{}, conf.requests)
226 | for i := 0; i < conf.requests; i++ {
227 | go func() {
228 | defer func() { done <- struct{}{} }()
229 | if _, err := handler.conn.Request(config.cluster, []byte{0x00}, time.Duration(conf.requests+1)*conf.sleep); err != nil {
230 | 				t.Errorf("request failed: %v.", err) // Errorf: Fatalf must not be called from a spawned goroutine
231 | }
232 | }()
233 | }
234 | // Wait for half of them to complete and verify completion
235 | time.Sleep(time.Duration(conf.requests/2)*conf.sleep + conf.sleep/2)
236 | for i := 0; i < conf.requests/2; i++ {
237 | select {
238 | case <-done:
239 | default:
240 | t.Fatalf("request #%d not completed.", i)
241 | }
242 | }
243 | select {
244 | case <-done:
245 | t.Fatalf("extra request completed.")
246 | default:
247 | }
248 | // Wait for the rest to complete
249 | time.Sleep(time.Duration(conf.requests/2)*conf.sleep + conf.sleep/2)
250 | for i := conf.requests / 2; i < conf.requests; i++ {
251 | select {
252 | case <-done:
253 | default:
254 | t.Fatalf("request #%d not completed.", i)
255 | }
256 | }
257 | }
258 |
259 | // Tests the request memory limitation.
260 | func TestRequestMemoryLimit(t *testing.T) {
261 | // Create the service handler and limiter
262 | handler := new(requestTestHandler)
263 | limits := &ServiceLimits{RequestMemory: 1}
264 |
265 | // Register a new service to the relay
266 | serv, err := Register(config.relay, config.cluster, handler, limits)
267 | if err != nil {
268 | t.Fatalf("registration failed: %v.", err)
269 | }
270 | defer serv.Unregister()
271 |
272 | // Check that a 1 byte request passes
273 | if _, err := handler.conn.Request(config.cluster, []byte{0x00}, 25*time.Millisecond); err != nil {
274 | t.Fatalf("small request failed: %v.", err)
275 | }
276 | // Check that a 2 byte request is dropped
277 | if rep, err := handler.conn.Request(config.cluster, []byte{0x00, 0x00}, 25*time.Millisecond); err != ErrTimeout {
278 | t.Fatalf("large request didn't time out: %v : %v.", rep, err)
279 | }
280 | // Check that space freed gets replenished
281 | if _, err := handler.conn.Request(config.cluster, []byte{0x00}, 25*time.Millisecond); err != nil {
282 | t.Fatalf("second small request failed: %v.", err)
283 | }
284 | }
285 |
286 | // Service handler for the request/reply expiry tests.
287 | type requestTestExpiryHandler struct {
288 | conn *Connection
289 | sleep time.Duration
290 | done int32
291 | }
292 |
293 | func (r *requestTestExpiryHandler) Init(conn *Connection) error { r.conn = conn; return nil }
294 | func (r *requestTestExpiryHandler) HandleBroadcast(msg []byte) { panic("not implemented") }
295 | func (r *requestTestExpiryHandler) HandleTunnel(tun *Tunnel) { panic("not implemented") }
296 | func (r *requestTestExpiryHandler) HandleDrop(reason error) { panic("not implemented") }
297 |
298 | func (r *requestTestExpiryHandler) HandleRequest(req []byte) ([]byte, error) {
299 | time.Sleep(r.sleep)
300 | atomic.AddInt32(&r.done, 1)
301 | return req, nil
302 | }
303 |
304 | // Tests that enqueued but expired requests don't get executed.
305 | func TestRequestExpiration(t *testing.T) {
306 | // Test specific configurations
307 | conf := struct {
308 | requests int
309 | sleep time.Duration
310 | }{4, 25 * time.Millisecond}
311 |
312 | // Create the service handler and limiter
313 | handler := &requestTestExpiryHandler{
314 | sleep: conf.sleep,
315 | }
316 | limits := &ServiceLimits{RequestThreads: 1}
317 |
318 | // Register a new service to the relay
319 | serv, err := Register(config.relay, config.cluster, handler, limits)
320 | if err != nil {
321 | t.Fatalf("registration failed: %v.", err)
322 | }
323 | defer serv.Unregister()
324 |
325 | 	// Start a batch of concurrent requesters (all but one should expire in the queue)
326 | var pend sync.WaitGroup
327 | for i := 0; i < conf.requests; i++ {
328 | pend.Add(1)
329 | go func() {
330 | defer pend.Done()
331 | handler.conn.Request(config.cluster, []byte{0x00}, time.Millisecond)
332 | }()
333 | }
334 | // Wait for all of them to complete and verify that all but 1 expired
335 | pend.Wait()
336 | time.Sleep(time.Duration(conf.requests+1) * conf.sleep)
337 | if done := atomic.LoadInt32(&handler.done); done != 1 {
338 | t.Fatalf("executed request count mismatch: have %v, want %v.", done, 1)
339 | }
340 | }
341 |
342 | // Benchmarks the latency of a single request/reply operation.
343 | func BenchmarkRequestLatency(b *testing.B) {
344 | // Create the service handler
345 | handler := new(requestTestHandler)
346 |
347 | // Register a new service to the relay
348 | serv, err := Register(config.relay, config.cluster, handler, nil)
349 | if err != nil {
350 | b.Fatalf("registration failed: %v.", err)
351 | }
352 | defer serv.Unregister()
353 |
354 | // Reset timer and benchmark the message transfer
355 | b.ResetTimer()
356 | for i := 0; i < b.N; i++ {
357 | if _, err := handler.conn.Request(config.cluster, []byte{byte(i)}, time.Second); err != nil {
358 | b.Fatalf("request failed: %v.", err)
359 | }
360 | }
361 | // Stop the timer (don't measure deferred cleanup)
362 | b.StopTimer()
363 | }
364 |
365 | // Benchmarks the throughput of a stream of concurrent requests.
366 | func BenchmarkRequestThroughput1Threads(b *testing.B) {
367 | benchmarkRequestThroughput(1, b)
368 | }
369 |
370 | func BenchmarkRequestThroughput2Threads(b *testing.B) {
371 | benchmarkRequestThroughput(2, b)
372 | }
373 |
374 | func BenchmarkRequestThroughput4Threads(b *testing.B) {
375 | benchmarkRequestThroughput(4, b)
376 | }
377 |
378 | func BenchmarkRequestThroughput8Threads(b *testing.B) {
379 | benchmarkRequestThroughput(8, b)
380 | }
381 |
382 | func BenchmarkRequestThroughput16Threads(b *testing.B) {
383 | benchmarkRequestThroughput(16, b)
384 | }
385 |
386 | func BenchmarkRequestThroughput32Threads(b *testing.B) {
387 | benchmarkRequestThroughput(32, b)
388 | }
389 |
390 | func BenchmarkRequestThroughput64Threads(b *testing.B) {
391 | benchmarkRequestThroughput(64, b)
392 | }
393 |
394 | func BenchmarkRequestThroughput128Threads(b *testing.B) {
395 | benchmarkRequestThroughput(128, b)
396 | }
397 |
398 | func benchmarkRequestThroughput(threads int, b *testing.B) {
399 | // Create the service handler
400 | handler := new(requestTestHandler)
401 |
402 | // Register a new service to the relay
403 | serv, err := Register(config.relay, config.cluster, handler, nil)
404 | if err != nil {
405 | b.Fatalf("registration failed: %v.", err)
406 | }
407 | defer serv.Unregister()
408 |
409 | // Create the thread pool with the concurrent requests
410 | workers := pool.NewThreadPool(threads)
411 | 	for i := 0; i < b.N; i++ {
412 | 		i := i // capture the loop variable; the closures only run once the pool is started
413 | 		workers.Schedule(func() {
414 | 			if _, err := handler.conn.Request(config.cluster, []byte{byte(i)}, 10*time.Second); err != nil {
415 | 				b.Fatalf("request failed: %v.", err)
416 | 			}
417 | 		})
418 | 	}
419 | 	// Reset timer and benchmark the message transfer
420 | 	b.ResetTimer()
421 | 	workers.Start()
422 | 	workers.Terminate(false)
423 | 
424 | 	// Stop the timer (don't measure deferred cleanup)
425 | 	b.StopTimer()
426 | }
427 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | Iris Go binding
2 | ===================
3 |
4 | This is the official Go language binding for the Iris cloud messaging framework. Version `v1` of the binding is compatible with Iris `v0.3.0` and newer.
5 |
6 | If you are unfamiliar with Iris, please read the next introductory section. It contains a short summary, as well as some valuable pointers on where you can discover more.
7 |
8 | Background
9 | -------------------
10 |
11 | Iris is an attempt at bringing the simplicity and elegance of cloud computing to the application layer. Consumer clouds provide unlimited virtual machines at the click of a button, but leave it to the developer to wire them together. Iris ensures that you can forget about networking challenges and instead focus on solving your own domain problems.
12 |
13 | It is a completely decentralized messaging solution for simplifying the design and implementation of cloud services. Among other features, Iris offers zero-configuration (i.e. start it up and it will do its magic), semantic addressing (i.e. applications use textual names to address each other), clusters as units (i.e. automatic load balancing between apps of the same name) and perfect secrecy (i.e. all network traffic is encrypted).
14 |
15 | You can find further information on the [Iris website](http://iris.karalabe.com) and details of the above features in the [core concepts](http://iris.karalabe.com/book/core_concepts) section of [the book of Iris](http://iris.karalabe.com/book). For the scientifically inclined, a small collection of [papers](http://iris.karalabe.com/papers) featuring Iris is also available. Slides and videos of previously given public presentations are published on the [talks](http://iris.karalabe.com/talks) page.
16 |
17 | There is a growing community on Twitter [@iriscmf](https://twitter.com/iriscmf), Google groups [project-iris](https://groups.google.com/group/project-iris) and GitHub [project-iris](https://github.com/project-iris).
18 |
19 | Installation
20 | ----------------
21 |
22 | To get the package, execute:
23 |
24 | go get gopkg.in/project-iris/iris-go.v1
25 |
26 | To import this package, add the following line to your code:
27 |
28 | import "gopkg.in/project-iris/iris-go.v1"
29 |
30 | Refer to it as _iris_.
31 |
32 | Quickstart
33 | --------------
34 |
35 | Iris uses a relaying architecture, where client applications do not communicate directly with one another, but instead delegate all messaging operations to a local relay process responsible for transferring the messages to the correct destinations. Hence, the first step to using Iris through any binding is setting up the local [_relay_ _node_](http://iris.karalabe.com/downloads). You can find detailed information in the [Run, Forrest, Run](http://iris.karalabe.com/book/run_forrest_run) section of [the book of Iris](http://iris.karalabe.com/book), but a very simple way to get started is to launch a _developer_ node.
36 |
37 | > iris -dev
38 | Entering developer mode
39 | Generating random RSA key... done.
40 | Generating random network name... done.
41 |
42 | 2014/06/13 18:13:47 main: booting iris overlay...
43 | 2014/06/13 18:13:47 scribe: booting with id 369650985814.
44 | 2014/06/13 18:13:57 main: iris overlay converged with 0 remote connections.
45 | 2014/06/13 18:13:57 main: booting relay service...
46 | 2014/06/13 18:13:57 main: iris successfully booted, listening on port 55555.
47 |
48 | Since it generates random credentials, a developer node will not be able to connect with other remote nodes in the network. However, it provides a quick solution to start developing without needing to configure a _network_ _name_ and associated _access_ _key_. Should you wish to interconnect multiple nodes, please provide the `-net` and `-rsa` flags.
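For example, joining a named network might look like the command below; the network name and key file are placeholder values, so please consult the relay documentation for the exact flag semantics:

    iris -net somenetwork -rsa somekey.pem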
49 |
50 | ### Attaching to the relay
51 |
52 | After successfully booting, the relay opens a _local_ TCP endpoint (port `55555` by default, configurable using `-port`) through which arbitrarily many entities may attach. Each connecting entity may also decide whether it becomes a simple _client_ only consuming the services provided by other participants, or a full-fledged _service_, also making functionality available to others for consumption.
53 |
54 | Connecting as a client can be done trivially by invoking [`iris.Connect`](http://godoc.org/gopkg.in/project-iris/iris-go.v1#Connect) with the port number of the local relay's client endpoint. After the attachment is completed, an [`iris.Connection`](http://godoc.org/gopkg.in/project-iris/iris-go.v1#Connection) instance is returned through which messaging can begin. A client cannot accept inbound requests, broadcasts and tunnels, only initiate them.
55 |
56 | ```go
57 | conn, err := iris.Connect(55555)
58 | if err != nil {
59 | log.Fatalf("failed to connect to the Iris relay: %v.", err)
60 | }
61 | defer conn.Close()
62 | ```
63 |
64 | To provide functionality for consumption, an entity needs to register as a service. This is slightly more involved, as besides initiating a registration request, it also needs to specify a callback handler to process inbound events. First, the callback handler needs to implement the [`iris.ServiceHandler`](http://godoc.org/gopkg.in/project-iris/iris-go.v1#ServiceHandler) interface. After creating the handler, registration can commence by invoking [`iris.Register`](http://godoc.org/gopkg.in/project-iris/iris-go.v1#Register) with: the port number of the local relay's client endpoint; the sub-service cluster this entity will join as a member; the handler itself to process inbound messages; and an optional resource cap.
65 |
66 | ```go
67 | type EchoHandler struct {}
68 |
69 | func (b *EchoHandler) Init(conn *iris.Connection) error { return nil }
70 | func (b *EchoHandler) HandleBroadcast(msg []byte) { }
71 | func (b *EchoHandler) HandleRequest(req []byte) ([]byte, error) { return req, nil }
72 | func (b *EchoHandler) HandleTunnel(tun *iris.Tunnel) { }
73 | func (b *EchoHandler) HandleDrop(reason error) { }
74 |
75 | func main() {
76 | service, err := iris.Register(55555, "echo", new(EchoHandler), nil)
77 | if err != nil {
78 | log.Fatalf("failed to register to the Iris relay: %v.", err)
79 | }
80 | defer service.Unregister()
81 | }
82 | ```
83 |
84 | Upon successful registration, Iris invokes the handler's `Init` method with the live [`iris.Connection`](http://godoc.org/gopkg.in/project-iris/iris-go.v1#Connection) object - the service's client connection - through which the service itself can initiate outbound requests. `Init` is called only once and is synchronized before any other handler method is invoked.
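A common pattern - used throughout the binding's own test suite - is to retain this connection inside `Init`, so that the service can later issue its own outbound calls. For example, the `EchoHandler` above could be extended as follows:

```go
type EchoHandler struct {
	conn *iris.Connection
}

// Init stores the live client connection for later outbound messaging.
func (b *EchoHandler) Init(conn *iris.Connection) error {
	b.conn = conn
	return nil
}
```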
85 |
86 | ### Messaging through Iris
87 |
88 | Iris supports four messaging schemes: request/reply, broadcast, tunnel and publish/subscribe. The first three schemes always target a specific cluster: send a request to _one_ member of a cluster and wait for the reply; broadcast a message to _all_ members of a cluster; open a streamed, ordered and throttled communication tunnel to _one_ member of a cluster. Publish/subscribe is similar to broadcast, but _any_ member of the network may subscribe to the same topic, hence breaking cluster boundaries.
89 |
90 |
91 |
92 | Presenting each primitive is out of scope here, but for illustrative purposes a request/reply example is included. Given the echo service registered above, we can send it requests and wait for replies through any client connection. Iris will automatically locate, route to and load balance between all services registered under the addressed name.
93 |
94 | ```go
95 | request := []byte("some request binary")
96 | if reply, err := conn.Request("echo", request, time.Second); err != nil {
97 | log.Printf("failed to execute request: %v.", err)
98 | } else {
99 | fmt.Printf("reply arrived: %v.", string(reply))
100 | }
101 | ```
102 |
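The other primitives are invoked in the same spirit. The snippet below is only a sketch: the method names follow the binding's godoc (`Broadcast`, `Publish` and `Tunnel` on [`iris.Connection`](http://godoc.org/gopkg.in/project-iris/iris-go.v1#Connection)), but please verify the exact signatures there before use.

```go
// Fire-and-forget a message to every member of the "echo" cluster
if err := conn.Broadcast("echo", []byte("to everyone")); err != nil {
	log.Printf("broadcast failed: %v.", err)
}
// Publish an event to a topic, delivered to all current subscribers
if err := conn.Publish("events", []byte("some event")); err != nil {
	log.Printf("publish failed: %v.", err)
}
// Open an ordered, throttled stream to one member of the cluster
if tun, err := conn.Tunnel("echo", time.Second); err == nil {
	defer tun.Close()
}
```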
103 | An expanded summary of the supported messaging schemes can be found in the [core concepts](http://iris.karalabe.com/book/core_concepts) section of [the book of Iris](http://iris.karalabe.com/book). A detailed presentation and analysis of each individual primitive will be added soon.
104 |
105 | ### Error handling
106 |
107 | The binding uses the idiomatic Go error handling mechanism of returning `error` instances whenever a failure occurs. However, there are a few common cases that need to be individually checkable, hence a few special error values and types have been introduced.
108 |
109 | Many operations - such as requests and tunnels - can time out. To allow checking for this particular failure, Iris returns [`iris.ErrTimeout`](http://godoc.org/gopkg.in/project-iris/iris-go.v1#pkg-variables) in such scenarios. Similarly, connections, services and tunnels may fail, in which case all pending operations terminate with [`iris.ErrClosed`](http://godoc.org/gopkg.in/project-iris/iris-go.v1#pkg-variables).
110 |
111 | Additionally, the request/reply pattern supports sending back an error instead of a reply to the caller. To enable the originating node to check whether a request failed locally or remotely, all remote errors are wrapped in an [`iris.RemoteError`](http://godoc.org/gopkg.in/project-iris/iris-go.v1#RemoteError) type.
112 |
113 | ```go
114 | _, err := conn.Request("cluster", request, timeout)
115 | switch err {
116 | case nil:
117 | // Request completed successfully
118 | case iris.ErrTimeout:
119 | // Request timed out
120 | case iris.ErrClosed:
121 | // Connection terminated
122 | default:
123 | if _, ok := err.(*iris.RemoteError); ok {
124 | // Request failed remotely
125 | } else {
126 | // Requesting failed locally
127 | }
128 | }
129 | ```
130 |
131 | ### Resource capping
132 |
133 | To prevent the network from overwhelming an attached process, the binding places thread and memory limits on the broadcasts/requests inbound to a registered service, as well as on the events received by a topic subscription. The thread limit defines the concurrent processing allowance, whereas the memory limit caps the total size of the pending message queue.
134 |
135 | The default values - listed below - can be overridden during service registration and topic subscription via [`iris.ServiceLimits`](http://godoc.org/gopkg.in/project-iris/iris-go.v1#ServiceLimits) and [`iris.TopicLimits`](http://godoc.org/gopkg.in/project-iris/iris-go.v1#TopicLimits). Any unset fields (i.e. value of zero) will default to the preset ones.
136 |
137 | ```go
138 | // Default limits of the threading and memory usage of a registered service.
139 | var defaultServiceLimits = ServiceLimits{
140 | BroadcastThreads: 4 * runtime.NumCPU(),
141 | BroadcastMemory: 64 * 1024 * 1024,
142 | RequestThreads: 4 * runtime.NumCPU(),
143 | RequestMemory: 64 * 1024 * 1024,
144 | }
145 |
146 | // Default limits of the threading and memory usage of a subscription.
147 | var defaultTopicLimits = TopicLimits{
148 | EventThreads: 4 * runtime.NumCPU(),
149 | EventMemory: 64 * 1024 * 1024,
150 | }
151 | ```
152 |
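For instance, to force strictly serial request processing while keeping every other limit at its default, a service registration could look like the following (mirroring the binding's own limiter tests):

```go
// Process requests one at a time; zero-valued fields keep their defaults
limits := &iris.ServiceLimits{RequestThreads: 1}

service, err := iris.Register(55555, "echo", new(EchoHandler), limits)
if err != nil {
	log.Fatalf("failed to register limited service: %v.", err)
}
defer service.Unregister()
```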
153 | There is also a sanity limit on the input buffer of a tunnel, but it is not exposed through the API as tunnels are meant as structural primitives, not sensitive to load. This may change in the future.
154 |
155 | ### Logging
156 |
157 | For logging purposes, the Go binding uses [inconshreveable](https://github.com/inconshreveable)'s [log15](https://github.com/inconshreveable/log15) library (version v2). By default, _INFO_ level logs are collected and printed to _stderr_. This level allows tracking life-cycle events such as client and service attachments, topic subscriptions and tunnel establishments. Further log entries can be requested by lowering the level to _DEBUG_, effectively printing all messages passing through the binding.
158 |
159 | The binding's logger can be fine-tuned through the `iris.Log` variable. Below are a few common configurations.
160 |
161 | ```go
162 | // Discard all log entries
163 | iris.Log.SetHandler(log15.DiscardHandler())
164 |
165 | // Log DEBUG level entries to STDERR
166 | iris.Log.SetHandler(log15.LvlFilterHandler(log15.LvlDebug, log15.StderrHandler))
167 | ```
168 |
169 | Each [`iris.Connection`](http://godoc.org/gopkg.in/project-iris/iris-go.v1#Connection), [`iris.Service`](http://godoc.org/gopkg.in/project-iris/iris-go.v1#Service) and [`iris.Tunnel`](http://godoc.org/gopkg.in/project-iris/iris-go.v1#Tunnel) has an embedded logger, through which contextual log entries may be printed (i.e. tagged with the specific id of the attached entity).
170 |
171 | ```go
172 | conn, _ := iris.Connect(55555)
173 | defer conn.Close()
174 |
175 | conn.Log.Debug("debug entry, hidden by default")
176 | conn.Log.Info("info entry, client context included")
177 | conn.Log.Warn("warning entry", "extra", "some value")
178 | conn.Log.Crit("critical entry", "bool", false, "int", 1, "string", "two")
179 | ```
180 |
181 | As you can see below, all log entries have been automatically tagged with the `client` attribute, set to the id of the current connection. Since the default log level is _INFO_, the `conn.Log.Debug` invocation has no effect. Additionally, arbitrarily many key-value pairs may be included in the entry.
182 |
183 | ```
184 | INFO[06-22|18:39:49] connecting new client client=1 relay_port=55555
185 | INFO[06-22|18:39:49] client connection established client=1
186 | INFO[06-22|18:39:49] info entry, client context included client=1
187 | WARN[06-22|18:39:49] warning entry client=1 extra="some value"
188 | CRIT[06-22|18:39:49] critical entry client=1 bool=false int=1 string=two
189 | INFO[06-22|18:39:49] detaching from relay client=1
190 | ```
191 |
192 | For further capabilities, configurations and details about the logger, please consult the [log15 docs](https://godoc.org/github.com/inconshreveable/log15).
193 |
194 | ### Additional goodies
195 |
196 | You can find a teaser presentation touching on all the key features of the library through a handful of challenges and their solutions. The recommended version is the [playground](http://play.iris.karalabe.com/talks/binds/go.v1.slide), containing modifiable and executable code snippets, but a [read only](http://iris.karalabe.com/talks/binds/go.v1.slide) one is also available.
197 |
198 | Contributions
199 | -----------------
200 |
201 | Currently my development aims are to stabilize the project and its language bindings. Hence, although I'm open to and very happy about any and all contributions, the most valuable ones are tests, benchmarks and actual binding usage that help the project reach a high enough quality.
202 |
203 | Due to the already significant complexity of the project (Iris in general), I kindly ask anyone willing to pitch in to first file an [issue](https://github.com/project-iris/iris-go/issues) with their plans, to achieve the best possible integration :).
204 |
205 | Additionally, to prevent copyright disputes and such, a signed contributor license agreement is required to be on file before any material can be accepted into the official repositories. These can be filled online via either the [Individual Contributor License Agreement](http://iris.karalabe.com/icla) or the [Corporate Contributor License Agreement](http://iris.karalabe.com/ccla).
206 |
--------------------------------------------------------------------------------
/proto.go:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2013 Project Iris. All rights reserved.
2 | //
3 | // The current language binding is an official support library of the Iris
4 | // cloud messaging framework, and as such, the same licensing terms apply.
5 | // For details please see http://iris.karalabe.com/downloads#License
6 |
7 | // Contains the wire protocol for communicating with the Iris relay endpoint.
8 |
9 | // The specification version implemented is v1.0-draft2, available at:
10 | // http://iris.karalabe.com/specs/relay-protocol-v1.0-draft2.pdf
11 |
12 | package iris
13 |
14 | import (
15 | "fmt"
16 | "io"
17 | "sync/atomic"
18 | "time"
19 | )
20 |
21 | // Packet opcodes
22 | const (
23 | opInit byte = 0x00 // Out: connection initiation | In: connection acceptance
24 | opDeny = 0x01 // Out: | In: connection refusal
25 | opClose = 0x02 // Out: connection tear-down initiation | In: connection tear-down notification
26 |
27 | opBroadcast = 0x03 // Out: application broadcast initiation | In: application broadcast delivery
28 | opRequest = 0x04 // Out: application request initiation | In: application request delivery
29 | opReply = 0x05 // Out: application reply initiation | In: application reply delivery
30 |
31 | opSubscribe = 0x06 // Out: topic subscription | In:
32 | opUnsubscribe = 0x07 // Out: topic subscription removal | In:
33 | opPublish = 0x08 // Out: topic event publish | In: topic event delivery
34 |
35 | opTunInit = 0x09 // Out: tunnel construction request | In: tunnel initiation
36 | opTunConfirm = 0x0a // Out: tunnel confirmation | In: tunnel construction result
37 | opTunAllow = 0x0b // Out: tunnel transfer allowance | In:
38 | opTunTransfer = 0x0c // Out: tunnel data exchange | In:
39 | opTunClose = 0x0d // Out: tunnel termination request | In: tunnel termination notification
40 | )
41 |
42 | // Protocol constants
43 | var (
44 | protoVersion = "v1.0-draft2"
45 | clientMagic = "iris-client-magic"
46 | relayMagic = "iris-relay-magic"
47 | )
48 |
49 | // Serializes a single byte into the relay connection.
50 | func (c *Connection) sendByte(data byte) error {
51 | return c.sockBuf.WriteByte(data)
52 | }
53 |
54 | // Serializes a boolean into the relay connection.
55 | func (c *Connection) sendBool(data bool) error {
56 | if data {
57 | return c.sendByte(1)
58 | }
59 | return c.sendByte(0)
60 | }
61 |
62 | // Serializes a variable int using base 128 encoding into the relay connection.
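// Each byte carries 7 payload bits, least significant group first, with the
// high bit flagging continuation: e.g. 300 = 44 + 2*128 encodes as [0xac 0x02]
// (0xac = 128+44 with the continuation flag set, 0x02 is the final byte).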
63 | func (c *Connection) sendVarint(data uint64) error {
64 | for data > 127 {
65 | // Internal byte, set the continuation flag and send
66 | if err := c.sendByte(byte(128 + data%128)); err != nil {
67 | return err
68 | }
69 | data /= 128
70 | }
71 | // Final byte, send and return
72 | return c.sendByte(byte(data))
73 | }
74 |
75 | // Serializes a length-tagged binary array into the relay connection.
76 | func (c *Connection) sendBinary(data []byte) error {
77 | if err := c.sendVarint(uint64(len(data))); err != nil {
78 | return err
79 | }
80 | 	if _, err := c.sockBuf.Write(data); err != nil {
81 | return err
82 | }
83 | return nil
84 | }
85 |
86 | // Serializes a length-tagged string into the relay connection.
87 | func (c *Connection) sendString(data string) error {
88 | return c.sendBinary([]byte(data))
89 | }
90 |
91 | // Serializes a packet through a closure into the relay connection.
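// Writers first register in sockWait, then serialize under the socket lock;
// only the writer that finds no other packet pending flushes the buffer. This
// coalesces flushes under load while still delivering the final packet.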
92 | func (c *Connection) sendPacket(closure func() error) error {
93 | // Increment the pending write count
94 | atomic.AddInt32(&c.sockWait, 1)
95 |
96 | // Acquire the socket lock
97 | c.sockLock.Lock()
98 | defer c.sockLock.Unlock()
99 |
100 | // Send the packet itself
101 | if err := closure(); err != nil {
102 | // Decrement the pending count and error out
103 | atomic.AddInt32(&c.sockWait, -1)
104 | return err
105 | }
106 | // Flush the stream if no more messages are pending
107 | if atomic.AddInt32(&c.sockWait, -1) == 0 {
108 | return c.sockBuf.Flush()
109 | }
110 | return nil
111 | }
112 |
113 | // Sends a connection initiation.
114 | func (c *Connection) sendInit(cluster string) error {
115 | return c.sendPacket(func() error {
116 | if err := c.sendByte(opInit); err != nil {
117 | return err
118 | }
119 | if err := c.sendString(clientMagic); err != nil {
120 | return err
121 | }
122 | if err := c.sendString(protoVersion); err != nil {
123 | return err
124 | }
125 | return c.sendString(cluster)
126 | })
127 | }
128 |
129 | // Sends a connection tear-down initiation.
130 | func (c *Connection) sendClose() error {
131 | return c.sendPacket(func() error {
132 | return c.sendByte(opClose)
133 | })
134 | }
135 |
136 | // Sends an application broadcast initiation.
137 | func (c *Connection) sendBroadcast(cluster string, message []byte) error {
138 | return c.sendPacket(func() error {
139 | if err := c.sendByte(opBroadcast); err != nil {
140 | return err
141 | }
142 | if err := c.sendString(cluster); err != nil {
143 | return err
144 | }
145 | return c.sendBinary(message)
146 | })
147 | }
148 |
149 | // Sends an application request initiation.
150 | func (c *Connection) sendRequest(id uint64, cluster string, request []byte, timeout int) error {
151 | return c.sendPacket(func() error {
152 | if err := c.sendByte(opRequest); err != nil {
153 | return err
154 | }
155 | if err := c.sendVarint(id); err != nil {
156 | return err
157 | }
158 | if err := c.sendString(cluster); err != nil {
159 | return err
160 | }
161 | if err := c.sendBinary(request); err != nil {
162 | return err
163 | }
164 | return c.sendVarint(uint64(timeout))
165 | })
166 | }
167 |
168 | // Sends an application reply initiation.
169 | func (c *Connection) sendReply(id uint64, reply []byte, fault string) error {
170 | return c.sendPacket(func() error {
171 | if err := c.sendByte(opReply); err != nil {
172 | return err
173 | }
174 | if err := c.sendVarint(id); err != nil {
175 | return err
176 | }
177 | success := (len(fault) == 0)
178 | if err := c.sendBool(success); err != nil {
179 | return err
180 | }
181 | if success {
182 | return c.sendBinary(reply)
183 | } else {
184 | return c.sendString(fault)
185 | }
186 | })
187 | }
188 |
189 | // Sends a topic subscription.
190 | func (c *Connection) sendSubscribe(topic string) error {
191 | return c.sendPacket(func() error {
192 | if err := c.sendByte(opSubscribe); err != nil {
193 | return err
194 | }
195 | return c.sendString(topic)
196 | })
197 | }
198 |
199 | // Sends a topic subscription removal.
200 | func (c *Connection) sendUnsubscribe(topic string) error {
201 | return c.sendPacket(func() error {
202 | if err := c.sendByte(opUnsubscribe); err != nil {
203 | return err
204 | }
205 | return c.sendString(topic)
206 | })
207 | }
208 |
209 | // Sends a topic event publish.
210 | func (c *Connection) sendPublish(topic string, event []byte) error {
211 | return c.sendPacket(func() error {
212 | if err := c.sendByte(opPublish); err != nil {
213 | return err
214 | }
215 | if err := c.sendString(topic); err != nil {
216 | return err
217 | }
218 | return c.sendBinary(event)
219 | })
220 | }
221 |
222 | // Sends a tunnel construction request.
223 | func (c *Connection) sendTunnelInit(id uint64, cluster string, timeout int) error {
224 | return c.sendPacket(func() error {
225 | if err := c.sendByte(opTunInit); err != nil {
226 | return err
227 | }
228 | if err := c.sendVarint(id); err != nil {
229 | return err
230 | }
231 | if err := c.sendString(cluster); err != nil {
232 | return err
233 | }
234 | return c.sendVarint(uint64(timeout))
235 | })
236 | }
237 |
238 | // Sends a tunnel confirmation.
239 | func (c *Connection) sendTunnelConfirm(buildId, tunId uint64) error {
240 | return c.sendPacket(func() error {
241 | if err := c.sendByte(opTunConfirm); err != nil {
242 | return err
243 | }
244 | if err := c.sendVarint(buildId); err != nil {
245 | return err
246 | }
247 | return c.sendVarint(tunId)
248 | })
249 | }
250 |
251 | // Sends a tunnel transfer allowance.
252 | func (c *Connection) sendTunnelAllowance(id uint64, space int) error {
253 | return c.sendPacket(func() error {
254 | if err := c.sendByte(opTunAllow); err != nil {
255 | return err
256 | }
257 | if err := c.sendVarint(id); err != nil {
258 | return err
259 | }
260 | return c.sendVarint(uint64(space))
261 | })
262 | }
263 |
264 | // Sends a tunnel data exchange.
265 | func (c *Connection) sendTunnelTransfer(id uint64, sizeOrCont int, payload []byte) error {
266 | return c.sendPacket(func() error {
267 | if err := c.sendByte(opTunTransfer); err != nil {
268 | return err
269 | }
270 | if err := c.sendVarint(id); err != nil {
271 | return err
272 | }
273 | if err := c.sendVarint(uint64(sizeOrCont)); err != nil {
274 | return err
275 | }
276 | return c.sendBinary(payload)
277 | })
278 | }
279 |
280 | // Sends a tunnel termination request.
281 | func (c *Connection) sendTunnelClose(id uint64) error {
282 | return c.sendPacket(func() error {
283 | if err := c.sendByte(opTunClose); err != nil {
284 | return err
285 | }
286 | return c.sendVarint(id)
287 | })
288 | }
289 |
290 | // Retrieves a single byte from the relay connection.
291 | func (c *Connection) recvByte() (byte, error) {
292 | return c.sockBuf.ReadByte()
293 | }
294 |
295 | // Retrieves a boolean from the relay connection.
296 | func (c *Connection) recvBool() (bool, error) {
297 | b, err := c.recvByte()
298 | if err != nil {
299 | return false, err
300 | }
301 | switch b {
302 | case 0:
303 | return false, nil
304 | case 1:
305 | return true, nil
306 | default:
307 | return false, fmt.Errorf("protocol violation: invalid boolean value: %v", b)
308 | }
309 | }
310 |
311 | // Retrieves a variable int in base 128 encoding from the relay connection.
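// Inverse of sendVarint: 7 payload bits are accumulated per byte until one
// arrives without the continuation flag, e.g. [0xac 0x02] -> 44 + 2<<7 = 300.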
312 | func (c *Connection) recvVarint() (uint64, error) {
313 | var num uint64
314 | for i := uint(0); ; i++ {
315 | chunk, err := c.recvByte()
316 | if err != nil {
317 | return 0, err
318 | }
319 | num += uint64(chunk&127) << (7 * i)
320 | if chunk <= 127 {
321 | break
322 | }
323 | }
324 | return num, nil
325 | }
326 |
327 | // Retrieves a length-tagged binary array from the relay connection.
328 | func (c *Connection) recvBinary() ([]byte, error) {
329 | // Fetch the length of the binary blob
330 | size, err := c.recvVarint()
331 | if err != nil {
332 | return nil, err
333 | }
334 | // Fetch the blob itself
335 | data := make([]byte, size)
336 | if _, err := io.ReadFull(c.sockBuf, data); err != nil {
337 | return nil, err
338 | }
339 | return data, nil
340 | }
341 |
342 | // Retrieves a length-tagged string from the relay connection.
343 | func (c *Connection) recvString() (string, error) {
344 | if data, err := c.recvBinary(); err != nil {
345 | return "", err
346 | } else {
347 | return string(data), nil
348 | }
349 | }
350 |
351 | // Retrieves a connection initiation response (either accept or deny).
352 | func (c *Connection) procInit() (string, error) {
353 | // Retrieve the response opcode
354 | op, err := c.recvByte()
355 | if err != nil {
356 | return "", err
357 | }
358 | // Verify the opcode validity and relay magic string
359 | switch {
360 | case op == opInit || op == opDeny:
361 | if magic, err := c.recvString(); err != nil {
362 | return "", err
363 | } else if magic != relayMagic {
364 | return "", fmt.Errorf("protocol violation: invalid relay magic: %s", magic)
365 | }
366 | default:
367 | return "", fmt.Errorf("protocol violation: invalid init response opcode: %v", op)
368 | }
369 | // Depending on success or failure, proceed and return
370 | switch op {
371 | case opInit:
372 | // Read the highest supported protocol version
373 | if version, err := c.recvString(); err != nil {
374 | return "", err
375 | } else {
376 | return version, nil
377 | }
378 | case opDeny:
379 | // Read the reason for connection denial
380 | if reason, err := c.recvString(); err != nil {
381 | return "", err
382 | } else {
383 | return "", fmt.Errorf("connection denied: %s", reason)
384 | }
385 | default:
386 | panic("unreachable code")
387 | }
388 | }
389 |
390 | // Retrieves a connection tear-down notification.
391 | func (c *Connection) procClose() (string, error) {
392 | return c.recvString()
393 | }
394 |
395 | // Retrieves an application broadcast delivery.
396 | func (c *Connection) procBroadcast() error {
397 | message, err := c.recvBinary()
398 | if err != nil {
399 | return err
400 | }
401 | c.handleBroadcast(message)
402 | return nil
403 | }
404 |
405 | // Retrieves an application request delivery.
406 | func (c *Connection) procRequest() error {
407 | id, err := c.recvVarint()
408 | if err != nil {
409 | return err
410 | }
411 | request, err := c.recvBinary()
412 | if err != nil {
413 | return err
414 | }
415 | timeout, err := c.recvVarint()
416 | if err != nil {
417 | return err
418 | }
419 | c.handleRequest(id, request, time.Duration(timeout)*time.Millisecond)
420 | return nil
421 | }
422 |
423 | // Retrieves an application reply delivery.
424 | func (c *Connection) procReply() error {
425 | id, err := c.recvVarint()
426 | if err != nil {
427 | return err
428 | }
429 | timeout, err := c.recvBool()
430 | if err != nil {
431 | return err
432 | }
433 | if timeout {
434 | c.handleReply(id, nil, "")
435 | return nil
436 | }
437 | // The request didn't time out, get the result
438 | success, err := c.recvBool()
439 | if err != nil {
440 | return err
441 | }
442 | if success {
443 | reply, err := c.recvBinary()
444 | if err != nil {
445 | return err
446 | }
447 | c.handleReply(id, reply, "")
448 | } else {
449 | fault, err := c.recvString()
450 | if err != nil {
451 | return err
452 | }
453 | c.handleReply(id, nil, fault)
454 | }
455 | return nil
456 | }
457 |
458 | // Retrieves a topic event delivery.
459 | func (c *Connection) procPublish() error {
460 | topic, err := c.recvString()
461 | if err != nil {
462 | return err
463 | }
464 | event, err := c.recvBinary()
465 | if err != nil {
466 | return err
467 | }
468 | go c.handlePublish(topic, event)
469 | return nil
470 | }
471 |
472 | // Retrieves a tunnel initiation message.
473 | func (c *Connection) procTunnelInit() error {
474 | id, err := c.recvVarint()
475 | if err != nil {
476 | return err
477 | }
478 | chunkLimit, err := c.recvVarint()
479 | if err != nil {
480 | return err
481 | }
482 | c.handleTunnelInit(id, int(chunkLimit))
483 | return nil
484 | }
485 |
486 | // Retrieves a tunnel construction result.
487 | func (c *Connection) procTunnelResult() error {
488 | id, err := c.recvVarint()
489 | if err != nil {
490 | return err
491 | }
492 | timeout, err := c.recvBool()
493 | if err != nil {
494 | return err
495 | }
496 | if timeout {
497 | c.handleTunnelResult(id, 0)
498 | return nil
499 | }
500 | // The tunnel didn't time out, proceed
501 | chunkLimit, err := c.recvVarint()
502 | if err != nil {
503 | return err
504 | }
505 | c.handleTunnelResult(id, int(chunkLimit))
506 | return nil
507 | }
508 |
509 | // Retrieves a tunnel transfer allowance message.
510 | func (c *Connection) procTunnelAllowance() error {
511 | id, err := c.recvVarint()
512 | if err != nil {
513 | return err
514 | }
515 | space, err := c.recvVarint()
516 | if err != nil {
517 | return err
518 | }
519 | c.handleTunnelAllowance(id, int(space))
520 | return nil
521 | }
522 |
523 | // Retrieves a tunnel data exchange message.
524 | func (c *Connection) procTunnelTransfer() error {
525 | id, err := c.recvVarint()
526 | if err != nil {
527 | return err
528 | }
529 | size, err := c.recvVarint()
530 | if err != nil {
531 | return err
532 | }
533 | payload, err := c.recvBinary()
534 | if err != nil {
535 | return err
536 | }
537 | c.handleTunnelTransfer(id, int(size), payload)
538 | return nil
539 | }
540 |
541 | // Retrieves a tunnel closure notification.
542 | func (c *Connection) procTunnelClose() error {
543 | id, err := c.recvVarint()
544 | if err != nil {
545 | return err
546 | }
547 | reason, err := c.recvString()
548 | if err != nil {
549 | return err
550 | }
551 | go c.handleTunnelClose(id, reason)
552 | return nil
553 | }
554 |
555 | // Retrieves messages from the client connection and keeps processing them until
556 | // either the relay closes (graceful close) or the connection drops.
557 | func (c *Connection) process() {
558 | var op byte
559 | var err error
560 | for closed := false; !closed && err == nil; {
561 | // Retrieve the next opcode and call the specific handler for the rest
562 | if op, err = c.recvByte(); err == nil {
563 | switch op {
564 | case opBroadcast:
565 | err = c.procBroadcast()
566 | case opRequest:
567 | err = c.procRequest()
568 | case opReply:
569 | err = c.procReply()
570 | case opPublish:
571 | err = c.procPublish()
572 | case opTunInit:
573 | err = c.procTunnelInit()
574 | case opTunConfirm:
575 | err = c.procTunnelResult()
576 | case opTunAllow:
577 | err = c.procTunnelAllowance()
578 | case opTunTransfer:
579 | err = c.procTunnelTransfer()
580 | case opTunClose:
581 | err = c.procTunnelClose()
582 | case opClose:
583 | // Retrieve any reason for remote closure
584 | if reason, cerr := c.procClose(); cerr != nil {
585 | err = cerr
586 | } else if len(reason) > 0 {
587 | err = fmt.Errorf("connection dropped: %s", reason)
588 | } else {
589 | closed = true
590 | }
591 | default:
592 | err = fmt.Errorf("protocol violation: unknown opcode: %v", op)
593 | }
594 | }
595 | }
596 | // Close the socket and signal termination to all blocked threads
597 | c.sock.Close()
598 | close(c.term)
599 |
600 | // Notify the application of the connection closure
601 | c.handleClose(err)
602 |
603 | // Wait for termination sync
604 | errc := <-c.quit
605 | errc <- err
606 | }
607 |
--------------------------------------------------------------------------------