├── .gitignore
├── Makefile
├── README.md
├── client
│   └── client.go
├── cluster
│   ├── balancer.go
│   ├── cluster.go
│   ├── errors.go
│   └── listener.go
├── coordinator
│   ├── coordinator.go
│   └── map.go
├── donutctl
│   ├── cmd
│   │   ├── addwork.go
│   │   ├── delwork.go
│   │   └── root.go
│   └── main.go
├── example
│   └── readme
│       └── main.go
├── go.mod
├── go.sum
├── itest
│   ├── cluster_test.go
│   ├── coordinator_test.go
│   └── util.go
├── log
│   └── log.go
└── script
    └── run-etcd.sh

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
 1 | # Binaries for programs and plugins
 2 | *.exe
 3 | *.exe~
 4 | *.dll
 5 | *.so
 6 | *.dylib
 7 | 
 8 | # Test binary, built with `go test -c`
 9 | *.test
10 | 
11 | # Output of the go coverage tool, specifically when used with LiteIDE
12 | *.out
13 | 
14 | example/readme/readme
15 | donutctl/donutctl
16 | 
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
 1 | all: build
 2 | 
 3 | build: build-cluster build-client build-donutctl build-example-readme
 4 | 
 5 | test:
 6 | 	go test -race -v ./...
 7 | 
 8 | run-etcd:
 9 | 	sudo rm -rf /tmp/etcd-data.tmp && mkdir -p /tmp/etcd-data.tmp && \
10 | 	docker rmi gcr.io/etcd-development/etcd:v3.3.11 || true && \
11 | 	docker run \
12 | 		-p 2379:2379 \
13 | 		-p 2380:2380 \
14 | 		--mount type=bind,source=/tmp/etcd-data.tmp,destination=/etcd-data \
15 | 		gcr.io/etcd-development/etcd:v3.3.11 \
16 | 		/usr/local/bin/etcd \
17 | 		--name s1 \
18 | 		--data-dir /etcd-data \
19 | 		--listen-client-urls http://0.0.0.0:2379 \
20 | 		--advertise-client-urls http://0.0.0.0:2379 \
21 | 		--listen-peer-urls http://0.0.0.0:2380 \
22 | 		--initial-advertise-peer-urls http://0.0.0.0:2380 \
23 | 		--initial-cluster s1=http://0.0.0.0:2380 \
24 | 		--initial-cluster-token tkn \
25 | 		--initial-cluster-state new
26 | 
27 | build-client:
28 | 	go build github.com/dforsyth/donut/client
29 | 
30 | build-cluster:
31 | 	go build github.com/dforsyth/donut/cluster
32 | 
33 | build-donutctl:
34 | 	go build -o donutctl-bin github.com/dforsyth/donut/donutctl
35 | 
36 | build-example-readme:
37 | 	go build -o example-readme github.com/dforsyth/donut/example/readme
38 | 
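39 | # Note: `make test` runs the integration tests in itest/, which expect an
40 | # etcd server reachable at http://0.0.0.0:2379; start one first with
41 | # `make run-etcd` or script/run-etcd.sh.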
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
 1 | Donut is a library for building clustered applications in Go.
 2 | 
 3 | ## Example
 4 | 
 5 | ```go
 6 | package main
 7 | 
 8 | import (
 9 | 	"context"
10 | 	"log"
11 | 	"os"
12 | 
13 | 	// Wait for etcd client v3.4, there is a module import issue.
14 | 	client "github.com/coreos/etcd/clientv3" // "go.etcd.io/etcd/clientv3"
15 | 	"github.com/dforsyth/donut/cluster"
16 | 	"github.com/dforsyth/donut/coordinator"
17 | )
18 | 
19 | type ExampleListener struct {
20 | 	logger *log.Logger
21 | }
22 | 
23 | func (l *ExampleListener) OnJoin(c *cluster.Cluster) {
24 | 	l.logger.Println("Joined the cluster!")
25 | }
26 | 
27 | func (l *ExampleListener) StartWork(ctx context.Context, workKey string) {
28 | 	l.logger.Println("Starting work " + workKey)
29 | }
30 | 
31 | func (*ExampleListener) OnLeave() {}
32 | 
33 | func main() {
34 | 	logger := log.New(os.Stderr, "", log.LstdFlags)
35 | 	c := cluster.New("example", "node", &ExampleListener{logger: logger})
36 | 	client, err := client.New(client.Config{
37 | 		Endpoints: []string{"http://0.0.0.0:2379"},
38 | 	})
39 | 	if err != nil {
40 | 		logger.Fatalf("Failed to create client: %s", err)
41 | 	}
42 | 	coo, err := coordinator.NewEtcdCoordinator(client)
43 | 	if err != nil {
44 | 		logger.Fatalf("Failed to create coordinator: %s", err)
45 | 	}
46 | 	if err := c.Join(coo); err != nil {
47 | 		logger.Fatalf("Failed to join cluster: %s", err)
48 | 	}
49 | 	select {}
50 | }
51 | 
52 | ```
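53 | 
54 | Work items are plain key/value entries created through the `client`
55 | package (this is what `donutctl addwork` does). A minimal sketch, reusing
56 | `coo` and `c` from the example above and assuming
57 | `"github.com/dforsyth/donut/client"` is imported:
58 | 
59 | ```go
60 | cli := client.New(coo)
61 | workKey, err := cli.CreateWork(c, "some-work", "payload")
62 | if err != nil {
63 | 	logger.Fatalf("Failed to create work: %s", err)
64 | }
65 | logger.Printf("Created %s", workKey)
66 | ```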
67 | 
68 | ## Documentation
69 | 
70 | http://go.pkgdoc.org/github.com/dforsyth/donut
71 | 
72 | 
73 | ### TODO
74 | - [ ] Better testing
75 | - [ ] More examples
76 | 
--------------------------------------------------------------------------------
/client/client.go:
--------------------------------------------------------------------------------
 1 | package client
 2 | 
 3 | import (
 4 | 	"github.com/dforsyth/donut/cluster"
 5 | 	"github.com/dforsyth/donut/coordinator"
 6 | )
 7 | 
 8 | type Client struct {
 9 | 	kv coordinator.Coordinator
10 | }
11 | 
12 | func New(kv coordinator.Coordinator) *Client {
13 | 	return &Client{kv: kv}
14 | }
15 | 
16 | func (c *Client) CreateWork(cls *cluster.Cluster, name, value string) (string, error) {
17 | 	workKey := cluster.Key(cls, cluster.TypeWork, name)
18 | 	if err := c.kv.Store(workKey, value); err != nil {
19 | 		return "", err
20 | 	}
21 | 	return workKey, nil
22 | }
23 | 
24 | func (c *Client) DeleteWork(cls *cluster.Cluster, name string) error {
25 | 	workKey := cluster.Key(cls, cluster.TypeWork, name)
26 | 	return c.kv.Delete(workKey)
27 | }
28 | 
--------------------------------------------------------------------------------
/cluster/balancer.go:
--------------------------------------------------------------------------------
 1 | package cluster
 2 | 
 3 | type Balancer interface {
 4 | 	// CanClaim should return true if a node can attempt to claim a given
 5 | 	// workKey, and false otherwise.
 6 | 	CanClaim(string) bool
 7 | 	// OnBalance is called during a node's balance step. Useful for
 8 | 	// bookkeeping or actions necessary to balance the cluster.
 9 | 	OnBalance()
10 | }
11 | 
12 | type DumbBalancer struct{}
13 | 
14 | func (*DumbBalancer) CanClaim(workKey string) bool {
15 | 	return true
16 | }
17 | 
18 | func (*DumbBalancer) OnBalance() {
19 | 
20 | }
21 | 
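22 | // cappedBalancer is an illustrative sketch only; nothing in this package
23 | // uses it. It refuses new claims once the node already holds max work
24 | // keys. A Balancer carries no back-reference to its Cluster, so the
25 | // owned count is supplied by the application as a callback.
26 | type cappedBalancer struct {
27 | 	owned func() int // e.g. backed by the cluster's owned map
28 | 	max   int
29 | }
30 | 
31 | func (b *cappedBalancer) CanClaim(workKey string) bool {
32 | 	return b.owned() < b.max
33 | }
34 | 
35 | func (b *cappedBalancer) OnBalance() {}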
--------------------------------------------------------------------------------
/cluster/cluster.go:
--------------------------------------------------------------------------------
  1 | package cluster
  2 | 
  3 | import (
  4 | 	"context"
  5 | 	"errors"
  6 | 	"fmt"
  7 | 	stdlog "log"
  8 | 	"os"
  9 | 	"sync"
 10 | 	"time"
 11 | 
 12 | 	"github.com/coreos/etcd/clientv3" // "go.etcd.io/etcd/clientv3"
 13 | 	"github.com/dforsyth/donut/coordinator"
 14 | 	"github.com/dforsyth/donut/log"
 15 | 	"golang.org/x/sync/syncmap"
 16 | )
 17 | 
 18 | // DefaultBalancer is the default balancer, a DumbBalancer
 19 | var DefaultBalancer = &DumbBalancer{}
 20 | 
 21 | const (
 22 | 	StateNew     = "new"
 23 | 	StateJoining = "joining"
 24 | 	StateJoined  = "joined"
 25 | 	StateLeaving = "leaving"
 26 | 	StateLeft    = "left"
 27 | 
 28 | 	TypeNode  = "node"
 29 | 	TypeWork  = "work"
 30 | 	TypeClaim = "claim"
 31 | 
 32 | 	DefaultBalancerInterval = 5 * time.Second
 33 | 	DefaultDrainTimeout     = 1 * time.Minute
 34 | 	DefaultEndWorkTimeout   = 5 * time.Second
 35 | )
 36 | 
 37 | type Cluster struct {
 38 | 	kv coordinator.Coordinator
 39 | 
 40 | 	clusterName string
 41 | 	nodeID      string
 42 | 
 43 | 	listener        Listener
 44 | 	balancer        Balancer
 45 | 	balanceInterval time.Duration
 46 | 	logger          log.FmtLogger
 47 | 	endWorkTimeout  time.Duration
 48 | 	drainTimeout    time.Duration
 49 | 
 50 | 	nodes  *coordinator.WatchMap
 51 | 	work   *coordinator.WatchMap
 52 | 	claims *coordinator.WatchMap
 53 | 	// TODO(dforsyth): Switch back to a mutex guarded map?
 54 | 	owned *syncmap.Map
 55 | 
 56 | 	state   string
 57 | 	stateLk sync.RWMutex
 58 | 
 59 | 	balancerFire   chan struct{}
 60 | 	balancerCancel context.CancelFunc
 61 | }
 62 | 
 63 | // ClusterOption configures a Cluster.
 64 | type ClusterOption func(*Cluster)
 65 | 
 66 | func WithLogger(logger log.FmtLogger) ClusterOption {
 67 | 	return func(c *Cluster) { c.logger = logger }
 68 | }
 69 | 
 70 | func WithBalancer(balancer Balancer) ClusterOption {
 71 | 	return func(c *Cluster) { c.balancer = balancer }
 72 | }
 73 | 
 74 | func WithBalanceInterval(balanceInterval time.Duration) ClusterOption {
 75 | 	return func(c *Cluster) { c.balanceInterval = balanceInterval }
 76 | }
 77 | 
 78 | func WithDrainTimeout(drainTimeout time.Duration) ClusterOption {
 79 | 	return func(c *Cluster) { c.drainTimeout = drainTimeout }
 80 | }
 81 | 
 82 | func WithEndWorkTimeout(endWorkTimeout time.Duration) ClusterOption {
 83 | 	return func(c *Cluster) { c.endWorkTimeout = endWorkTimeout }
 84 | }
 85 | 
 86 | // New creates a new cluster to operate on.
 87 | func New(clusterName, nodeID string, listener Listener, opts ...ClusterOption) *Cluster {
 88 | 	c := &Cluster{
 89 | 		state:           StateNew,
 90 | 		clusterName:     clusterName,
 91 | 		nodeID:          nodeID,
 92 | 		listener:        listener,
 93 | 		balancer:        DefaultBalancer,
 94 | 		balanceInterval: DefaultBalancerInterval,
 95 | 		drainTimeout:    DefaultDrainTimeout,
 96 | 		endWorkTimeout:  DefaultEndWorkTimeout,
 97 | 		logger:          stdlog.New(os.Stderr, "", stdlog.LstdFlags),
 98 | 	}
 99 | 
100 | 	for _, opt := range opts {
101 | 		opt(c)
102 | 	}
103 | 
104 | 	return c
105 | }
106 | 
107 | func (c *Cluster) transitionState(from, to string) bool {
108 | 	c.stateLk.Lock()
109 | 	defer c.stateLk.Unlock()
110 | 
111 | 	if c.state != from {
112 | 		return false
113 | 	}
114 | 
115 | 	c.state = to
116 | 	return true
117 | }
118 | 
119 | // Join joins a cluster, using kv for storage and coordination.
120 | func (c *Cluster) Join(kv coordinator.Coordinator) error {
121 | 	if kv == nil {
122 | 		return errors.New("kv must not be nil")
123 | 	}
124 | 
125 | 	c.logger.Printf("Joining cluster %s", c.clusterName)
126 | 	if !c.transitionState(StateNew, StateJoining) {
127 | 		return StateTransitionError{
128 | 			From: StateNew,
129 | 			To:   StateJoining,
130 | 		}
131 | 	}
132 | 
133 | 	c.kv = kv
134 | 
135 | 	// Join the cluster.
136 | 	if err := c.joinCluster(); err != nil {
137 | 		return err
138 | 	}
139 | 
140 | 	c.listener.OnJoin(c)
141 | 
142 | 	// Start our balancer. Run this before setting up watchers so we don't
143 | 	// race on balancerFire.
144 | 	c.balancerFire, c.balancerCancel = c.startBalancer()
145 | 
146 | 	// Start watchers.
147 | 	if err := c.ensureWatchers(); err != nil {
148 | 		return err
149 | 	}
150 | 
151 | 	c.owned = &syncmap.Map{}
152 | 
153 | 	// Set state to joined, allowing this node to accept work.
154 | 	if !c.transitionState(StateJoining, StateJoined) {
155 | 		return StateTransitionError{
156 | 			From: StateJoining,
157 | 			To:   StateJoined,
158 | 		}
159 | 	}
160 | 
161 | 	return nil
162 | }
163 | 
164 | // Leave leaves the cluster.
165 | func (c *Cluster) Leave() error { 166 | if !c.transitionState(StateJoined, StateLeaving) { 167 | return StateTransitionError{ 168 | From: StateJoined, 169 | To: StateLeaving, 170 | } 171 | } 172 | 173 | // Cancel our watchers 174 | c.work.Cancel() 175 | c.claims.Cancel() 176 | c.nodes.Cancel() 177 | 178 | // Cancel the balancer 179 | c.balancerCancel() 180 | 181 | // end all work 182 | c.drainWork() 183 | 184 | if err := c.kv.Delete(Key(c, TypeNode, c.nodeID)); err != nil { 185 | c.logger.Printf("Failed to remove node entry for %s", c.nodeID) 186 | } 187 | 188 | if !c.transitionState(StateLeaving, StateLeft) { 189 | return StateTransitionError{ 190 | From: StateLeaving, 191 | To: StateLeft, 192 | } 193 | } 194 | 195 | c.listener.OnLeave() 196 | 197 | c.kv = nil 198 | 199 | return nil 200 | } 201 | 202 | func (c *Cluster) ensureWatchers() error { 203 | nodes, err := c.kv.Watch(Key(c, TypeNode, ""), c.onChanged) 204 | if err != nil { 205 | return err 206 | } 207 | c.nodes = nodes 208 | 209 | work, err := c.kv.Watch(Key(c, TypeWork, ""), c.onChanged) 210 | if err != nil { 211 | return err 212 | } 213 | c.work = work 214 | 215 | claims, err := c.kv.Watch(Key(c, TypeClaim, ""), c.onChanged) 216 | if err != nil { 217 | return err 218 | } 219 | c.claims = claims 220 | 221 | return nil 222 | } 223 | 224 | func (c *Cluster) onChanged(m *coordinator.WatchMap) { 225 | select { 226 | case c.balancerFire <- struct{}{}: 227 | c.logger.Printf("Submitted to balancer") 228 | default: 229 | c.logger.Printf("Can't submit to balancer") 230 | } 231 | } 232 | 233 | func (c *Cluster) isJoined() bool { 234 | c.stateLk.RLock() 235 | defer c.stateLk.RUnlock() 236 | return c.state == StateJoined 237 | } 238 | 239 | func (c *Cluster) isLeaving() bool { 240 | c.stateLk.RLock() 241 | defer c.stateLk.RUnlock() 242 | return c.state == StateLeaving 243 | } 244 | 245 | func (c *Cluster) GetNodeID() string { 246 | return c.nodeID 247 | } 248 | 249 | func (c *Cluster) GetNodes() *coordinator.WatchMap { 250 | return c.nodes 251 | } 252 | 253 | func (c *Cluster) GetWork() *coordinator.WatchMap { 254 | return c.work 255 | } 256 | 257 | func (c *Cluster) GetClaims() *coordinator.WatchMap { 258 | return c.claims 259 | } 260 | 261 | func (c *Cluster) GetOwned() *syncmap.Map { 262 | return c.owned 263 | } 264 | func (c *Cluster) joinCluster() error { 265 | c.logger.Printf("Creating cluster member node") 266 | // Create our membership entry. 
267 | 	key := Key(c, TypeNode, c.nodeID)
268 | 	if err := c.kv.StoreEphemeral(key, ""); err != nil {
269 | 		return fmt.Errorf("joinCluster::StoreEphemeral(%s): %s", key, err)
270 | 	}
271 | 
272 | 	c.logger.Printf("Created cluster member node")
273 | 
274 | 	return nil
275 | }
276 | 
277 | func (c *Cluster) startBalancer() (chan struct{}, context.CancelFunc) {
278 | 	ctx, cancel := context.WithCancel(context.TODO())
279 | 	fire := make(chan struct{}, 1)
280 | 	go func() {
281 | 		for {
282 | 			c.balance()
283 | 			select {
284 | 			case <-fire:
285 | 			case <-time.After(c.balanceInterval):
286 | 			case <-ctx.Done():
287 | 				return
288 | 			}
289 | 		}
290 | 		// fire is never closed: the loop above only exits via ctx.Done,
291 | 		// and onChanged may still attempt a send on it.
292 | 	}()
293 | 	return fire, cancel
294 | }
295 | 
296 | func (c *Cluster) balance() {
297 | 	if !c.isJoined() {
298 | 		c.logger.Printf("Cannot balance: node state is not joined")
299 | 		return
300 | 	}
301 | 
302 | 	c.logger.Printf("Balancing...")
303 | 
304 | 	c.getWork()
305 | 	c.verifyWork()
306 | 
307 | 	c.balancer.OnBalance()
308 | }
309 | 
310 | func (c *Cluster) getWork() {
311 | 	if !c.isJoined() {
312 | 		return
313 | 	}
314 | 
315 | 	keys := c.work.Keys()
316 | 	for _, workKey := range keys {
317 | 		if _, ok := c.owned.Load(workKey); ok {
318 | 			continue
319 | 		}
320 | 		if _, ok := c.claims.Get(workKey); ok {
321 | 			continue
322 | 		}
323 | 
324 | 		if c.balancer.CanClaim(workKey) {
325 | 			c.tryClaimWork(workKey)
326 | 		}
327 | 	}
328 | }
329 | 
330 | // Claim work if it is not already owned by another node
331 | func (c *Cluster) tryClaimWork(workKey string) {
332 | 	// Build our claims key
333 | 	claimKey := Key(c, TypeClaim, workKey)
334 | 	c.logger.Printf("Attempting to claim %s", workKey)
335 | 	if err := c.kv.StoreEphemeral(claimKey, c.nodeID); err != nil {
336 | 		c.logger.Printf("Failed to claim %s with %s: %s", workKey, claimKey, err)
337 | 		return
338 | 	}
339 | 
340 | 	// Once claimed, we can start our work.
341 | 	c.startWork(workKey)
342 | }
343 | 
344 | func verifyOwner(claimKey, ownerID string) clientv3.Cmp {
345 | 	return clientv3.Compare(clientv3.Value(claimKey), "=", ownerID)
346 | }
347 | 
348 | // verifyWork ensures that this instance is working on valid work. If an
349 | // owned work is no longer found in the work cache, we immediately release
350 | // it.
351 | func (c *Cluster) verifyWork() {
352 | 	if !c.isJoined() {
353 | 		return
354 | 	}
355 | 
356 | 	var toEnd []string
357 | 	c.owned.Range(func(_key, _ interface{}) bool {
358 | 		key := _key.(string)
359 | 		if _, ok := c.work.Get(key); !ok {
360 | 			toEnd = append(toEnd, key)
361 | 		}
362 | 		// TODO(dforsyth): Verify claims; if they do not exist, kill the work
363 | 		// immediately.
364 | 		return true
365 | 	})
366 | 	// TODO(dforsyth): Verify that we are working on all the claims we say we
367 | 	// are, or nuke the claims that we don't seem to have in owned.
368 | 
369 | 	// We no longer support handoffs because the implementation should be a
370 | 	// detail of the application.
371 | 
372 | 	for _, key := range toEnd {
373 | 		c.logger.Printf("Ending %s", key)
374 | 		c.endWork(key)
375 | 	}
376 | }
377 | 
378 | type work struct {
379 | 	cancel context.CancelFunc
380 | 	// Watch to find out if work is finished. TODO(dforsyth): lazy create?
381 | 	finished chan struct{}
382 | }
383 | 
384 | func (w *work) waitForFinish(timeout time.Duration) {
385 | 	select {
386 | 	case <-w.finished:
387 | 	case <-time.After(timeout):
388 | 	}
389 | }
390 | 
391 | func (c *Cluster) startWork(workKey string) {
392 | 	ctx, cancel := context.WithCancel(context.TODO())
393 | 	c.logger.Printf("Starting work, storing in %s", workKey)
394 | 	work := &work{
395 | 		cancel:   cancel,
396 | 		finished: make(chan struct{}),
397 | 	}
398 | 	c.owned.Store(workKey, work)
399 | 
400 | 	go func() {
401 | 		c.listener.StartWork(ctx, workKey)
402 | 		close(work.finished)
403 | 	}()
404 | }
405 | 
406 | // EndWork will attempt to have the current node drop its claim on a given
407 | // workKey.
408 | func (c *Cluster) EndWork(workKey string) error {
409 | 	return c.endWork(workKey)
410 | }
411 | 
412 | func (c *Cluster) endWork(workKey string) error {
413 | 	if c.owned == nil {
414 | 		panic("c owned is nil")
415 | 	}
416 | 	loaded, ok := c.owned.Load(workKey)
417 | 	if !ok {
418 | 		// Log that we no longer own the work
419 | 		c.logger.Printf("Wanted to cancel %s, but it is not owned", workKey)
420 | 		return NotOwnedError{WorkKey: workKey}
421 | 	}
422 | 
423 | 	work := loaded.(*work)
424 | 
425 | 	// Signal a cancellation
426 | 	work.cancel()
427 | 	// Wait for the work to finish
428 | 	work.waitForFinish(c.endWorkTimeout)
429 | 
430 | 	// Remove the claim to this work
431 | 	claimKey := Key(c, TypeClaim, workKey)
432 | 	c.logger.Printf("Deleting key: %s", claimKey)
433 | 	// Delete the claim if we still own it.
434 | 	if err := c.kv.Delete(claimKey, verifyOwner(claimKey, c.nodeID)); err != nil {
435 | 		// Failure to remove node (or some other problem). For now we can bail
436 | 		// here and hope to remove the work from owned later.
437 | 		c.logger.Printf("Failed to remove claim node %s", err)
438 | 		return err
439 | 	}
440 | 
441 | 	c.logger.Printf("Removing %s from owned", workKey)
442 | 	c.owned.Delete(workKey)
443 | 
444 | 	return nil
445 | }
446 | 
447 | func (c *Cluster) drainWork() {
448 | 	if !c.isLeaving() {
449 | 		c.logger.Printf("Cannot drain when not leaving")
450 | 		return
451 | 	}
452 | 
453 | 	c.owned.Range(func(key, _ interface{}) bool {
454 | 		c.endWork(key.(string))
455 | 		return true
456 | 	})
457 | 
458 | 	// Wait until the owned map is empty or we time out, sleeping between
459 | 	// checks so this loop doesn't spin a CPU.
460 | 	start := time.Now()
461 | 	for time.Since(start) < c.drainTimeout {
462 | 		cnt := 0
463 | 		c.owned.Range(func(_, _ interface{}) bool {
464 | 			cnt++
465 | 			return true
466 | 		})
467 | 		if cnt == 0 {
468 | 			break
469 | 		}
470 | 		time.Sleep(10 * time.Millisecond)
471 | 	}
472 | 
473 | 	c.logger.Printf("All work ended")
474 | }
475 | 
476 | // Key will create a key of any type for a given cluster. Leave ID blank to get a
477 | // suitable prefix (to watch, for instance).
478 | func Key(c *Cluster, t string, id string) string {
479 | 	return fmt.Sprintf("%s-%s-%s", c.clusterName, t, id)
480 | }
481 | 
--------------------------------------------------------------------------------
/cluster/errors.go:
--------------------------------------------------------------------------------
 1 | package cluster
 2 | 
 3 | import (
 4 | 	"fmt"
 5 | )
 6 | 
 7 | type StateTransitionError struct {
 8 | 	From, To string
 9 | }
10 | 
11 | func (e StateTransitionError) Error() string {
12 | 	return fmt.Sprintf("Cannot move from state[%s] to state[%s]", e.From, e.To)
13 | }
14 | 
15 | type NotOwnedError struct {
16 | 	WorkKey string
17 | }
18 | 
19 | func (e NotOwnedError) Error() string {
20 | 	return fmt.Sprintf("%s is not owned by this node", e.WorkKey)
21 | }
22 | 
--------------------------------------------------------------------------------
/cluster/listener.go:
--------------------------------------------------------------------------------
 1 | package cluster
 2 | 
 3 | import (
 4 | 	"context"
 5 | )
 6 | 
 7 | // Listener is the interface for user actions on a Cluster.
 8 | type Listener interface {
 9 | 	// Called when the listener joins a cluster
10 | 	OnJoin(c *Cluster)
11 | 	// Called when the listener leaves a cluster
12 | 	OnLeave()
13 | 	// Called when work is started. Observe the passed context to finish work.
14 | 	StartWork(context.Context, string)
15 | }
16 | 
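17 | // waitListener is an illustrative sketch only; nothing in this package
18 | // uses it. It shows the intended contract: StartWork blocks until the
19 | // passed context is canceled, which is how the cluster tells a node that
20 | // an assignment must stop (see endWork in cluster.go).
21 | type waitListener struct{}
22 | 
23 | func (waitListener) OnJoin(c *Cluster) {}
24 | 
25 | func (waitListener) OnLeave() {}
26 | 
27 | func (waitListener) StartWork(ctx context.Context, workKey string) {
28 | 	// Do the actual work here, then wait for cancellation.
29 | 	<-ctx.Done()
30 | }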
--------------------------------------------------------------------------------
/coordinator/coordinator.go:
--------------------------------------------------------------------------------
  1 | package coordinator
  2 | 
  3 | import (
  4 | 	"context"
  5 | 	"fmt"
  6 | 	stdlog "log"
  7 | 	"os"
  8 | 	"sync"
  9 | 	"time"
 10 | 
 11 | 	// Wait for etcd client v3.4, there is a module import issue.
 12 | 	client "github.com/coreos/etcd/clientv3" // "go.etcd.io/etcd/clientv3"
 13 | 	clientutil "github.com/coreos/etcd/clientv3/clientv3util" // "go.etcd.io/etcd/clientv3/clientv3util"
 14 | 	pb "github.com/coreos/etcd/mvcc/mvccpb" // "go.etcd.io/etcd/mvcc/mvccpb"
 15 | 	"github.com/dforsyth/donut/log"
 16 | )
 17 | 
 18 | // Coordinator is a storage interface
 19 | type Coordinator interface {
 20 | 	Store(key, value string) error
 21 | 	// Store a value at key, with a lifetime attached to client liveness
 22 | 	StoreEphemeral(key, value string) error
 23 | 	// Load(key string) (string, error)
 24 | 	// Delete a key
 25 | 	Delete(key string, ifs ...client.Cmp) error
 26 | 	// Watch a key, populating a map with all values within that key's child
 27 | 	// space
 28 | 	Watch(prefix string, handler WatchMapChangeHandler) (*WatchMap, error)
 29 | 	// Finish stops a Coordinator
 30 | 	Finish() error
 31 | }
 32 | 
 33 | // EtcdCoordinator is a coordinator based on Etcd
 34 | type EtcdCoordinator struct {
 35 | 	client       *client.Client
 36 | 	leaseID      client.LeaseID
 37 | 	timeout      time.Duration
 38 | 	leaseTimeout time.Duration
 39 | 	logger       log.FmtLogger
 40 | }
 41 | 
 42 | type EtcdCoordinatorOption func(*EtcdCoordinator)
 43 | 
 44 | func WithLogger(logger log.FmtLogger) func(*EtcdCoordinator) {
 45 | 	return func(kv *EtcdCoordinator) { kv.logger = logger }
 46 | }
 47 | 
 48 | func WithRequestTimeout(requestTimeout time.Duration) func(*EtcdCoordinator) {
 49 | 	return func(kv *EtcdCoordinator) { kv.timeout = requestTimeout }
 50 | }
 51 | 
 52 | func WithLeaseTimeout(leaseTimeout time.Duration) func(*EtcdCoordinator) {
 53 | 	return func(kv *EtcdCoordinator) { kv.leaseTimeout = leaseTimeout }
 54 | }
 55 | 
 56 | func NewEtcdCoordinator(client *client.Client, opts ...EtcdCoordinatorOption) (*EtcdCoordinator, error) {
 57 | 	kv := &EtcdCoordinator{
 58 | 		client:       client,
 59 | 		timeout:      1 * time.Second,
 60 | 		leaseTimeout: 5 * time.Second,
 61 | 		logger:       stdlog.New(os.Stderr, "", stdlog.LstdFlags),
 62 | 	}
 63 | 	for _, opt := range opts {
 64 | 		opt(kv)
 65 | 	}
 66 | 	_ctx, cancel := context.WithTimeout(context.Background(), kv.timeout)
 67 | 	leaseResp, err := client.Grant(_ctx, int64(kv.leaseTimeout.Seconds()))
 68 | 	cancel()
 69 | 	if err != nil {
 70 | 		return nil, err
 71 | 	}
 72 | 
 73 | 	ch, err := client.KeepAlive(context.Background(), leaseResp.ID)
 74 | 	if err != nil {
 75 | 		return nil, fmt.Errorf("Keep alive error: %s", err)
 76 | 	}
 77 | 	go func() {
 78 | 		for r := range ch {
 79 | 			kv.logger.Printf("Keep alive response: %d", r.ID)
 80 | 		}
 81 | 	}()
 82 | 
 83 | 	kv.leaseID = leaseResp.ID
 84 | 
 85 | 	return kv, nil
 86 | }
 87 | 
 88 | func (kv *EtcdCoordinator) Finish() error {
 89 | 	// We don't close the client in here since we didn't create it here.
 90 | 	return nil
 91 | }
 92 | 
 93 | func (kv *EtcdCoordinator) Store(key, value string) error {
 94 | 	ctx, cancel := context.WithTimeout(context.Background(), kv.timeout)
 95 | 	_, err := kv.client.Txn(ctx).
 96 | 		If(clientutil.KeyMissing(key)).
 97 | 		Then(client.OpPut(key, value)).
 98 | 		Commit()
 99 | 	cancel()
100 | 	if err != nil {
101 | 		return err
102 | 	}
103 | 	return nil
104 | }
105 | 
106 | func (kv *EtcdCoordinator) StoreEphemeral(key, value string) error {
107 | 	ctx, cancel := context.WithTimeout(context.Background(), kv.timeout)
108 | 	resp, err := kv.client.Txn(ctx).
109 | 		If(clientutil.KeyMissing(key)).
110 | 		Then(client.OpPut(key, value, client.WithLease(kv.leaseID))).
111 | 		Commit()
112 | 	cancel()
113 | 	// Check err before resp: resp may be nil when Commit fails.
114 | 	if err != nil {
115 | 		return err
116 | 	}
117 | 	if !resp.Succeeded {
118 | 		return fmt.Errorf("Failed to store %s in txn", key)
119 | 	}
120 | 	kv.logger.Printf("Stored %s", key)
121 | 	return nil
122 | }
123 | 
124 | func (kv *EtcdCoordinator) Delete(key string, cmps ...client.Cmp) error {
125 | 	ctx, cancel := context.WithTimeout(context.Background(), kv.timeout)
126 | 	resp, err := kv.client.Txn(ctx).
127 | 		If(cmps...).
128 | 		Then(client.OpDelete(key)).
129 | 		Commit()
130 | 	cancel()
131 | 	// Check err before resp: resp may be nil when Commit fails.
132 | 	if err != nil {
133 | 		return err
134 | 	}
135 | 	if !resp.Succeeded {
136 | 		return fmt.Errorf("Failed to delete %s in txn", key)
137 | 	}
138 | 	return nil
139 | }
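140 | 
141 | // exampleDurableVsEphemeral is an illustrative sketch only; nothing in
142 | // this package calls it. Both writes go through a key-missing-guarded
143 | // transaction, but the ephemeral key is attached to this coordinator's
144 | // lease and disappears once the owning process stops the keep-alive.
145 | func exampleDurableVsEphemeral(kv Coordinator) error {
146 | 	if err := kv.Store("example-durable", "stays until deleted"); err != nil {
147 | 		return err
148 | 	}
149 | 	return kv.StoreEphemeral("example-ephemeral", "tied to the lease")
150 | }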
151 | 
152 | // Watch a given prefix, firing handler on a change. Returns a WatchMap.
153 | func (kv *EtcdCoordinator) Watch(prefix string, handler WatchMapChangeHandler) (*WatchMap, error) {
154 | 	_ctx, _cancel := context.WithCancel(context.Background())
155 | 	resp, err := kv.client.Get(_ctx, prefix, client.WithPrefix())
156 | 	_cancel()
157 | 	if err != nil {
158 | 		kv.logger.Printf("Failed to get initial for watch %s", err)
159 | 		return nil, err
160 | 	}
161 | 
162 | 	data := make(map[string]string)
163 | 	for _, ekv := range resp.Kvs {
164 | 		data[string(ekv.Key)] = string(ekv.Value)
165 | 	}
166 | 
167 | 	watcher := client.NewWatcher(kv.client)
168 | 	watchChan := watcher.Watch(
169 | 		context.Background(),
170 | 		prefix,
171 | 		client.WithPrefix(),
172 | 		client.WithRev(resp.Header.GetRevision()),
173 | 	)
174 | 	ctx, cancel := context.WithCancel(context.Background())
175 | 
176 | 	nm := &WatchMap{
177 | 		m:      &sync.RWMutex{},
178 | 		data:   data,
179 | 		cancel: cancel,
180 | 	}
181 | 
182 | 	// Run handler on the initial map
183 | 	go handler(nm)
184 | 
185 | 	// Start the watch loop
186 | 	go func() {
187 | 		for {
188 | 			select {
189 | 			case <-ctx.Done():
190 | 				kv.logger.Printf("Watch done")
191 | 				watcher.Close()
192 | 				return
193 | 			case wr := <-watchChan:
194 | 				// If we're canceled, bail
195 | 				if wr.Canceled {
196 | 					return
197 | 				}
198 | 				for _, ev := range wr.Events {
199 | 					k := string(ev.Kv.Key)
200 | 					v := string(ev.Kv.Value)
201 | 					kv.logger.Printf("(%s) Watch event %s %s:%s", prefix, ev.Type, k, v)
202 | 					switch ev.Type {
203 | 					case pb.PUT:
204 | 						nm.set(k, v)
205 | 					case pb.DELETE:
206 | 						nm.delete(k)
207 | 					}
208 | 				}
209 | 
210 | 			}
211 | 			go handler(nm)
212 | 		}
213 | 	}()
214 | 
215 | 	return nm, nil
216 | }
217 | 
--------------------------------------------------------------------------------
/coordinator/map.go:
--------------------------------------------------------------------------------
 1 | package coordinator
 2 | 
 3 | import (
 4 | 	"context"
 5 | 	"sync"
 6 | 	// "go.etcd.io/etcd/client"
 7 | )
 8 | 
 9 | // WatchMap provides a mapped interface to a watched range.
10 | type WatchMap struct {
11 | 	m      *sync.RWMutex
12 | 	data   map[string]string
13 | 	cancel context.CancelFunc
14 | }
15 | 
16 | // WatchMapChangeHandler callback.
17 | type WatchMapChangeHandler func(*WatchMap)
18 | 
19 | // Get returns a value from a WatchMap. If a value is not present,
20 | // bool will be false.
21 | func (m *WatchMap) Get(key string) (string, bool) {
22 | 	m.m.RLock()
23 | 	defer m.m.RUnlock()
24 | 
25 | 	v, ok := m.data[key]
26 | 	return v, ok
27 | }
28 | 
29 | // Keys returns all keys from a WatchMap.
30 | func (m *WatchMap) Keys() []string {
31 | 	m.m.RLock()
32 | 	defer m.m.RUnlock()
33 | 
34 | 	keys := make([]string, 0, len(m.data))
35 | 	for k := range m.data {
36 | 		keys = append(keys, k)
37 | 	}
38 | 	return keys
39 | }
40 | 
41 | func (m *WatchMap) set(key, value string) {
42 | 	m.m.Lock()
43 | 	defer m.m.Unlock()
44 | 
45 | 	m.data[key] = value
46 | }
47 | 
48 | func (m *WatchMap) delete(key string) {
49 | 	m.m.Lock()
50 | 	defer m.m.Unlock()
51 | 
52 | 	delete(m.data, key)
53 | }
54 | 
55 | // Cancel will cancel a map's watch.
56 | func (m *WatchMap) Cancel() {
57 | 	// TODO(dforsyth): Rework this so this function waits until the watcher
58 | 	// loop exits?
59 | 	m.cancel()
60 | }
61 | 
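62 | // exampleSnapshot is an illustrative sketch only; nothing in this package
63 | // uses it. The intended read pattern for a WatchMap is to list Keys() and
64 | // then Get() each one, tolerating entries that the watch loop removes
65 | // between the two calls.
66 | func exampleSnapshot(m *WatchMap) map[string]string {
67 | 	out := make(map[string]string)
68 | 	for _, k := range m.Keys() {
69 | 		if v, ok := m.Get(k); ok {
70 | 			out[k] = v
71 | 		}
72 | 	}
73 | 	return out
74 | }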
--------------------------------------------------------------------------------
/donutctl/cmd/addwork.go:
--------------------------------------------------------------------------------
 1 | package cmd
 2 | 
 3 | import (
 4 | 	"fmt"
 5 | 
 6 | 	"github.com/dforsyth/donut/client"
 7 | 	"github.com/dforsyth/donut/cluster"
 8 | 	"github.com/spf13/cobra"
 9 | )
10 | 
11 | func AddWorkCmd(cls *cluster.Cluster, cli *client.Client) *cobra.Command {
12 | 	return &cobra.Command{
13 | 		Use:  "addwork [name] [value]",
14 | 		Args: cobra.ExactArgs(2),
15 | 		Run: func(cmd *cobra.Command, args []string) {
16 | 			name := args[0]
17 | 			value := args[1]
18 | 
19 | 			workKey, err := cli.CreateWork(cls, name, value)
20 | 			if err != nil {
21 | 				panic(err)
22 | 			}
23 | 			fmt.Println(workKey)
24 | 		},
25 | 	}
26 | }
27 | 
--------------------------------------------------------------------------------
/donutctl/cmd/delwork.go:
--------------------------------------------------------------------------------
 1 | package cmd
 2 | 
 3 | import (
 4 | 	"github.com/dforsyth/donut/client"
 5 | 	"github.com/dforsyth/donut/cluster"
 6 | 	"github.com/spf13/cobra"
 7 | )
 8 | 
 9 | func DelWorkCmd(cls *cluster.Cluster, cli *client.Client) *cobra.Command {
10 | 	return &cobra.Command{
11 | 		Use:  "delwork [name]",
12 | 		Args: cobra.ExactArgs(1),
13 | 		Run: func(cmd *cobra.Command, args []string) {
14 | 			name := args[0]
15 | 
16 | 			if err := cli.DeleteWork(cls, name); err != nil {
17 | 				panic(err)
18 | 			}
19 | 		},
20 | 	}
21 | }
22 | 
--------------------------------------------------------------------------------
/donutctl/cmd/root.go:
--------------------------------------------------------------------------------
 1 | package cmd
 2 | 
 3 | import (
 4 | 	"github.com/dforsyth/donut/client"
 5 | 	"github.com/dforsyth/donut/cluster"
 6 | 	"github.com/spf13/cobra"
 7 | )
 8 | 
 9 | func NewCmd(cls *cluster.Cluster, cli *client.Client) *cobra.Command {
10 | 	rootCmd := &cobra.Command{
11 | 		Use: "donutctl",
12 | 	}
13 | 
14 | 	rootCmd.AddCommand(AddWorkCmd(cls, cli))
15 | 	rootCmd.AddCommand(DelWorkCmd(cls, cli))
16 | 
17 | 	return rootCmd
18 | }
19 | 
--------------------------------------------------------------------------------
/donutctl/main.go:
--------------------------------------------------------------------------------
 1 | package main
 2 | 
 3 | import (
 4 | 	"fmt"
 5 | 	"os"
 6 | 	"strings"
 7 | 
 8 | 	// Wait for etcd client v3.4, there is a module import issue.
 9 | 	"github.com/coreos/etcd/clientv3" // "go.etcd.io/etcd/clientv3"
10 | 	"github.com/dforsyth/donut/client"
11 | 	"github.com/dforsyth/donut/cluster"
12 | 	"github.com/dforsyth/donut/coordinator"
13 | 	"github.com/dforsyth/donut/donutctl/cmd"
14 | )
15 | 
16 | func main() {
17 | 	endpoints, found := os.LookupEnv("ETCD_ENDPOINTS")
18 | 	if !found {
19 | 		fmt.Println("ETCD_ENDPOINTS not set.")
20 | 		os.Exit(1)
21 | 	}
22 | 	endpointsList := strings.Split(endpoints, ",")
23 | 
24 | 	clusterName, found := os.LookupEnv("DONUT_CLUSTER_NAME")
25 | 	if !found {
26 | 		fmt.Println("DONUT_CLUSTER_NAME not set.")
27 | 		os.Exit(1)
28 | 	}
29 | 
30 | 	etcd, err := clientv3.New(clientv3.Config{
31 | 		Endpoints: endpointsList,
32 | 	})
33 | 	if err != nil {
34 | 		fmt.Println(err)
35 | 		os.Exit(1)
36 | 	}
37 | 
38 | 	kv, err := coordinator.NewEtcdCoordinator(etcd)
39 | 	if err != nil {
40 | 		fmt.Println(err)
41 | 		os.Exit(1)
42 | 	}
43 | 
44 | 	cli := client.New(kv)
45 | 	// We need a cluster name, but that's it.
46 | 	cls := cluster.New(clusterName, "donutctl", nil)
47 | 	cmd.NewCmd(cls, cli).Execute()
48 | }
49 | 
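50 | // Illustrative usage (the donutctl-bin name comes from the Makefile's
51 | // build-donutctl target; the endpoint and cluster values are examples):
52 | //
53 | //	ETCD_ENDPOINTS=http://0.0.0.0:2379 DONUT_CLUSTER_NAME=example \
54 | //		./donutctl-bin addwork some-work payload
55 | //	ETCD_ENDPOINTS=http://0.0.0.0:2379 DONUT_CLUSTER_NAME=example \
56 | //		./donutctl-bin delwork some-work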
--------------------------------------------------------------------------------
/example/readme/main.go:
--------------------------------------------------------------------------------
 1 | package main
 2 | 
 3 | import (
 4 | 	"context"
 5 | 	"log"
 6 | 	"os"
 7 | 
 8 | 	// Wait for etcd client v3.4, there is a module import issue.
 9 | 	client "github.com/coreos/etcd/clientv3" // "go.etcd.io/etcd/clientv3"
10 | 	"github.com/dforsyth/donut/cluster"
11 | 	"github.com/dforsyth/donut/coordinator"
12 | )
13 | 
14 | type ExampleListener struct {
15 | 	logger *log.Logger
16 | }
17 | 
18 | func (l *ExampleListener) OnJoin(c *cluster.Cluster) {
19 | 	l.logger.Println("Joined the cluster!")
20 | }
21 | 
22 | func (l *ExampleListener) StartWork(ctx context.Context, workKey string) {
23 | 	l.logger.Println("Starting work " + workKey)
24 | }
25 | 
26 | func (*ExampleListener) OnLeave() {}
27 | 
28 | func main() {
29 | 	logger := log.New(os.Stderr, "", log.LstdFlags)
30 | 	c := cluster.New("example", "node", &ExampleListener{logger: logger})
31 | 	client, err := client.New(client.Config{
32 | 		Endpoints: []string{"http://0.0.0.0:2379"},
33 | 	})
34 | 	if err != nil {
35 | 		logger.Fatalf("Failed to create client: %s", err)
36 | 	}
37 | 	coo, err := coordinator.NewEtcdCoordinator(client)
38 | 	if err != nil {
39 | 		logger.Fatalf("Failed to create coordinator: %s", err)
40 | 	}
41 | 	if err := c.Join(coo); err != nil {
42 | 		logger.Fatalf("Failed to join cluster: %s", err)
43 | 	}
44 | 	select {}
45 | }
46 | 
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
 1 | module github.com/dforsyth/donut
 2 | 
 3 | go 1.12
 4 | 
 5 | require (
 6 | 	github.com/coreos/bbolt v1.3.2 // indirect
 7 | 	github.com/coreos/etcd v3.3.11+incompatible
 8 | 	github.com/coreos/go-semver v0.2.0 // indirect
 9 | 	github.com/coreos/go-systemd v0.0.0-20181031085051-9002847aa142 // indirect
10 | 	github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f // indirect
11 | 	github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect
12 | 	github.com/ghodss/yaml v1.0.0 // indirect
13 | 	github.com/gogo/protobuf v1.2.0 // indirect
14 | 	github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef // indirect
15 | 	github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c // indirect
16 | 	github.com/google/uuid v1.1.0
17 | 	github.com/gorilla/websocket v1.4.0 // indirect
18 | 	github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 // indirect
19 | 	github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
20 | 	github.com/grpc-ecosystem/grpc-gateway v1.7.0 // indirect
21 | 	github.com/inconshreveable/mousetrap v1.0.0 // indirect
22 | 	github.com/jonboulle/clockwork v0.1.0 // indirect
23 | 	github.com/pkg/errors v0.8.1 // indirect
24 | 	github.com/prometheus/client_golang v0.9.2 // indirect
25 | 	github.com/sirupsen/logrus v1.3.0 // indirect
26 | 	github.com/soheilhy/cmux v0.1.4 // indirect
27 | 	github.com/spf13/cobra v0.0.3
28 | 	github.com/spf13/pflag v1.0.3 // indirect
29 | 	github.com/stretchr/testify v1.2.2
30 | 	github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 // indirect
31 | 	github.com/ugorji/go/codec v0.0.0-20190128213124-ee1426cffec0 // indirect
32 | 	github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect
33 | 	go.etcd.io/bbolt v1.3.2 // indirect
34 | 	go.etcd.io/etcd v3.3.11+incompatible
35 | 	go.uber.org/atomic v1.3.2 // indirect
36 |
go.uber.org/multierr v1.1.0 // indirect 37 | go.uber.org/zap v1.9.1 // indirect 38 | golang.org/x/crypto v0.0.0-20190129210102-ccddf3741a0c // indirect 39 | golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 40 | golang.org/x/time v0.0.0-20181108054448-85acf8d2951c // indirect 41 | google.golang.org/grpc v1.18.0 // indirect 42 | gopkg.in/yaml.v2 v2.2.2 // indirect 43 | ) 44 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= 2 | github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= 3 | github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= 4 | github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= 5 | github.com/coreos/bbolt v1.3.2 h1:wZwiHHUieZCquLkDL0B8UhzreNWsPHooDAG3q34zk0s= 6 | github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= 7 | github.com/coreos/etcd v3.3.9+incompatible h1:iKSVPXGNGqroBx4+RmUXv8emeU7y+ucRZSzTYgzLZwM= 8 | github.com/coreos/etcd v3.3.9+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= 9 | github.com/coreos/etcd v3.3.10+incompatible h1:jFneRYjIvLMLhDLCzuTuU4rSJUjRplcJQ7pD7MnhC04= 10 | github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= 11 | github.com/coreos/etcd v3.3.11+incompatible h1:0gCnqKsq7XxMi69JsnbmMc1o+RJH3XH64sV9aiTTYko= 12 | github.com/coreos/etcd v3.3.11+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= 13 | github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY= 14 | github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= 15 | github.com/coreos/go-systemd v0.0.0-20181031085051-9002847aa142 h1:3jFq2xL4ZajGK4aZY8jz+DAF0FHjI51BXjjSwCzS1Dk= 16 | github.com/coreos/go-systemd v0.0.0-20181031085051-9002847aa142/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= 17 | github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= 18 | github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= 19 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 20 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 21 | github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= 22 | github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= 23 | github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= 24 | github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= 25 | github.com/gogo/protobuf v1.2.0 h1:xU6/SpYbvkNYiptHJYEDRseDLvYE7wSqhYYNy0QSUzI= 26 | github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= 27 | github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= 28 | github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= 29 | github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk= 30 | github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod 
h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= 31 | github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= 32 | github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= 33 | github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 34 | github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c h1:964Od4U6p2jUkFxvCydnIczKteheJEzHRToSGK3Bnlw= 35 | github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= 36 | github.com/google/uuid v1.1.0 h1:Jf4mxPC/ziBnoPIdpQdPJ9OeiomAUHLvxmPRSPH9m4s= 37 | github.com/google/uuid v1.1.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 38 | github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q= 39 | github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= 40 | github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 h1:Iju5GlWwrvL6UBg4zJJt3btmonfrMlCDdsejg4CZE7c= 41 | github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= 42 | github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= 43 | github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= 44 | github.com/grpc-ecosystem/grpc-gateway v1.7.0 h1:tPFY/SM+d656aSgLWO2Eckc3ExwpwwybwdN5Ph20h1A= 45 | github.com/grpc-ecosystem/grpc-gateway v1.7.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= 46 | github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= 47 | github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= 48 | github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= 49 | github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= 50 | github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= 51 | github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= 52 | github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= 53 | github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= 54 | github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= 55 | github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= 56 | github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 57 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 58 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 59 | github.com/prometheus/client_golang v0.9.2 h1:awm861/B8OKDd2I/6o1dy3ra4BamzKhYOiGItCeZ740= 60 | github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= 61 | github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= 62 | github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= 63 | github.com/prometheus/common v0.0.0-20181126121408-4724e9255275 h1:PnBWHBf+6L0jOqq0gIVUe6Yk0/QMZ640k6NvkxcBf+8= 64 | github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= 65 | 
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a h1:9a8MnZMP0X2nLJdBg+pBmGgkJlSaKC2KaQmTCk1XDtE= 66 | github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= 67 | github.com/sirupsen/logrus v1.3.0 h1:hI/7Q+DtNZ2kINb6qt/lS+IyXnHQe9e90POfeewL/ME= 68 | github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= 69 | github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= 70 | github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= 71 | github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8= 72 | github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= 73 | github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= 74 | github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= 75 | github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= 76 | github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 77 | github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= 78 | github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= 79 | github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ= 80 | github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= 81 | github.com/ugorji/go v1.1.2 h1:JON3E2/GPW2iDNGoSAusl1KDf5TRQ8k8q7Tp097pZGs= 82 | github.com/ugorji/go v1.1.2/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ= 83 | github.com/ugorji/go/codec v0.0.0-20190128213124-ee1426cffec0 h1:Q3Bh5Dwzek5LreV9l86IftyLaexgU1mag9WNntbAW9c= 84 | github.com/ugorji/go/codec v0.0.0-20190128213124-ee1426cffec0/go.mod h1:iT03XoTwV7xq/+UGwKO3UbC1nNNlopQiY61beSdrtOA= 85 | github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= 86 | github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= 87 | go.etcd.io/bbolt v1.3.2 h1:Z/90sZLPOeCy2PwprqkFa25PdkusRzaj9P8zm/KNyvk= 88 | go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= 89 | go.etcd.io/etcd v3.3.11+incompatible h1:AVwRXu9VIzZcvVe1nSirTVkNv7WT3/hwdMRrDVFsf3A= 90 | go.etcd.io/etcd v3.3.11+incompatible/go.mod h1:yaeTdrJi5lOmYerz05bd8+V7KubZs8YSFZfzsF9A6aI= 91 | go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4= 92 | go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= 93 | go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= 94 | go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= 95 | go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o= 96 | go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= 97 | golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= 98 | golang.org/x/crypto v0.0.0-20190129210102-ccddf3741a0c h1:MWY7h75sb9ioBR+s5Zgq1JYXxhbZvrSP2okwLi3ItmI= 99 | golang.org/x/crypto v0.0.0-20190129210102-ccddf3741a0c/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= 100 | golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= 101 | golang.org/x/net v0.0.0-20180826012351-8a410e7b638d 
h1:g9qWBGx4puODJTMVyoPrpoxPFgVGd+z1DZwjfRu4d0I= 102 | golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 103 | golang.org/x/net v0.0.0-20181201002055-351d144fa1fc h1:a3CU5tJYVj92DY2LaA1kUkrsqD5/3mLDhx2NcNqyW+0= 104 | golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 105 | golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= 106 | golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 107 | golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 108 | golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= 109 | golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 110 | golang.org/x/sys v0.0.0-20180830151530-49385e6e1522 h1:Ve1ORMCxvRmSXBwJK+t3Oy+V2vRW2OetUQBq4rJIkZE= 111 | golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 112 | golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33 h1:I6FyU15t786LL7oL/hn43zqTuEGr4PN7F4XJ1p4E3Y8= 113 | golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 114 | golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= 115 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 116 | golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg= 117 | golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 118 | golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 119 | google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= 120 | google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= 121 | google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= 122 | google.golang.org/grpc v1.18.0 h1:IZl7mfBGfbhYx2p2rKRtYgDFw6SBz+kclmxYrCksPPA= 123 | google.golang.org/grpc v1.18.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= 124 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= 125 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 126 | gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= 127 | gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 128 | honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= 129 | -------------------------------------------------------------------------------- /itest/cluster_test.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "testing" 7 | "time" 8 | 9 | "github.com/coreos/etcd/clientv3" // "go.etcd.io/etcd/clientv3" 10 | "github.com/dforsyth/donut/client" 11 | "github.com/dforsyth/donut/cluster" 12 | "github.com/google/uuid" 13 | "github.com/stretchr/testify/assert" 14 | "github.com/stretchr/testify/mock" 15 | "golang.org/x/sync/syncmap" 16 | ) 17 | 18 | func TestNew(t *testing.T) { 19 | assert.Nil(t, nil, "Sanity") 20 | } 
21 | 22 | // TODO(dforsyth): Generate this 23 | type mockListener struct { 24 | mock.Mock 25 | } 26 | 27 | func newMockListener() *mockListener { 28 | lst := &mockListener{} 29 | lst.On("OnJoin").Return() 30 | lst.On("OnLeave").Return() 31 | lst.On("StartWork", mock.Anything, mock.Anything).Return() 32 | return lst 33 | } 34 | 35 | func (l *mockListener) OnJoin(_ *cluster.Cluster) { 36 | l.Called() 37 | } 38 | func (l *mockListener) OnLeave() { 39 | l.Called() 40 | } 41 | func (l *mockListener) StartWork(ctx context.Context, wk string) { 42 | l.Called(ctx, wk) 43 | } 44 | 45 | func TestJoinAndLeave(t *testing.T) { 46 | cli := NewTestClient() 47 | kv := NewTestCoordinator(cli) 48 | id := uuid.New().String() 49 | nodeID := fmt.Sprintf("test-node-%s-0", id) 50 | lst := newMockListener() 51 | cls := cluster.New( 52 | fmt.Sprintf("test-%s", id), 53 | nodeID, lst, 54 | ) 55 | err := cls.Join(kv) 56 | assert.Nil(t, err, "Should join with no error") 57 | 58 | nodeKey := cluster.Key(cls, cluster.TypeNode, nodeID) 59 | 60 | ctx := context.TODO() 61 | _, gerr := cli.Get(ctx, nodeKey) 62 | assert.NoError(t, gerr) 63 | 64 | lst.AssertCalled(t, "OnJoin") 65 | 66 | cls.Leave() 67 | 68 | resp, lerr := cli.Get(ctx, nodeKey) 69 | assert.NoError(t, lerr) 70 | assert.Equal(t, 0, len(resp.Kvs)) 71 | 72 | lst.AssertCalled(t, "OnLeave") 73 | } 74 | 75 | func TestClaimWork(t *testing.T) { 76 | cli := NewTestClient() 77 | kv := NewTestCoordinator(cli) 78 | id := uuid.New().String() 79 | lst := newMockListener() 80 | cls := cluster.New( 81 | fmt.Sprintf("test-%s", id), 82 | fmt.Sprintf("test-node-%s-0", id), 83 | lst, 84 | ) 85 | err := cls.Join(kv) 86 | defer cls.Leave() 87 | assert.Nil(t, err, "Should join with no error") 88 | 89 | ccli := client.New(kv) 90 | wk, cerr := ccli.CreateWork(cls, "test-work", "") 91 | assert.NoError(t, cerr) 92 | 93 | // Wait for the claim procedures. If this flakes, increase the time on this. 
 94 | 	time.Sleep(1 * time.Second)
 95 | 
 96 | 	_, ok := cls.GetOwned().Load(wk)
 97 | 	assert.True(t, ok)
 98 | 
 99 | 	lst.AssertCalled(t, "StartWork", mock.Anything, wk)
100 | }
101 | 
102 | type neverClaimBalancer struct{}
103 | 
104 | func (b *neverClaimBalancer) CanClaim(_ string) bool { return false }
105 | func (b *neverClaimBalancer) OnBalance()             {}
106 | 
107 | type neverReclaimBalancerListener struct {
108 | 	claimed syncmap.Map
109 | }
110 | 
111 | func (b *neverReclaimBalancerListener) CanClaim(wk string) bool {
112 | 	_, ok := b.claimed.Load(wk)
113 | 	return !ok
114 | }
115 | func (b *neverReclaimBalancerListener) OnBalance()                  {}
116 | func (b *neverReclaimBalancerListener) OnJoin(cls *cluster.Cluster) {}
117 | func (b *neverReclaimBalancerListener) OnLeave()                    {}
118 | func (b *neverReclaimBalancerListener) StartWork(ctx context.Context, workKey string) {
119 | 	b.claimed.Store(workKey, nil)
120 | 	select {
121 | 	case <-ctx.Done():
122 | 	}
123 | }
124 | 
125 | func TestEndwork(t *testing.T) {
126 | 	var bl neverReclaimBalancerListener
127 | 	cls := cluster.New(
128 | 		"testendwork",
129 | 		"testendwork",
130 | 		&bl,
131 | 		cluster.WithBalancer(&bl),
132 | 	)
133 | 	cli := NewTestClient()
134 | 	coo := NewTestCoordinator(cli)
135 | 	ccli := client.New(coo)
136 | 	assert.NoError(t, cls.Join(coo))
137 | 
138 | 	wk, cerr := ccli.CreateWork(cls, "testendwork", "")
139 | 	assert.NoError(t, cerr)
140 | 
141 | 	// Sleep for the claim
142 | 	time.Sleep(1 * time.Second)
143 | 	_, cok := cls.GetOwned().Load(wk)
144 | 	assert.True(t, cok)
145 | 
146 | 	assert.NoError(t, cls.EndWork(wk))
147 | 	time.Sleep(1 * time.Second)
148 | 
149 | 	_, eok := cls.GetOwned().Load(wk)
150 | 	assert.False(t, eok)
151 | 
152 | 	resp, eerr := cli.Get(context.TODO(), cluster.Key(cls, cluster.TypeClaim, wk))
153 | 	assert.NoError(t, eerr)
154 | 	assert.Equal(t, 0, len(resp.Kvs))
155 | }
156 | 
157 | func TestEndworkNotOwned(t *testing.T) {
158 | 	var l neverClaimBalancer
159 | 	cls := cluster.New(
160 | 		"testendworknotowned",
161 | 		"testendworknotowned",
162 | 		newMockListener(),
163 | 		cluster.WithBalancer(&l),
164 | 	)
165 | 	coo := NewTestCoordinator(NewTestClient())
166 | 	assert.NoError(t, cls.Join(coo))
167 | 
168 | 	err := cls.EndWork("imaginary-work")
169 | 	assert.Error(t, err)
170 | 
171 | 	cls.Leave()
172 | 	coo.Finish()
173 | }
174 | 
175 | func TestEndworkOwnedButNoClaim(t *testing.T) {
176 | 	cls := cluster.New(
177 | 		"testendworknoclaim",
178 | 		"testendworknoclaim",
179 | 		newMockListener(),
180 | 	)
181 | 	cli := NewTestClient()
182 | 	coo := NewTestCoordinator(cli)
183 | 	ccli := client.New(coo)
184 | 	assert.NoError(t, cls.Join(coo))
185 | 
186 | 	wk, cerr := ccli.CreateWork(cls, "testendworknoclaim", "")
187 | 	assert.NoError(t, cerr)
188 | 
189 | 	// Sleep for the claim
190 | 	time.Sleep(1 * time.Second)
191 | 	_, cok := cls.GetOwned().Load(wk)
192 | 	assert.True(t, cok)
193 | 
194 | 	// Drop the claim key
195 | 	ck := cluster.Key(cls, cluster.TypeClaim, wk)
196 | 	leases, lerr := cli.Leases(context.TODO())
197 | 	assert.NoError(t, lerr)
198 | 	_, perr := cli.Put(context.TODO(), ck, "imaginary", clientv3.WithLease(leases.Leases[0].ID))
199 | 	assert.NoError(t, perr)
200 | 
201 | 	err := cls.EndWork(wk)
202 | 	assert.Error(t, err)
203 | }
204 | 
--------------------------------------------------------------------------------
/itest/coordinator_test.go:
--------------------------------------------------------------------------------
 1 | package test
 2 | 
 3 | import (
 4 | 	"context"
 5 | 	"testing"
 6 | 
 7 | 	"github.com/stretchr/testify/assert"
 8 | )
 9 | 
10 | func TestStoreEphemeral(t *testing.T) {
11 | 	cli := NewTestClient()
12 | 	coordinator := NewTestCoordinator(cli)
13 | 
14 | 	ctx := context.TODO()
15 | 
16 | 	assert.NoError(t, coordinator.StoreEphemeral("ephemeral", "value"))
17 | 
18 | 	fr, err := cli.Get(ctx, "ephemeral")
19 | 	assert.NoError(t, err)
20 | 	assert.Equal(t, string(fr.Kvs[0].Value), "value")
21 | 
22 | 	coordinator.Finish()
23 | 	cli.Close()
24 | 
25 | 	sr, err := cli.Get(ctx, "ephemeral")
26 | 	assert.Error(t, err)
27 | 	assert.Nil(t, sr)
28 | }
29 | 
30 | func TestStore(t *testing.T) {
31 | 
32 | }
33 | 
34 | func TestDelete(t *testing.T) {
35 | 
36 | }
37 | 
38 | func TestDeleteWithCondition(t *testing.T) {
39 | 
40 | }
41 | 
42 | func TestWatch(t *testing.T) {
43 | 
44 | }
45 | 
--------------------------------------------------------------------------------
/itest/util.go:
--------------------------------------------------------------------------------
 1 | package test
 2 | 
 3 | import (
 4 | 	// Wait for etcd client v3.4, there is a module import issue.
 5 | 	"github.com/coreos/etcd/clientv3" // "go.etcd.io/etcd/clientv3"
 6 | 	"github.com/dforsyth/donut/coordinator"
 7 | 	"time"
 8 | )
 9 | 
10 | // NewTestClient creates an insecure "test" client, which points at
11 | // 0.0.0.0
12 | func NewTestClient() *clientv3.Client {
13 | 	endpoints := []string{"http://0.0.0.0:2379"}
14 | 	config := clientv3.Config{
15 | 		Endpoints: endpoints,
16 | 	}
17 | 
18 | 	cli, err := clientv3.New(config)
19 | 	if err != nil {
20 | 		panic(err)
21 | 	}
22 | 	return cli
23 | }
24 | 
25 | // NewTestCoordinator creates a "test" Coordinator with some simple
26 | // defaults for testing.
27 | func NewTestCoordinator(cli *clientv3.Client) coordinator.Coordinator {
28 | 	kv, err := coordinator.NewEtcdCoordinator(cli,
29 | 		coordinator.WithRequestTimeout(1*time.Second),
30 | 		coordinator.WithLeaseTimeout(1*time.Second))
31 | 	if err != nil {
32 | 		panic(err)
33 | 	}
34 | 	return kv
35 | }
36 | 
--------------------------------------------------------------------------------
/log/log.go:
--------------------------------------------------------------------------------
 1 | package log
 2 | 
 3 | // FmtLogger is a trivial interface for passing a logger to
 4 | // a cluster. This is not something to use outside of donut.
 5 | type FmtLogger interface {
 6 | 	Printf(format string, v ...interface{})
 7 | }
 8 | 
--------------------------------------------------------------------------------
/script/run-etcd.sh:
--------------------------------------------------------------------------------
 1 | #!/usr/bin/env bash
 2 | 
 3 | rm -rf /tmp/etcd-data.tmp && mkdir -p /tmp/etcd-data.tmp && \
 4 |   docker rmi -f gcr.io/etcd-development/etcd:v3.3.11 || true && \
 5 |   docker run \
 6 |   -p 2379:2379 \
 7 |   -p 2380:2380 \
 8 |   --name ${1} \
 9 |   --mount type=bind,source=/tmp/etcd-data.tmp,destination=/etcd-data \
10 |   gcr.io/etcd-development/etcd:v3.3.11 \
11 |   /usr/local/bin/etcd \
12 |   --name s1 \
13 |   --data-dir /etcd-data \
14 |   --listen-client-urls http://0.0.0.0:2379 \
15 |   --advertise-client-urls http://0.0.0.0:2379 \
16 |   --listen-peer-urls http://0.0.0.0:2380 \
17 |   --initial-advertise-peer-urls http://0.0.0.0:2380 \
18 |   --initial-cluster s1=http://0.0.0.0:2380 \
19 |   --initial-cluster-token tkn \
20 |   --initial-cluster-state new
--------------------------------------------------------------------------------
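script/run-etcd.sh takes the Docker container name as its only positional
argument. A hypothetical invocation (the container name is illustrative):

```sh
./script/run-etcd.sh donut-etcd
```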