├── .gitignore ├── .travis.yml ├── Dockerfile ├── LICENSE ├── README.md ├── commands └── root.go ├── main.go ├── plugin ├── common_test.go ├── filesystem.go ├── volume.go └── volume_test.go ├── providers ├── client.go ├── common.go ├── common_test.go ├── config.go ├── config_test.go ├── disk.go ├── disk_test.go ├── network.go └── network_test.go └── watcher ├── watcher.go ├── watcher_test.go ├── worker.go └── worker_test.go /.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled Object files, Static and Dynamic libs (Shared Objects) 2 | *.o 3 | *.a 4 | *.so 5 | 6 | # Folders 7 | _obj 8 | _test 9 | 10 | # Architecture specific extensions/prefixes 11 | *.[568vq] 12 | [568vq].out 13 | 14 | *.cgo1.go 15 | *.cgo2.c 16 | _cgo_defun.c 17 | _cgo_gotypes.go 18 | _cgo_export.* 19 | 20 | _testmain.go 21 | 22 | *.exe 23 | *.test 24 | *.prof 25 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: go 2 | go: 3 | - 1.6 4 | 5 | script: 6 | - go test -v ./... 7 | 8 | sudo: false 9 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:jessie 2 | MAINTAINER Máximo Cuadros 3 | 4 | RUN apt-get update \ 5 | && apt-get install -y ca-certificates \ 6 | && apt-get clean \ 7 | && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* 8 | 9 | ENV DOCKER_HOST unix:///var/run/docker.sock 10 | 11 | ADD gce-docker /bin/ 12 | CMD ["gce-docker"] 13 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2016 Máximo Cuadros 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Google Cloud Engine integration for Docker [![Build Status](https://travis-ci.org/mcuadros/gce-docker.svg?branch=master)](https://travis-ci.org/mcuadros/gce-docker) 2 | 3 | __gce-docker__ is a service that integrates Google Compute Engine with Docker; the following resources are supported: 4 | 5 | - __Persistent Disks__: the service is able to attach, format and mount [_persistent-disks_](https://cloud.google.com/compute/docs/disks/persistent-disks), allowing them to be used as volumes in containers 6 | - __Load Balancers & External IPs__: supports auto-creation of load balancers and external IPs, allowing direct access to the container. 7 | 8 | 9 | Examples 10 | -------- 11 | 12 | #### Creating a Persistent Disk and mounting it as a volume in a Container 13 | 14 | ```sh 15 | docker run -ti -v my-disk:/data --volume-driver=gce busybox df -h /data 16 | 17 | ``` 18 | 19 | #### Creating a simple Load Balancer with a static IP 20 | 21 | ```sh 22 | docker run -d --label gce.lb.address=104.197.200.230 --label gce.lb.type=static -p 80:80 tutum/hello-world 23 | ``` 24 | 25 | 26 | Installing 27 | ---------- 28 | The recommended way to install `gce-docker` is to use the provided docker image. 29 | 30 | Run the driver using the following command: 31 | ```sh 32 | docker run -d -v /:/rootfs -v /run/docker/plugins:/run/docker/plugins -v /var/run/docker.sock:/var/run/docker.sock --privileged mcuadros/gce-docker 33 | ``` 34 | 35 | `privileged` is required since `gce-docker` needs low-level access to the host mount namespace: the driver mounts, unmounts and formats disks. 36 | 37 | > The instance requires `Read/Write` access to Google Compute Engine, and the IP forwarding flag should be enabled too. 38 | 39 | Usage 40 | ----- 41 | 42 | ### Persistent Disks 43 | #### Persistent disk creation 44 | 45 | Using `docker volume create`, a new disk is created: 46 | ```sh 47 | docker volume create --driver=gce --name my-disk -o SizeGb=90 48 | ``` 49 | 50 | Options: 51 | - __Type__ (_optional, default: `pd-standard`_, options: `pd-ssd` or `pd-standard`): Disk type used to create the disk. 52 | - __SizeGb__ (optional): Size of the persistent disk, specified in GB. 53 | - __SourceSnapshot__ (optional): The source snapshot used to create this disk. 54 | - __SourceImage__ (optional): The source image used to create this disk. 55 | 56 | 57 | #### Using a disk on your container 58 | 59 | Just add the `--volume-driver=gce` and `-v <disk-name>:/data` flags to any `docker run` command: 60 | 61 | ```sh 62 | docker run -ti -v my-disk:/data --volume-driver=gce busybox sh 63 | ``` 64 | 65 | If the disk already exists it will be used; if not, a new one with the default values will be created (`pd-standard`, 500GB). 66 | 67 | The disk is attached to the instance and, if not already formatted, is formatted with `ext4`. When the container stops, the disk is unmounted and detached. 68 | 69 | 70 | 71 | ### Load Balancer 72 | Load balancers are handled by a watcher that listens for Docker events; the watched events are `start` and `die`. When a container is created or destroyed, the load balancer and all its dependent resources are created or deleted too.
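With the `ephemeral` type no address has to be prepared beforehand. For the `static` type, the IP referenced by `gce.lb.address` must already be reserved in the project, in the same region as the instance; as a sketch (assuming the Cloud SDK is installed, and with a placeholder name and region), such a reservation can be created with:

```sh
gcloud compute addresses create my-lb-ip --region us-central1
```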
73 | 74 | This is a small example creating a load balancer for a web server: 75 | ```sh 76 | docker run -d --label gce.lb.type=ephemeral -p 80:80 tutum/hello-world 77 | ``` 78 | 79 | Available labels: 80 | - __gce.lb.type__ (options: `ephemeral` or `static`): Type of IP to be used in the new load balancer 81 | - __gce.lb.group__ (optional): Name of the group of instances to assign to the same load balancer. If not provided, a combination of the instance name and container id will be used. 82 | - __gce.lb.address__ (optional, required with type `static`): Value of the reserved IP address that the forwarding rule is serving on behalf of. Either the IP address or the IP name. 83 | - __gce.lb.source.ranges__ (optional): The IP address blocks that this load balancer applies to, expressed in CIDR format. One or both of sourceRanges and sourceTags may be set. 84 | - __gce.lb.source.tags__ (optional): A list of instance tags to which this rule applies. One or both of sourceRanges and sourceTags may be set. 85 | - __gce.lb.session.affinity__ (optional): Session affinity option; must be one of the following values: 86 | - `NONE`: Connections from the same client IP may go to any instance in the pool. 87 | - `CLIENT_IP`: Connections from the same client IP will go to the same instance in the pool while that instance remains healthy. 88 | - `CLIENT_IP_PROTO`: Connections from the same client IP with the same IP protocol will go to the same instance in the pool while that instance remains healthy. 89 | 90 | 91 | 92 | 93 | License 94 | ------- 95 | 96 | MIT, see [LICENSE](LICENSE) 97 | -------------------------------------------------------------------------------- /commands/root.go: -------------------------------------------------------------------------------- 1 | package commands 2 | 3 | import ( 4 | "fmt" 5 | "net/http" 6 | "os" 7 | 8 | "golang.org/x/net/context" 9 | "golang.org/x/oauth2/google" 10 | 11 | "google.golang.org/api/compute/v1" 12 | "google.golang.org/cloud/compute/metadata" 13 | 14 | "gopkg.in/inconshreveable/log15.v2" 15 | 16 | "github.com/docker/go-plugins-helpers/volume" 17 | "github.com/fsouza/go-dockerclient" 18 | "github.com/mcuadros/gce-docker/plugin" 19 | "github.com/mcuadros/gce-docker/watcher" 20 | "github.com/spf13/cobra" 21 | ) 22 | 23 | type RootCommand struct { 24 | LogLevel string 25 | LogFile string 26 | 27 | project string 28 | zone string 29 | instance string 30 | client *http.Client 31 | } 32 | 33 | func NewRootCommand() *RootCommand { 34 | return &RootCommand{} 35 | } 36 | 37 | func (c *RootCommand) Command() *cobra.Command { 38 | cmd := &cobra.Command{ 39 | Use: "gce-docker", 40 | Short: "gce-docker - Google Cloud Engine integration for Docker", 41 | RunE: c.Execute, 42 | } 43 | 44 | cmd.Flags().StringVar(&c.LogFile, "log-file", "", "log file") 45 | cmd.Flags().StringVar(&c.LogLevel, "log-level", "info", "max log level enabled") 46 | return cmd 47 | } 48 | 49 | func (c *RootCommand) Execute(cmd *cobra.Command, args []string) error { 50 | if err := c.checkGCE(); err != nil { 51 | return err 52 | } 53 | 54 | if err := c.loadMetadataInfo(); err != nil { 55 | return err 56 | } 57 | 58 | if err := c.setupLogging(); err != nil { 59 | return err 60 | } 61 | 62 | if err := c.buildComputeClient(); err != nil { 63 | return err 64 | } 65 | 66 | go func() { 67 | if err := c.runWatcher(); err != nil { 68 | log15.Crit(err.Error()) 69 | } 70 | }() 71 | 72 | go func() { 73 | if err := c.runVolumePlugin(); err != nil { 74 | log15.Crit(err.Error()) 75 | } 76 | }() 77 | 78 | select {} 79 | return nil 80
| } 81 | 82 | func (c *RootCommand) checkGCE() error { 83 | if !metadata.OnGCE() { 84 | return fmt.Errorf("gce-docker driver only runs on Google Compute Engine") 85 | } 86 | 87 | return nil 88 | } 89 | 90 | func (c *RootCommand) loadMetadataInfo() error { 91 | var err error 92 | c.instance, err = metadata.InstanceName() 93 | if err != nil { 94 | return fmt.Errorf("error retrieving instance name: %s", err) 95 | } 96 | 97 | c.zone, err = metadata.Zone() 98 | if err != nil { 99 | return fmt.Errorf("error retrieving zone: %s", err) 100 | } 101 | 102 | c.project, err = metadata.ProjectID() 103 | if err != nil { 104 | return fmt.Errorf("error retrieving project: %s", err) 105 | } 106 | 107 | return nil 108 | } 109 | 110 | func (c *RootCommand) setupLogging() error { 111 | lvl, err := log15.LvlFromString(c.LogLevel) 112 | if err != nil { 113 | return fmt.Errorf("unknown log level name %q", c.LogLevel) 114 | } 115 | 116 | handler := log15.StdoutHandler 117 | format := log15.LogfmtFormat() 118 | 119 | if c.LogFile != "" { 120 | handler = log15.MultiHandler(handler, log15.Must.FileHandler(c.LogFile, format)) 121 | } 122 | 123 | handler = log15.LvlFilterHandler(lvl, handler) 124 | 125 | if lvl == log15.LvlDebug { 126 | handler = log15.CallerFileHandler(log15.LvlFilterHandler(lvl, handler)) 127 | } 128 | 129 | log15.Root().SetHandler(handler) 130 | return nil 131 | } 132 | 133 | func (c *RootCommand) buildComputeClient() error { 134 | ctx := context.Background() 135 | 136 | var err error 137 | c.client, err = google.DefaultClient(ctx, compute.ComputeScope) 138 | if err != nil { 139 | return fmt.Errorf("error building compute client: %s", err) 140 | } 141 | 142 | return nil 143 | } 144 | 145 | func (c *RootCommand) runWatcher() error { 146 | log15.Info("starting watcher", "project", c.project, "zone", c.zone, "instance", c.instance) 147 | d, err := docker.NewClientFromEnv() 148 | if err != nil { 149 | return fmt.Errorf("error creating docker client: %s", err) 150 | } 151 | 152 | w, err := watcher.NewWatcher(d, c.client, c.project, c.zone, c.instance) 153 | if err != nil { 154 | return fmt.Errorf("error creating watcher: %s", err) 155 | } 156 | 157 | if err := w.Watch(); err != nil { 158 | return fmt.Errorf("error starting watcher: %s", err) 159 | } 160 | 161 | return nil 162 | } 163 | 164 | func (c *RootCommand) runVolumePlugin() error { 165 | log15.Info("starting volume driver", "project", c.project, "zone", c.zone, "instance", c.instance) 166 | d, err := plugin.NewVolume(c.client, c.project, c.zone, c.instance) 167 | if err != nil { 168 | return fmt.Errorf("error creating volume plugin: %s", err) 169 | } 170 | 171 | h := volume.NewHandler(d) 172 | if err := h.ServeUnix("docker", "gce"); err != nil { 173 | return fmt.Errorf("error starting volume driver server: %s", err) 174 | } 175 | 176 | return nil 177 | } 178 | 179 | var RootCmd = NewRootCommand().Command() 180 | 181 | func Execute() { 182 | if err := RootCmd.Execute(); err != nil { 183 | fmt.Println(err) 184 | os.Exit(-1) 185 | } 186 | } 187 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | 7 | "github.com/mcuadros/gce-docker/commands" 8 | ) 9 | 10 | func main() { 11 | if err := commands.RootCmd.Execute(); err != nil { 12 | fmt.Println(err) 13 | os.Exit(-1) 14 | } 15 | } 16 | -------------------------------------------------------------------------------- 
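As a sketch of running the driver outside the provided Docker image (it must still run on a GCE instance, see `checkGCE` above), the binary can be built and started directly with the flags defined in `commands/root.go`; the log file path below is just a placeholder:

```sh
go build -o gce-docker .
./gce-docker --log-level debug --log-file /var/log/gce-docker.log
```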
/plugin/common_test.go: -------------------------------------------------------------------------------- 1 | package plugin 2 | 3 | import ( 4 | "testing" 5 | 6 | . "gopkg.in/check.v1" 7 | ) 8 | 9 | func Test(t *testing.T) { TestingT(t) } 10 | -------------------------------------------------------------------------------- /plugin/filesystem.go: -------------------------------------------------------------------------------- 1 | package plugin 2 | 3 | import ( 4 | "fmt" 5 | "io/ioutil" 6 | "os/exec" 7 | "strings" 8 | 9 | "github.com/spf13/afero" 10 | "gopkg.in/inconshreveable/log15.v2" 11 | ) 12 | 13 | var ( 14 | DefaultFStype = "ext4" 15 | DefaultMountOptions = []string{"discard", "defaults"} 16 | HostFilesystem = "/rootfs/" 17 | MountNamespace = "/rootfs/proc/1/ns/mnt" 18 | CGroupFilename = "/proc/1/cgroup" 19 | ) 20 | 21 | type Filesystem interface { 22 | afero.Fs 23 | Mount(source string, target string) error 24 | Unmount(target string) error 25 | Format(source string) error 26 | } 27 | 28 | type OSFilesystem struct { 29 | inContainer bool 30 | afero.Fs 31 | } 32 | 33 | func NewFilesystem() Filesystem { 34 | fs := afero.NewOsFs() 35 | 36 | inContainer := inContainer() 37 | 38 | if inContainer { 39 | log15.Info("running inside of container") 40 | fs = afero.NewBasePathFs(fs, HostFilesystem) 41 | } 42 | 43 | return &OSFilesystem{inContainer: inContainer, Fs: fs} 44 | } 45 | 46 | var nsenterArgs = []string{ 47 | "nsenter", 48 | fmt.Sprintf("--mount=%s", MountNamespace), 49 | "--", 50 | } 51 | 52 | func (fs *OSFilesystem) Mount(source string, target string) error { 53 | args := fs.getMountArgs(source, target, DefaultFStype, DefaultMountOptions) 54 | 55 | command := exec.Command(args[0], args[1:]...) 56 | output, err := command.CombinedOutput() 57 | if err != nil { 58 | return fmt.Errorf( 59 | "mount failed, arguments: %q\noutput: %s\n", 60 | args, string(output), 61 | ) 62 | } 63 | 64 | return err 65 | } 66 | 67 | func (fs *OSFilesystem) getMountArgs(source, target, fstype string, options []string) []string { 68 | var args []string 69 | args = append(args, "mount") 70 | 71 | if len(fstype) > 0 { 72 | args = append(args, "-t", fstype) 73 | } 74 | 75 | if len(options) > 0 { 76 | args = append(args, "-o", strings.Join(options, ",")) 77 | } 78 | 79 | args = append(args, source) 80 | args = append(args, target) 81 | 82 | if fs.inContainer { 83 | return append(nsenterArgs, args...) 84 | } 85 | 86 | return args 87 | } 88 | 89 | func (fs *OSFilesystem) Unmount(target string) error { 90 | args := fs.getUnmountArgs(target) 91 | 92 | command := exec.Command(args[0], args[1:]...) 93 | output, err := command.CombinedOutput() 94 | if err != nil { 95 | return fmt.Errorf( 96 | "unmount failed, arguments: %q\noutput: %s\n", 97 | args, string(output), 98 | ) 99 | } 100 | 101 | return nil 102 | } 103 | 104 | func (fs *OSFilesystem) getUnmountArgs(target string) []string { 105 | var args []string 106 | args = append(args, "umount", target) 107 | 108 | if fs.inContainer { 109 | return append(nsenterArgs, args...) 110 | } 111 | 112 | return args 113 | } 114 | 115 | func (fs *OSFilesystem) Format(source string) error { 116 | if fs.isFormatted(source) { 117 | return nil 118 | } 119 | 120 | args := fs.getMkfsExt4Args(source) 121 | command := exec.Command(args[0], args[1:]...) 
122 | output, err := command.CombinedOutput() 123 | if err != nil { 124 | return fmt.Errorf( 125 | "mkfs.ext4 failed, arguments: %q\noutput: %s\n", 126 | args, string(output), 127 | ) 128 | } 129 | 130 | return nil 131 | } 132 | 133 | func (fs *OSFilesystem) getMkfsExt4Args(source string) []string { 134 | var args []string 135 | args = append(args, "mkfs.ext4", source) 136 | 137 | if fs.inContainer { 138 | return append(nsenterArgs, args...) 139 | } 140 | 141 | return args 142 | } 143 | 144 | func (fs *OSFilesystem) isFormatted(source string) bool { 145 | args := fs.getBlkidArgs(source) 146 | 147 | command := exec.Command(args[0], args[1:]...) 148 | _, err := command.CombinedOutput() 149 | if err != nil { 150 | return false 151 | } 152 | 153 | return true 154 | } 155 | 156 | func (fs *OSFilesystem) getBlkidArgs(source string) []string { 157 | var args []string 158 | args = append(args, "blkid", source) 159 | 160 | if fs.inContainer { 161 | return append(nsenterArgs, args...) 162 | } 163 | 164 | return args 165 | } 166 | 167 | func inContainer() bool { 168 | content, err := ioutil.ReadFile(CGroupFilename) 169 | if err != nil { 170 | return false 171 | } 172 | 173 | for _, l := range strings.Split(string(content), "\n") { 174 | p := strings.Split(l, ":") 175 | if len(p) != 3 { 176 | continue 177 | } 178 | 179 | if strings.TrimSpace(p[2]) != "/" { 180 | return true 181 | } 182 | } 183 | 184 | return false 185 | } 186 | -------------------------------------------------------------------------------- /plugin/volume.go: -------------------------------------------------------------------------------- 1 | package plugin 2 | 3 | import ( 4 | "fmt" 5 | "net/http" 6 | "os" 7 | "strconv" 8 | "time" 9 | 10 | "github.com/mcuadros/gce-docker/providers" 11 | 12 | "github.com/docker/go-plugins-helpers/volume" 13 | "gopkg.in/inconshreveable/log15.v2" 14 | ) 15 | 16 | var WaitStatusTimeout = 100 * time.Second 17 | 18 | type Volume struct { 19 | Root string 20 | p providers.DiskProvider 21 | fs Filesystem 22 | } 23 | 24 | func NewVolume(c *http.Client, project, zone, instance string) (*Volume, error) { 25 | p, err := providers.NewDisk(c, project, zone, instance) 26 | if err != nil { 27 | return nil, err 28 | } 29 | 30 | return &Volume{ 31 | Root: "/mnt/", 32 | p: p, 33 | fs: NewFilesystem(), 34 | }, nil 35 | } 36 | 37 | func (v *Volume) Create(r volume.Request) volume.Response { 38 | log15.Debug("create request received", "name", r.Name) 39 | start := time.Now() 40 | config, err := v.createDiskConfig(r) 41 | if err != nil { 42 | return buildReponseError(err) 43 | } 44 | 45 | if err := v.p.Create(config); err != nil { 46 | return buildReponseError(err) 47 | } 48 | 49 | log15.Info("disk created", "disk", r.Name, "elapsed", time.Since(start)) 50 | return volume.Response{} 51 | } 52 | 53 | func (v *Volume) List(volume.Request) volume.Response { 54 | log15.Debug("list request received") 55 | disks, err := v.p.List() 56 | if err != nil { 57 | return buildReponseError(err) 58 | } 59 | 60 | r := volume.Response{} 61 | for _, d := range disks { 62 | if d.Status != "READY" { 63 | continue 64 | } 65 | 66 | r.Volumes = append(r.Volumes, &volume.Volume{ 67 | Name: d.Name, 68 | }) 69 | } 70 | 71 | return r 72 | } 73 | 74 | func (v *Volume) Capabilities(volume.Request) volume.Response { 75 | log15.Debug("capabilities request received") 76 | return volume.Response{ 77 | Capabilities: volume.Capability{Scope: "local"}, 78 | } 79 | } 80 | 81 | func (v *Volume) Get(r volume.Request) volume.Response { 82 | log15.Debug("get 
request received") 83 | disks, err := v.p.List() 84 | if err != nil { 85 | return buildReponseError(err) 86 | } 87 | 88 | resp := volume.Response{} 89 | for _, d := range disks { 90 | if d.Name != r.Name { 91 | continue 92 | } 93 | 94 | config, err := v.createDiskConfig(r) 95 | if err != nil { 96 | return buildReponseError(err) 97 | } 98 | 99 | resp.Volume = &volume.Volume{ 100 | Name: d.Name, 101 | Mountpoint: config.MountPoint(v.Root), 102 | } 103 | } 104 | 105 | return resp 106 | } 107 | 108 | func (v *Volume) Remove(r volume.Request) volume.Response { 109 | log15.Debug("remove request received", "name", r.Name) 110 | start := time.Now() 111 | 112 | config, err := v.createDiskConfig(r) 113 | if err != nil { 114 | return buildReponseError(err) 115 | } 116 | 117 | if err := v.p.Delete(config); err != nil { 118 | return buildReponseError(err) 119 | } 120 | 121 | log15.Info("disk removed", "disk", r.Name, "elapsed", time.Since(start)) 122 | return volume.Response{} 123 | } 124 | 125 | func (v *Volume) Path(r volume.Request) volume.Response { 126 | config, err := v.createDiskConfig(r) 127 | if err != nil { 128 | return buildReponseError(err) 129 | } 130 | 131 | mnt := config.MountPoint(v.Root) 132 | log15.Debug("path request received", "name", r.Name, "mnt", mnt) 133 | 134 | if err := v.createMountPoint(config); err != nil { 135 | return buildReponseError(err) 136 | } 137 | 138 | return volume.Response{Mountpoint: mnt} 139 | } 140 | 141 | func (v *Volume) Mount(r volume.Request) volume.Response { 142 | log15.Debug("mount request received", "name", r.Name) 143 | start := time.Now() 144 | 145 | config, err := v.createDiskConfig(r) 146 | if err != nil { 147 | return buildReponseError(err) 148 | } 149 | 150 | if err := v.createMountPoint(config); err != nil { 151 | return buildReponseError(err) 152 | } 153 | 154 | if err := v.p.Attach(config); err != nil { 155 | return buildReponseError(err) 156 | } 157 | 158 | if err := v.fs.Format(config.Dev()); err != nil { 159 | return buildReponseError(err) 160 | } 161 | 162 | if err := v.fs.Mount(config.Dev(), config.MountPoint(v.Root)); err != nil { 163 | return buildReponseError(err) 164 | } 165 | 166 | log15.Info("disk mounted", "disk", r.Name, "elapsed", time.Since(start)) 167 | return volume.Response{ 168 | Mountpoint: config.MountPoint(v.Root), 169 | } 170 | } 171 | 172 | func (v *Volume) createMountPoint(c *providers.DiskConfig) error { 173 | target := c.MountPoint(v.Root) 174 | fi, err := v.fs.Stat(target) 175 | if os.IsNotExist(err) { 176 | return v.fs.MkdirAll(target, 0755) 177 | } 178 | 179 | if err != nil { 180 | return err 181 | } 182 | 183 | if !fi.IsDir() { 184 | return fmt.Errorf("error the mountpoint %q already exists", target) 185 | } 186 | 187 | return nil 188 | } 189 | 190 | func (v *Volume) Unmount(r volume.Request) volume.Response { 191 | log15.Debug("unmount request received", "name", r.Name) 192 | start := time.Now() 193 | config, err := v.createDiskConfig(r) 194 | if err != nil { 195 | return buildReponseError(err) 196 | } 197 | 198 | if err := v.fs.Unmount(config.MountPoint(v.Root)); err != nil { 199 | return buildReponseError(err) 200 | } 201 | 202 | if err := v.p.Detach(config); err != nil { 203 | return buildReponseError(err) 204 | } 205 | 206 | log15.Info("disk unmounted", "disk", r.Name, "elapsed", time.Since(start)) 207 | return volume.Response{} 208 | } 209 | 210 | func (v *Volume) createDiskConfig(r volume.Request) (*providers.DiskConfig, error) { 211 | config := &providers.DiskConfig{Name: r.Name} 212 | 213 | for key, 
value := range r.Options { 214 | switch key { 215 | case "Name": 216 | config.Name = value 217 | case "Type": 218 | config.Type = value 219 | case "SizeGb": 220 | var err error 221 | config.SizeGb, err = strconv.ParseInt(value, 10, 64) 222 | if err != nil { 223 | return nil, err 224 | } 225 | case "SourceSnapshot": 226 | config.SourceSnapshot = value 227 | case "SourceImage": 228 | config.SourceImage = value 229 | default: 230 | return nil, fmt.Errorf("unknown option %q", key) 231 | } 232 | } 233 | 234 | return config, config.Validate() 235 | } 236 | 237 | func buildReponseError(err error) volume.Response { 238 | log15.Error("request failed", "error", err.Error()) 239 | return volume.Response{Err: err.Error()} 240 | } 241 | -------------------------------------------------------------------------------- /plugin/volume_test.go: -------------------------------------------------------------------------------- 1 | package plugin 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | 7 | "github.com/docker/go-plugins-helpers/volume" 8 | "github.com/mcuadros/gce-docker/providers" 9 | "github.com/spf13/afero" 10 | "google.golang.org/api/compute/v1" 11 | . "gopkg.in/check.v1" 12 | ) 13 | 14 | const TimeoutAfterUnmount = 15 * time.Second 15 | 16 | type VolumeSuite struct { 17 | v *Volume 18 | fs *MemFilesystem 19 | p *DiskProviderFixture 20 | } 21 | 22 | var _ = Suite(&VolumeSuite{}) 23 | 24 | func (s *VolumeSuite) SetUpTest(c *C) { 25 | s.fs = NewMemFilesystem() 26 | s.p = NewDiskProviderFixture() 27 | s.v = &Volume{p: s.p, fs: s.fs, Root: "/mnt/"} 28 | } 29 | 30 | func (s *VolumeSuite) TestCreateDiskConfig(c *C) { 31 | config, err := s.v.createDiskConfig(volume.Request{Name: "foo"}) 32 | c.Assert(err, IsNil) 33 | c.Assert(config.Name, Equals, "foo") 34 | 35 | config, err = s.v.createDiskConfig(volume.Request{ 36 | Name: "foo", 37 | Options: map[string]string{"SizeGb": "42"}, 38 | }) 39 | c.Assert(err, IsNil) 40 | c.Assert(config.SizeGb, Equals, int64(42)) 41 | 42 | config, err = s.v.createDiskConfig(volume.Request{ 43 | Name: "foo", 44 | Options: map[string]string{"Type": "foo"}, 45 | }) 46 | c.Assert(err, IsNil) 47 | c.Assert(config.Type, Equals, "foo") 48 | 49 | config, err = s.v.createDiskConfig(volume.Request{ 50 | Name: "foo", 51 | Options: map[string]string{"SourceSnapshot": "foo"}, 52 | }) 53 | c.Assert(err, IsNil) 54 | c.Assert(config.SourceSnapshot, Equals, "foo") 55 | 56 | config, err = s.v.createDiskConfig(volume.Request{ 57 | Name: "foo", 58 | Options: map[string]string{"SourceImage": "foo"}, 59 | }) 60 | c.Assert(err, IsNil) 61 | c.Assert(config.SourceImage, Equals, "foo") 62 | } 63 | 64 | func (s *VolumeSuite) TestCreate(c *C) { 65 | r := s.v.Create(volume.Request{Name: "foo"}) 66 | c.Assert(r.Err, HasLen, 0) 67 | 68 | c.Assert(s.p.disks, HasLen, 1) 69 | c.Assert(s.p.disks["foo"], Equals, true) 70 | } 71 | 72 | func (s *VolumeSuite) TestList(c *C) { 73 | r := s.v.Create(volume.Request{Name: "foo"}) 74 | c.Assert(r.Err, HasLen, 0) 75 | 76 | r = s.v.List(volume.Request{}) 77 | c.Assert(r.Err, HasLen, 0) 78 | c.Assert(r.Volumes, HasLen, 1) 79 | c.Assert(r.Volumes[0].Name, Equals, "foo") 80 | } 81 | 82 | func (s *VolumeSuite) TestRemove(c *C) { 83 | r := s.v.Create(volume.Request{Name: "foo"}) 84 | c.Assert(r.Err, HasLen, 0) 85 | 86 | r = s.v.Remove(volume.Request{Name: "foo"}) 87 | c.Assert(r.Err, HasLen, 0) 88 | 89 | c.Assert(s.p.disks, HasLen, 0) 90 | } 91 | 92 | func (s *VolumeSuite) TestPath(c *C) { 93 | r := s.v.Path(volume.Request{Name: "foo"}) 94 | c.Assert(r.Err, HasLen, 0) 95 | 
c.Assert(r.Mountpoint, Equals, "/mnt/foo") 96 | 97 | fs, err := s.fs.Stat(r.Mountpoint) 98 | c.Assert(err, IsNil) 99 | c.Assert(fs.IsDir(), Equals, true) 100 | } 101 | 102 | func (s *VolumeSuite) TestMount(c *C) { 103 | r := s.v.Create(volume.Request{Name: "foo"}) 104 | c.Assert(r.Err, HasLen, 0) 105 | 106 | r = s.v.Mount(volume.Request{Name: "foo"}) 107 | c.Assert(r.Err, HasLen, 0) 108 | c.Assert(r.Mountpoint, Equals, "/mnt/foo") 109 | 110 | fs, err := s.fs.Stat(r.Mountpoint) 111 | c.Assert(err, IsNil) 112 | c.Assert(fs.IsDir(), Equals, true) 113 | 114 | c.Assert(s.p.attached, HasLen, 1) 115 | c.Assert(s.p.attached["foo"], Equals, true) 116 | c.Assert(s.fs.Mounted["/mnt/foo"], Equals, "/dev/disk/by-id/google-docker-volume-foo") 117 | } 118 | 119 | func (s *VolumeSuite) TestUnmount(c *C) { 120 | r := s.v.Create(volume.Request{Name: "foo"}) 121 | c.Assert(r.Err, HasLen, 0) 122 | 123 | r = s.v.Mount(volume.Request{Name: "foo"}) 124 | c.Assert(r.Err, HasLen, 0) 125 | c.Assert(r.Mountpoint, Equals, "/mnt/foo") 126 | 127 | r = s.v.Unmount(volume.Request{Name: "foo"}) 128 | c.Assert(r.Err, HasLen, 0) 129 | 130 | c.Assert(s.p.attached, HasLen, 0) 131 | c.Assert(s.fs.Mounted["/mnt/foo"], Equals, "") 132 | } 133 | 134 | type DiskProviderFixture struct { 135 | disks map[string]bool 136 | attached map[string]bool 137 | } 138 | 139 | func NewDiskProviderFixture() *DiskProviderFixture { 140 | return &DiskProviderFixture{ 141 | disks: make(map[string]bool, 0), 142 | attached: make(map[string]bool, 0), 143 | } 144 | } 145 | 146 | func (d *DiskProviderFixture) Create(c *providers.DiskConfig) error { 147 | d.disks[c.Name] = true 148 | return nil 149 | } 150 | 151 | func (d *DiskProviderFixture) Attach(c *providers.DiskConfig) error { 152 | if _, ok := d.disks[c.Name]; !ok { 153 | return fmt.Errorf("unable to find disk %s", c.Name) 154 | } 155 | 156 | d.attached[c.Name] = true 157 | return nil 158 | } 159 | 160 | func (d *DiskProviderFixture) Detach(c *providers.DiskConfig) error { 161 | delete(d.attached, c.Name) 162 | return nil 163 | } 164 | 165 | func (d *DiskProviderFixture) Delete(c *providers.DiskConfig) error { 166 | delete(d.disks, c.Name) 167 | return nil 168 | } 169 | 170 | func (d *DiskProviderFixture) List() ([]*compute.Disk, error) { 171 | var l []*compute.Disk 172 | for name, _ := range d.disks { 173 | l = append(l, &compute.Disk{Name: name, Status: "READY"}) 174 | } 175 | 176 | l = append(l, &compute.Disk{Name: "no-ready", Status: "PENDING"}) 177 | return l, nil 178 | } 179 | 180 | type MemFilesystem struct { 181 | Mounted map[string]string 182 | Formatted map[string]string 183 | afero.Fs 184 | } 185 | 186 | func NewMemFilesystem() *MemFilesystem { 187 | return &MemFilesystem{ 188 | Mounted: make(map[string]string, 0), 189 | Formatted: make(map[string]string, 0), 190 | 191 | Fs: afero.NewMemMapFs(), 192 | } 193 | } 194 | 195 | func (fs *MemFilesystem) Mount(source string, target string) error { 196 | fs.Mounted[target] = source 197 | return nil 198 | } 199 | 200 | func (fs *MemFilesystem) Unmount(target string) error { 201 | fs.Mounted[target] = "" 202 | return nil 203 | } 204 | 205 | func (fs *MemFilesystem) Format(source string) error { 206 | fs.Formatted[source] = "ext4" 207 | return nil 208 | } 209 | -------------------------------------------------------------------------------- /providers/client.go: -------------------------------------------------------------------------------- 1 | package providers 2 | 3 | import ( 4 | "fmt" 5 | "net/http" 6 | "strings" 7 | "time" 8 | 9 | 
"google.golang.org/api/compute/v1" 10 | "google.golang.org/api/googleapi" 11 | "gopkg.in/inconshreveable/log15.v2" 12 | ) 13 | 14 | const MaxWaitDuration = time.Minute 15 | 16 | type Client struct { 17 | s *compute.Service 18 | zone string 19 | region string 20 | project string 21 | instance string 22 | } 23 | 24 | func NewClient(c *http.Client, project, zone, instance string) (*Client, error) { 25 | s, err := compute.New(c) 26 | if err != nil { 27 | return nil, err 28 | } 29 | 30 | client := &Client{ 31 | s: s, 32 | project: project, 33 | zone: zone, 34 | instance: instance, 35 | } 36 | 37 | return client, client.loadRegion() 38 | } 39 | 40 | func (c *Client) loadRegion() error { 41 | z, err := c.s.Zones.Get(c.project, c.zone).Do() 42 | if err != nil { 43 | return fmt.Errorf("error retrieving region from zone: %s", err) 44 | } 45 | 46 | region := strings.Split(z.Region, "/") 47 | c.region = region[len(region)-1] 48 | return nil 49 | } 50 | 51 | func (c *Client) WaitDone(op *compute.Operation) error { 52 | var doer func(...googleapi.CallOption) (*compute.Operation, error) 53 | switch { 54 | case op.Region != "": 55 | doer = c.s.RegionOperations.Get(c.project, c.region, op.Name).Do 56 | case op.Zone != "": 57 | doer = c.s.ZoneOperations.Get(c.project, c.zone, op.Name).Do 58 | default: 59 | doer = c.s.GlobalOperations.Get(c.project, op.Name).Do 60 | } 61 | 62 | start := time.Now() 63 | ticker := time.Tick(1 * time.Second) 64 | for range ticker { 65 | rop, err := doer() 66 | if err != nil { 67 | log15.Error("error waiting for operation %q: %s", "name", op.Name, err) 68 | continue 69 | } 70 | 71 | if rop.Status == "DONE" { 72 | return nil 73 | } 74 | 75 | if time.Since(start) > MaxWaitDuration { 76 | return fmt.Errorf("max. time reached waiting for operation %q", op.Name) 77 | } 78 | } 79 | 80 | return nil 81 | } 82 | -------------------------------------------------------------------------------- /providers/common.go: -------------------------------------------------------------------------------- 1 | package providers 2 | 3 | import "fmt" 4 | 5 | func contains(haystack []string, needle string) bool { 6 | for _, e := range haystack { 7 | if e == needle { 8 | return true 9 | } 10 | } 11 | 12 | return false 13 | } 14 | 15 | func DiskURL(project, zone, disks string) string { 16 | return fmt.Sprintf( 17 | "https://www.googleapis.com/compute/v1/projects/%s/zones/%s/disks/%s", 18 | project, zone, disks, 19 | ) 20 | } 21 | 22 | func InstanceURL(project, zone, instance string) string { 23 | return fmt.Sprintf( 24 | "https://www.googleapis.com/compute/v1/projects/%s/zones/%s/instances/%s", 25 | project, zone, instance, 26 | ) 27 | } 28 | 29 | func TargetPoolURL(project, region, targetPool string) string { 30 | return fmt.Sprintf( 31 | "https://www.googleapis.com/compute/v1/projects/%s/regions/%s/targetPools/%s", 32 | project, region, targetPool, 33 | ) 34 | } 35 | 36 | func DiskTypeURL(project, zone, diskType string) string { 37 | if diskType == "" { 38 | diskType = "pd-standard" 39 | } 40 | 41 | return fmt.Sprintf( 42 | "https://www.googleapis.com/compute/v1/projects/%s/zones/%s/diskTypes/%s", 43 | project, zone, diskType, 44 | ) 45 | } 46 | -------------------------------------------------------------------------------- /providers/common_test.go: -------------------------------------------------------------------------------- 1 | package providers 2 | 3 | import ( 4 | "encoding/base64" 5 | "flag" 6 | "net/http" 7 | "os" 8 | "testing" 9 | "time" 10 | 11 | "golang.org/x/net/context" 12 | 
"golang.org/x/oauth2" 13 | "golang.org/x/oauth2/google" 14 | "google.golang.org/api/compute/v1" 15 | . "gopkg.in/check.v1" 16 | ) 17 | 18 | var integration = flag.Bool("integration", false, "Include integration tests") 19 | 20 | func Test(t *testing.T) { TestingT(t) } 21 | 22 | type CommonSuite struct{} 23 | 24 | var _ = Suite(&CommonSuite{}) 25 | 26 | type BaseSuite struct { 27 | key []byte 28 | project, zone, instance string 29 | 30 | c *http.Client 31 | } 32 | 33 | func (s *BaseSuite) SetUpSuite(c *C) { 34 | s.initEnviroment(c) 35 | } 36 | 37 | func (s *BaseSuite) SetUpTest(c *C) { 38 | ctx := context.Background() 39 | jwt, err := google.JWTConfigFromJSON(s.key, compute.ComputeScope) 40 | c.Assert(err, IsNil) 41 | 42 | s.c = oauth2.NewClient(ctx, jwt.TokenSource(ctx)) 43 | } 44 | 45 | func (s *BaseSuite) initEnviroment(c *C) { 46 | s.project = os.Getenv("GCP_DEFAULT_PROJECT") 47 | s.zone = os.Getenv("GCP_DEFAULT_ZONE") 48 | s.instance = os.Getenv("GCP_DEFAULT_INSTANCE") 49 | 50 | var err error 51 | s.key, err = base64.StdEncoding.DecodeString(os.Getenv("GCP_JSON_KEY")) 52 | c.Assert(err, IsNil) 53 | } 54 | 55 | func (s *BaseSuite) getRandomName() string { 56 | return time.Now().Format("20060102150405000000") 57 | } 58 | -------------------------------------------------------------------------------- /providers/config.go: -------------------------------------------------------------------------------- 1 | package providers 2 | 3 | import ( 4 | "crypto/md5" 5 | "encoding/hex" 6 | "fmt" 7 | "path/filepath" 8 | 9 | "github.com/fsouza/go-dockerclient" 10 | "google.golang.org/api/compute/v1" 11 | ) 12 | 13 | var ( 14 | NetworkBaseName = "docker-network-%s-%s" 15 | DiskDeviceNameBaseName = "docker-volume-%s" 16 | DiskDevBasePath = "/dev/disk/by-id/google-%s" 17 | ) 18 | 19 | type DiskConfig struct { 20 | Name string 21 | Type string 22 | SizeGb int64 23 | SourceSnapshot string 24 | SourceImage string 25 | } 26 | 27 | func (c *DiskConfig) Disk(project, zone string) *compute.Disk { 28 | return &compute.Disk{ 29 | Name: c.Name, 30 | Type: DiskTypeURL(project, zone, c.Type), 31 | SizeGb: c.SizeGb, 32 | SourceSnapshot: c.SourceSnapshot, 33 | SourceImage: c.SourceImage, 34 | } 35 | } 36 | 37 | func (c *DiskConfig) DeviceName() string { 38 | return fmt.Sprintf(DiskDeviceNameBaseName, c.Name) 39 | } 40 | 41 | func (c *DiskConfig) Dev() string { 42 | return fmt.Sprintf(DiskDevBasePath, c.DeviceName()) 43 | } 44 | 45 | func (c *DiskConfig) MountPoint(root string) string { 46 | return filepath.Join(root, c.Name) 47 | } 48 | 49 | func (c *DiskConfig) Validate() error { 50 | if c.Name == "" { 51 | return fmt.Errorf("invalid disk config, name field cannot be empty") 52 | } 53 | 54 | if c.SourceSnapshot != "" && c.SourceImage != "" { 55 | return fmt.Errorf("invalid dick config, source snapshot and source image can't be presents at the same time.") 56 | } 57 | 58 | return nil 59 | } 60 | 61 | type SessionAffinity string 62 | type NetworkConfig struct { 63 | GroupName string 64 | Container string 65 | Network string 66 | Address string 67 | Ports []docker.Port 68 | Source struct { 69 | Ranges []string 70 | Tags []string 71 | } 72 | SessionAffinity SessionAffinity 73 | } 74 | 75 | func (c *NetworkConfig) TargetPool(project, zone, instance string) *compute.TargetPool { 76 | return &compute.TargetPool{ 77 | Name: c.Name(instance), 78 | Instances: []string{InstanceURL(project, zone, instance)}, 79 | SessionAffinity: string(c.SessionAffinity), 80 | } 81 | } 82 | 83 | func (c *NetworkConfig) ForwardingRule(instance, 
targetPoolURL string) []*compute.ForwardingRule { 84 | var rules []*compute.ForwardingRule 85 | for _, p := range c.Ports { 86 | rules = append(rules, &compute.ForwardingRule{ 87 | Name: fmt.Sprintf("%s-%s-%s", c.Name(instance), p.Port(), p.Proto()), 88 | IPAddress: c.Address, 89 | IPProtocol: p.Proto(), 90 | PortRange: p.Port(), 91 | Target: targetPoolURL, 92 | }) 93 | } 94 | 95 | return rules 96 | } 97 | 98 | func (c *NetworkConfig) Firewall(instance string) *compute.Firewall { 99 | sourceRanges := c.Source.Ranges 100 | if len(c.Source.Ranges) == 0 && len(c.Source.Tags) == 0 { 101 | sourceRanges = []string{"0.0.0.0/0"} 102 | } 103 | 104 | network := c.Network 105 | if len(network) == 0 { 106 | network = "global/networks/default" 107 | } 108 | 109 | name := c.Name(instance) 110 | var allowed []*compute.FirewallAllowed 111 | for _, p := range c.Ports { 112 | allowed = append(allowed, &compute.FirewallAllowed{ 113 | IPProtocol: p.Proto(), 114 | Ports: []string{p.Port()}, 115 | }) 116 | } 117 | 118 | return &compute.Firewall{ 119 | Name: name, 120 | SourceRanges: sourceRanges, 121 | SourceTags: c.Source.Tags, 122 | TargetTags: []string{name}, 123 | Network: network, 124 | Allowed: allowed, 125 | } 126 | } 127 | 128 | func (c *NetworkConfig) Name(instance string) string { 129 | return fmt.Sprintf(NetworkBaseName, c.Group(instance), c.ID(instance)) 130 | } 131 | 132 | func (c *NetworkConfig) Group(instance string) string { 133 | if c.GroupName != "" { 134 | return c.GroupName 135 | } 136 | 137 | return fmt.Sprintf("%s-%s", instance, c.Container) 138 | } 139 | 140 | func (c *NetworkConfig) ID(instance string) string { 141 | var unique string 142 | unique += c.Group(instance) 143 | unique += c.Address 144 | for _, p := range c.Ports { 145 | unique += string(p) 146 | } 147 | 148 | hash := md5.Sum([]byte(unique)) 149 | return hex.EncodeToString(hash[:])[:8] 150 | } 151 | 152 | func (c *NetworkConfig) Validate() error { 153 | if c.Container == "" { 154 | return fmt.Errorf("invalid network config, container field cannot be empty") 155 | } 156 | 157 | if len(c.Ports) == 0 { 158 | return fmt.Errorf("invalid network config, ports field cannot be empty") 159 | } 160 | 161 | return nil 162 | } 163 | -------------------------------------------------------------------------------- /providers/config_test.go: -------------------------------------------------------------------------------- 1 | package providers 2 | 3 | import ( 4 | "github.com/fsouza/go-dockerclient" 5 | . 
"gopkg.in/check.v1" 6 | ) 7 | 8 | type ConfigSuite struct{} 9 | 10 | var _ = Suite(&ConfigSuite{}) 11 | 12 | func (s *ConfigSuite) TestNetworkConfigDisk(c *C) { 13 | config := &DiskConfig{ 14 | Name: "foo", 15 | Type: "qux", 16 | SizeGb: 42, 17 | SourceSnapshot: "bar", 18 | SourceImage: "baz", 19 | } 20 | 21 | d := config.Disk("project", "foo-c") 22 | c.Assert(d.Name, Equals, "foo") 23 | c.Assert(d.Type, Equals, "https://www.googleapis.com/compute/v1/projects/project/zones/foo-c/diskTypes/qux") 24 | c.Assert(d.SizeGb, Equals, int64(42)) 25 | c.Assert(d.SourceSnapshot, Equals, "bar") 26 | c.Assert(d.SourceImage, Equals, "baz") 27 | } 28 | 29 | func (s *ConfigSuite) TestNetworkConfigValidate(c *C) { 30 | config := &DiskConfig{} 31 | err := config.Validate() 32 | c.Assert(err, NotNil) 33 | 34 | config = &DiskConfig{Name: "foo"} 35 | err = config.Validate() 36 | c.Assert(err, IsNil) 37 | 38 | config = &DiskConfig{Name: "foo", SourceSnapshot: "foo", SourceImage: "foo"} 39 | err = config.Validate() 40 | c.Assert(err, NotNil) 41 | } 42 | 43 | func (s *ConfigSuite) TestNetworkConfigDeviceName(c *C) { 44 | config := &DiskConfig{Name: "foo"} 45 | c.Assert(config.DeviceName(), Equals, "docker-volume-foo") 46 | } 47 | 48 | func (s *ConfigSuite) TestNetworkConfigDev(c *C) { 49 | config := &DiskConfig{Name: "docker-volume-foo"} 50 | c.Assert(config.Dev(), Equals, "/dev/disk/by-id/google-docker-volume-docker-volume-foo") 51 | } 52 | 53 | func (s *ConfigSuite) TestNetworkConfigMountPoint(c *C) { 54 | config := &DiskConfig{Name: "foo"} 55 | c.Assert(config.MountPoint("/mnt/"), Equals, "/mnt/foo") 56 | } 57 | 58 | func (s *ConfigSuite) TestNetworkConfigGroup(c *C) { 59 | config := &NetworkConfig{Container: "bar"} 60 | c.Assert(config.Group("foo"), Equals, "foo-bar") 61 | 62 | config = &NetworkConfig{GroupName: "qux"} 63 | c.Assert(config.Group("foo"), Equals, "qux") 64 | } 65 | 66 | func (s *ConfigSuite) TestNetworkConfigID(c *C) { 67 | config := &NetworkConfig{ 68 | Container: "foo", 69 | Address: "qux", 70 | Ports: []docker.Port{docker.Port("baz/bar")}, 71 | } 72 | 73 | c.Assert(config.ID("42"), Equals, "97b1b51f") 74 | } 75 | 76 | func (s *ConfigSuite) TestNetworkConfigName(c *C) { 77 | config := &NetworkConfig{GroupName: "bar"} 78 | c.Assert(config.Name("foo"), Equals, "docker-network-bar-37b51d19") 79 | } 80 | 81 | func (s *ConfigSuite) TestNetworkConfigTargetPool(c *C) { 82 | config := &NetworkConfig{ 83 | Container: "bar", 84 | SessionAffinity: SessionAffinity("qux"), 85 | } 86 | 87 | tp := config.TargetPool("bar", "baz", "foo") 88 | c.Assert(tp.Name, Equals, "docker-network-foo-bar-e5f9ec04") 89 | c.Assert(tp.Instances, HasLen, 1) 90 | c.Assert(tp.Instances[0], Equals, "https://www.googleapis.com/compute/v1/projects/bar/zones/baz/instances/foo") 91 | c.Assert(tp.SessionAffinity, Equals, "qux") 92 | } 93 | -------------------------------------------------------------------------------- /providers/disk.go: -------------------------------------------------------------------------------- 1 | package providers 2 | 3 | import ( 4 | "net/http" 5 | 6 | "google.golang.org/api/compute/v1" 7 | "google.golang.org/api/googleapi" 8 | ) 9 | 10 | type DiskProvider interface { 11 | Create(c *DiskConfig) error 12 | Attach(c *DiskConfig) error 13 | Detach(c *DiskConfig) error 14 | Delete(c *DiskConfig) error 15 | List() ([]*compute.Disk, error) 16 | } 17 | 18 | type Disk struct { 19 | Client 20 | } 21 | 22 | func NewDisk(c *http.Client, project, zone, instance string) (*Disk, error) { 23 | client, err := NewClient(c, 
project, zone, instance) 24 | if err != nil { 25 | return nil, err 26 | } 27 | 28 | return &Disk{Client: *client}, nil 29 | } 30 | 31 | func (d *Disk) Create(c *DiskConfig) error { 32 | disk := c.Disk(d.project, d.zone) 33 | if _, err := d.s.Disks.Get(d.project, d.zone, disk.Name).Do(); err != nil { 34 | if apiErr, ok := err.(*googleapi.Error); !ok || apiErr.Code != 404 { 35 | return err 36 | } 37 | 38 | op, err := d.s.Disks.Insert(d.project, d.zone, disk).Do() 39 | if err != nil { 40 | return err 41 | } 42 | 43 | return d.WaitDone(op) 44 | } 45 | 46 | return nil 47 | } 48 | 49 | func (d *Disk) Attach(c *DiskConfig) error { 50 | ad := &compute.AttachedDisk{ 51 | Source: DiskURL(d.project, d.zone, c.Name), 52 | DeviceName: c.DeviceName(), 53 | } 54 | 55 | op, err := d.s.Instances.AttachDisk(d.project, d.zone, d.instance, ad).Do() 56 | if err != nil { 57 | return err 58 | } 59 | 60 | return d.WaitDone(op) 61 | } 62 | 63 | func (d *Disk) Detach(c *DiskConfig) error { 64 | op, err := d.s.Instances.DetachDisk(d.project, d.zone, d.instance, c.DeviceName()).Do() 65 | if err != nil { 66 | return err 67 | } 68 | 69 | return d.WaitDone(op) 70 | } 71 | 72 | func (d *Disk) Delete(c *DiskConfig) error { 73 | op, err := d.s.Disks.Delete(d.project, d.zone, c.Name).Do() 74 | if err != nil { 75 | return err 76 | } 77 | 78 | return d.WaitDone(op) 79 | } 80 | 81 | func (d *Disk) List() ([]*compute.Disk, error) { 82 | op, err := d.s.Disks.List(d.project, d.zone).Do() 83 | if err != nil { 84 | return nil, err 85 | } 86 | 87 | return op.Items, err 88 | } 89 | -------------------------------------------------------------------------------- /providers/disk_test.go: -------------------------------------------------------------------------------- 1 | package providers 2 | 3 | import . "gopkg.in/check.v1" 4 | 5 | type DiskSuite struct { 6 | BaseSuite 7 | } 8 | 9 | var _ = Suite(&DiskSuite{}) 10 | 11 | func (s *DiskSuite) TestCreate(c *C) { 12 | if !*integration { 13 | c.Skip("-integration not provided") 14 | } 15 | 16 | n, err := NewDisk(s.c, s.project, s.zone, s.instance) 17 | c.Assert(err, IsNil) 18 | 19 | config := &DiskConfig{ 20 | Name: "test", 21 | } 22 | 23 | err = n.Create(config) 24 | c.Assert(err, IsNil) 25 | 26 | err = n.Attach(config) 27 | c.Assert(err, IsNil) 28 | 29 | err = n.Detach(config) 30 | c.Assert(err, IsNil) 31 | 32 | err = n.Delete(config) 33 | c.Assert(err, IsNil) 34 | } 35 | -------------------------------------------------------------------------------- /providers/network.go: -------------------------------------------------------------------------------- 1 | package providers 2 | 3 | import ( 4 | "fmt" 5 | "net" 6 | "net/http" 7 | 8 | "google.golang.org/api/compute/v1" 9 | "google.golang.org/api/googleapi" 10 | ) 11 | 12 | type NetworkProvider interface { 13 | Create(c *NetworkConfig) error 14 | Delete(c *NetworkConfig) error 15 | } 16 | 17 | type Network struct { 18 | Client 19 | } 20 | 21 | func NewNetwork(c *http.Client, project, zone, instance string) (*Network, error) { 22 | client, err := NewClient(c, project, zone, instance) 23 | if err != nil { 24 | return nil, err 25 | } 26 | 27 | return &Network{Client: *client}, nil 28 | } 29 | 30 | func (n *Network) Create(c *NetworkConfig) error { 31 | if err := c.Validate(); err != nil { 32 | return err 33 | } 34 | 35 | if err := n.updateInstanceTags(c); err != nil { 36 | return fmt.Errorf("error updating instance tags: %s", err) 37 | } 38 | if err := n.createOrUpdateTargetPool(c); err != nil { 39 | return fmt.Errorf("error creating/updating target pool: %s", err) 40 | } 41 | 42 | if err := n.createForwardingRules(c); err != nil { 43 | return fmt.Errorf("error creating forwarding rule: %s", err) 44 | } 45 | 46 | if err := n.createOrUpdateFirewall(c); err != nil { 47 | return fmt.Errorf("error creating firewall rule: %s", err) 48 | } 49 | 50 | return nil 51 | } 52 | 53 | func (n *Network) updateInstanceTags(c *NetworkConfig) error { 54 | i, err := n.s.Instances.Get(n.project, n.zone, n.instance).Do() 55 | if err != nil { 56 | return err 57 | } 58 | 59 | tag := c.Name(n.instance) 60 | if contains(i.Tags.Items, tag) { 61 | return nil 62 | } 63 | 64 | op, err := n.s.Instances.SetTags(n.project, n.zone, n.instance, &compute.Tags{ 65 | Items: append(i.Tags.Items, tag), 66 | Fingerprint: i.Tags.Fingerprint, 67 | }).Do() 68 | 69 | if err != nil { 70 | return err 71 | } 72 | 73 | return n.WaitDone(op) 74 | 75 | } 76 | 77 | func (n *Network) createOrUpdateTargetPool(c *NetworkConfig) error { 78 | new := c.TargetPool(n.project, n.zone, n.instance) 79 | old, err := n.s.TargetPools.Get(n.project, n.region, new.Name).Do() 80 | if err != nil { 81 | if apiErr, ok := err.(*googleapi.Error); !ok || apiErr.Code != 404 { 82 | return err 83 | } 84 | 85 | return n.createTargetPool(new) 86 | } 87 | 88 | return n.updateTargetPool(old, new) 89 | } 90 | 91 | func (n *Network) createTargetPool(pool *compute.TargetPool) error { 92 | op, err := n.s.TargetPools.Insert(n.project, n.region, pool).Do() 93 | if err != nil { 94 | return err 95 | } 96 | 97 | return n.WaitDone(op) 98 | } 99 | 100 | func (n *Network) updateTargetPool(old, new *compute.TargetPool) error { 101 | op, err := n.s.TargetPools.AddInstance(n.project, n.region, new.Name, &compute.TargetPoolsAddInstanceRequest{ 102 | Instances: []*compute.InstanceReference{{ 103 | Instance: InstanceURL(n.project, n.zone, n.instance), 104 |
}}, 105 | }).Do() 106 | 107 | if err != nil { 108 | return err 109 | } 110 | 111 | return n.WaitDone(op) 112 | } 113 | 114 | func (n *Network) createForwardingRules(c *NetworkConfig) error { 115 | targetPoolURL := TargetPoolURL(n.project, n.region, c.Name(n.instance)) 116 | for _, rule := range c.ForwardingRule(n.instance, targetPoolURL) { 117 | if err := n.createForwardingRule(rule); err != nil { 118 | return err 119 | } 120 | } 121 | 122 | return nil 123 | } 124 | 125 | func (n *Network) createForwardingRule(rule *compute.ForwardingRule) error { 126 | if err := n.resolveForwardingRule(rule); err != nil { 127 | return err 128 | } 129 | 130 | _, err := n.s.ForwardingRules.Get(n.project, n.region, rule.Name).Do() 131 | if err == nil { 132 | return nil 133 | } 134 | 135 | if apiErr, ok := err.(*googleapi.Error); !ok || apiErr.Code != 404 { 136 | return err 137 | } 138 | 139 | op, err := n.s.ForwardingRules.Insert(n.project, n.region, rule).Do() 140 | if err != nil { 141 | return err 142 | } 143 | 144 | return n.WaitDone(op) 145 | } 146 | 147 | func (n *Network) resolveForwardingRule(rule *compute.ForwardingRule) error { 148 | test := net.ParseIP(rule.IPAddress) 149 | if test.To4() != nil { 150 | return nil 151 | } 152 | 153 | addr, err := n.s.Addresses.Get(n.project, n.region, rule.IPAddress).Do() 154 | if err != nil { 155 | return err 156 | } 157 | 158 | rule.IPAddress = addr.Address 159 | return nil 160 | } 161 | 162 | func (n *Network) createOrUpdateFirewall(c *NetworkConfig) error { 163 | rule := c.Firewall(n.instance) 164 | if _, err := n.s.Firewalls.Get(n.project, rule.Name).Do(); err != nil { 165 | if apiErr, ok := err.(*googleapi.Error); !ok || apiErr.Code != 404 { 166 | return err 167 | } 168 | 169 | op, err := n.s.Firewalls.Insert(n.project, rule).Do() 170 | if err != nil { 171 | return err 172 | } 173 | 174 | return n.WaitDone(op) 175 | } 176 | 177 | return nil 178 | } 179 | 180 | func (n *Network) Delete(c *NetworkConfig) error { 181 | if err := n.deleteFirewall(c); err != nil { 182 | return err 183 | } 184 | 185 | if err := n.deleteForwardingRules(c); err != nil { 186 | return err 187 | } 188 | 189 | if err := n.deleteTargetPool(c); err != nil { 190 | return err 191 | } 192 | 193 | return nil 194 | } 195 | 196 | func (n *Network) deleteFirewall(c *NetworkConfig) error { 197 | rule := c.Firewall(n.instance) 198 | op, err := n.s.Firewalls.Delete(n.project, rule.Name).Do() 199 | if err != nil { 200 | return err 201 | } 202 | 203 | return n.WaitDone(op) 204 | } 205 | 206 | func (n *Network) deleteForwardingRules(c *NetworkConfig) error { 207 | targetPoolURL := TargetPoolURL(n.project, n.region, c.Name(n.instance)) 208 | for _, rule := range c.ForwardingRule(n.instance, targetPoolURL) { 209 | if err := n.deleteForwardingRule(rule); err != nil { 210 | return err 211 | } 212 | } 213 | 214 | return nil 215 | } 216 | 217 | func (n *Network) deleteForwardingRule(rule *compute.ForwardingRule) error { 218 | op, err := n.s.ForwardingRules.Delete(n.project, n.region, rule.Name).Do() 219 | if err != nil { 220 | return err 221 | } 222 | 223 | return n.WaitDone(op) 224 | } 225 | 226 | func (n *Network) deleteTargetPool(c *NetworkConfig) error { 227 | pool := c.TargetPool(n.project, n.zone, n.instance) 228 | op, err := n.s.TargetPools.Delete(n.project, n.region, pool.Name).Do() 229 | if err != nil { 230 | return err 231 | } 232 | 233 | return n.WaitDone(op) 234 | } 235 | -------------------------------------------------------------------------------- /providers/network_test.go: 
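The resources managed by the `Network` provider above (a target pool, one forwarding rule per published port and a firewall rule) can be inspected with the Cloud SDK, for example:

```sh
gcloud compute target-pools list
gcloud compute forwarding-rules list
gcloud compute firewall-rules list
```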
-------------------------------------------------------------------------------- 1 | package providers 2 | 3 | import ( 4 | "github.com/fsouza/go-dockerclient" 5 | . "gopkg.in/check.v1" 6 | ) 7 | 8 | type NetworkSuite struct { 9 | BaseSuite 10 | } 11 | 12 | var _ = Suite(&NetworkSuite{}) 13 | 14 | func (s *NetworkSuite) TestCreate(c *C) { 15 | if !*integration { 16 | c.Skip("-integration not provided") 17 | } 18 | 19 | n, err := NewNetwork(s.c, s.project, s.zone, s.instance) 20 | c.Assert(err, IsNil) 21 | 22 | config := &NetworkConfig{ 23 | Container: "test", 24 | Ports: []docker.Port{ 25 | docker.Port("53/udp"), 26 | docker.Port("80/tcp"), 27 | docker.Port("443/tcp"), 28 | }, 29 | } 30 | 31 | err = n.Create(config) 32 | c.Assert(err, IsNil) 33 | 34 | err = n.Delete(config) 35 | c.Assert(err, IsNil) 36 | } 37 | -------------------------------------------------------------------------------- /watcher/watcher.go: -------------------------------------------------------------------------------- 1 | package watcher 2 | 3 | import ( 4 | "fmt" 5 | "net/http" 6 | "strings" 7 | "time" 8 | 9 | "gopkg.in/inconshreveable/log15.v2" 10 | 11 | "github.com/fsouza/go-dockerclient" 12 | "github.com/mcuadros/gce-docker/providers" 13 | ) 14 | 15 | var ( 16 | LabelNetworkPrefix = "gce." 17 | LabelNetworkType = LabelNetworkPrefix + "lb.type" 18 | LabelNetworkGroup = LabelNetworkPrefix + "lb.group" 19 | LabelNetworkAddress = LabelNetworkPrefix + "lb.address" 20 | LabelNetworkSourceRanges = LabelNetworkPrefix + "lb.source.ranges" 21 | LabelNetworkSourceTags = LabelNetworkPrefix + "lb.source.tags" 22 | LabelNetworkSessionAffinity = LabelNetworkPrefix + "lb.session.affinity" 23 | ) 24 | 25 | var validLabels = []string{ 26 | LabelNetworkType, LabelNetworkGroup, LabelNetworkAddress, 27 | LabelNetworkSourceRanges, LabelNetworkSourceTags, LabelNetworkSessionAffinity, 28 | } 29 | 30 | type Watcher struct { 31 | WatchedStatus map[string]bool 32 | WatchedLabelsPrefix string 33 | DefaultDelay time.Duration 34 | 35 | c *docker.Client 36 | p *providers.Network 37 | w *Worker 38 | listener chan *docker.APIEvents 39 | } 40 | 41 | func NewWatcher(d *docker.Client, c *http.Client, project, zone, instance string) (*Watcher, error) { 42 | p, err := providers.NewNetwork(c, project, zone, instance) 43 | if err != nil { 44 | return nil, err 45 | } 46 | 47 | return &Watcher{ 48 | WatchedStatus: map[string]bool{"die": true, "start": true}, 49 | WatchedLabelsPrefix: LabelNetworkPrefix, 50 | DefaultDelay: time.Second * 1, 51 | c: d, 52 | p: p, 53 | w: NewWorker(), 54 | }, nil 55 | } 56 | 57 | func (m *Watcher) Watch() error { 58 | m.listener = make(chan *docker.APIEvents, 0) 59 | 60 | if err := m.c.AddEventListener(m.listener); err != nil { 61 | return err 62 | } 63 | 64 | for e := range m.listener { 65 | if err := m.handleEvent(e); err != nil { 66 | log15.Error("error handling event", "container", e.ID[:12], "error", err) 67 | } 68 | } 69 | 70 | return nil 71 | } 72 | 73 | func (m *Watcher) handleEvent(e *docker.APIEvents) error { 74 | if !m.WatchedStatus[e.Status] { 75 | return nil 76 | } 77 | 78 | c, err := m.c.InspectContainer(e.ID) 79 | if err != nil { 80 | return err 81 | } 82 | 83 | labels := m.watchedLabels(c) 84 | if len(labels) == 0 { 85 | return nil 86 | } 87 | 88 | log15.Debug("event captured", "status", e.Status, "container", e.ID[:12], "labels", labels) 89 | 90 | if err := m.validateLabels(labels); err != nil { 91 | return err 92 | } 93 | 94 | switch e.Status { 95 | case "die": 96 | return m.detach(c, labels) 97 | case 
"start": 98 | return m.attach(c, labels) 99 | } 100 | 101 | return nil 102 | } 103 | 104 | func (m *Watcher) watchedLabels(c *docker.Container) map[string]string { 105 | var matched = make(map[string]string, 0) 106 | for label, value := range c.Config.Labels { 107 | if !strings.HasPrefix(label, m.WatchedLabelsPrefix) { 108 | continue 109 | } 110 | 111 | matched[label] = value 112 | } 113 | 114 | return matched 115 | } 116 | 117 | func (m *Watcher) attach(c *docker.Container, l map[string]string) error { 118 | jobID := JobID(c.ID) 119 | 120 | m.w.Delete(jobID) 121 | m.w.Add(jobID, func() error { 122 | start := time.Now() 123 | config := m.createNetworkConfig(c, l) 124 | log15.Debug("start event detected, creating network", 125 | "container", c.ID[:12], "ports", config.Ports, 126 | ) 127 | 128 | if err := m.p.Create(config); err != nil { 129 | log15.Error("error creating network", 130 | "container", c.ID[:12], "ports", config.Ports, "error", err, 131 | ) 132 | return nil 133 | } 134 | 135 | log15.Info( 136 | "network started", 137 | "container", c.ID[:12], "ports", config.Ports, "elapsed", time.Since(start), 138 | ) 139 | return nil 140 | }, m.DefaultDelay) 141 | 142 | return nil 143 | } 144 | 145 | func (m *Watcher) detach(c *docker.Container, l map[string]string) error { 146 | jobID := JobID(c.ID) 147 | 148 | m.w.Delete(jobID) 149 | m.w.Add(JobID(c.ID), func() error { 150 | start := time.Now() 151 | config := m.createNetworkConfig(c, l) 152 | log15.Debug("stop event detected, deleting network", 153 | "container", c.ID[:12], "ports", config.Ports, 154 | ) 155 | 156 | if err := m.p.Delete(config); err != nil { 157 | log15.Error("error deleting network", 158 | "container", c.ID[:12], "ports", config.Ports, "error", err, 159 | ) 160 | 161 | return nil 162 | } 163 | 164 | log15.Info( 165 | "network deleted", 166 | "container", c.ID[:12], "ports", config.Ports, "elapsed", time.Since(start), 167 | ) 168 | return nil 169 | }, m.DefaultDelay) 170 | 171 | return nil 172 | } 173 | 174 | func (m *Watcher) validateLabels(l map[string]string) error { 175 | if l[LabelNetworkType] == "" { 176 | return fmt.Errorf("invalid label %q, should be provided`", LabelNetworkType) 177 | } 178 | 179 | if l[LabelNetworkType] != "static" && l[LabelNetworkType] != "ephemeral" { 180 | return fmt.Errorf("invalid label %q value must be `static` or `ephemeral`", LabelNetworkType) 181 | } 182 | 183 | if l[LabelNetworkType] == "static" && l[LabelNetworkAddress] == "" { 184 | return fmt.Errorf("invalid label %q, cannot be empty when %q is static", LabelNetworkAddress, LabelNetworkType) 185 | } 186 | 187 | return nil 188 | } 189 | 190 | func (m *Watcher) createNetworkConfig(c *docker.Container, l map[string]string) *providers.NetworkConfig { 191 | n := m.createNetworkConfigFromLabels(l) 192 | n.Container = c.ID[:12] 193 | 194 | if c.HostConfig == nil { 195 | return n 196 | } 197 | 198 | for internal, externals := range c.HostConfig.PortBindings { 199 | for _, external := range externals { 200 | if external.HostIP != "0.0.0.0" && external.HostIP != "" { 201 | continue 202 | } 203 | 204 | n.Ports = append(n.Ports, docker.Port(external.HostPort+"/"+internal.Proto())) 205 | } 206 | } 207 | 208 | return n 209 | } 210 | 211 | func (m *Watcher) createNetworkConfigFromLabels(l map[string]string) *providers.NetworkConfig { 212 | n := &providers.NetworkConfig{} 213 | 214 | for key, value := range l { 215 | switch key { 216 | case LabelNetworkGroup: 217 | n.GroupName = value 218 | case LabelNetworkAddress: 219 | n.Address = value 220 | 
--------------------------------------------------------------------------------
/watcher/watcher_test.go:
--------------------------------------------------------------------------------
1 | package watcher
2 | 
3 | import (
4 |     "encoding/base64"
5 |     "net/http"
6 |     "os"
7 | 
8 |     "github.com/fsouza/go-dockerclient"
9 |     "golang.org/x/net/context"
10 |     "golang.org/x/oauth2"
11 |     "golang.org/x/oauth2/google"
12 |     "google.golang.org/api/compute/v1"
13 | 
14 |     . "gopkg.in/check.v1"
15 | )
16 | 
17 | type WatcherSuite struct {
18 |     key                     []byte
19 |     project, zone, instance string
20 | 
21 |     c *http.Client
22 | }
23 | 
24 | var _ = Suite(&WatcherSuite{})
25 | 
26 | func (s *WatcherSuite) SetUpSuite(c *C) {
27 |     s.initEnvironment(c)
28 | }
29 | 
30 | func (s *WatcherSuite) SetUpTest(c *C) {
31 |     ctx := context.Background()
32 |     jwt, err := google.JWTConfigFromJSON(s.key, compute.ComputeScope)
33 |     c.Assert(err, IsNil)
34 | 
35 |     s.c = oauth2.NewClient(ctx, jwt.TokenSource(ctx))
36 | }
37 | 
38 | func (s *WatcherSuite) initEnvironment(c *C) {
39 |     s.project = os.Getenv("GCP_DEFAULT_PROJECT")
40 |     s.zone = os.Getenv("GCP_DEFAULT_ZONE")
41 |     s.instance = os.Getenv("GCP_DEFAULT_INSTANCE")
42 | 
43 |     var err error
44 |     s.key, err = base64.StdEncoding.DecodeString(os.Getenv("GCP_JSON_KEY"))
45 |     c.Assert(err, IsNil)
46 | }
47 | 
48 | func (s *WatcherSuite) TestStart(c *C) {
49 |     c.Skip("playground")
50 |     client, err := docker.NewClientFromEnv()
51 |     c.Assert(err, IsNil)
52 | 
53 |     w, err := NewWatcher(client, s.c, s.project, s.zone, s.instance)
54 |     c.Assert(err, IsNil)
55 | 
56 |     container := &docker.Container{
57 |         ID: "abcdefghijklm",
58 |         NetworkSettings: &docker.NetworkSettings{
59 |             Ports: map[docker.Port][]docker.PortBinding{
60 |                 docker.Port("80/tcp"): []docker.PortBinding{
61 |                     {HostIP: "0.0.0.0", HostPort: "80"},
62 |                 },
63 |                 docker.Port("443/tcp"): []docker.PortBinding{
64 |                     {HostIP: "0.0.0.0", HostPort: "443"},
65 |                 },
66 |                 docker.Port("53/udp"): []docker.PortBinding{
67 |                     {HostIP: "127.0.0.1", HostPort: "53"},
68 |                 },
69 |             },
70 |         },
71 |     }
72 | 
73 |     w.createNetworkConfig(container, map[string]string{
74 |         LabelNetworkGroup: "foo",
75 |     })
76 | 
77 |     err = w.Watch()
78 |     c.Assert(err, IsNil)
79 | }
80 | 
--------------------------------------------------------------------------------
/watcher/worker.go:
--------------------------------------------------------------------------------
1 | package watcher
2 | 
3 | import (
4 |     "sync"
5 |     "time"
6 | )
7 | 
8 | type JobID string
9 | type Job func() error
10 | 
11 | type Worker struct {
12 |     jobs map[JobID]Job
13 |     sync.Mutex
14 | }
15 | 
16 | func NewWorker() *Worker {
17 |     return &Worker{
18 |         jobs: make(map[JobID]Job, 0),
19 |     }
20 | }
21 | 
22 | func (w *Worker) Add(id JobID, j Job, delay time.Duration) {
23 |     w.Lock()
24 |     defer w.Unlock()
25 | 
26 |     w.jobs[id] = j
27 |     go w.do(id, delay)
28 | }
29 | 
30 | func (w *Worker) do(id JobID, delay time.Duration) {
31 |     <-time.After(delay)
32 |     defer w.Delete(id)
33 | 
34 |     // read the job under the lock, so the map is never accessed while
35 |     // Add or Delete is mutating it from another goroutine
36 |     w.Lock()
37 |     j, ok := w.jobs[id]
38 |     w.Unlock()
39 | 
40 |     if ok {
41 |         j()
42 |     }
43 | }
44 | 
45 | func (w *Worker) Delete(id JobID) bool {
46 |     w.Lock()
47 |     defer w.Unlock()
48 | 
49 |     _, ok := w.jobs[id]
50 |     delete(w.jobs, id)
51 | 
52 |     return ok
53 | }
54 | 
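`attach` and `detach` rely on this Worker to debounce Docker events: each event schedules a delayed job keyed by the container ID, and a newer event can cancel a pending job via `Delete`. A minimal, self-contained usage sketch (the ID and delay are illustrative):

```go
package main

import (
	"fmt"
	"time"

	"github.com/mcuadros/gce-docker/watcher"
)

func main() {
	w := watcher.NewWorker()
	id := watcher.JobID("abcdef123456") // any stable per-container key

	// the job fires once the delay has elapsed, unless Delete removes it first
	w.Add(id, func() error {
		fmt.Println("creating load balancer resources")
		return nil
	}, 100*time.Millisecond)

	// w.Delete(id) // a "die" event arriving within the delay would cancel it

	time.Sleep(200 * time.Millisecond)
}
```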
--------------------------------------------------------------------------------
/watcher/worker_test.go:
--------------------------------------------------------------------------------
1 | package watcher
2 | 
3 | import (
4 |     "testing"
5 |     "time"
6 | 
7 |     . "gopkg.in/check.v1"
8 | )
9 | 
10 | func Test(t *testing.T) { TestingT(t) }
11 | 
12 | type WorkerSuite struct{}
13 | 
14 | var _ = Suite(&WorkerSuite{})
15 | 
16 | func (s *WorkerSuite) TestAdd(c *C) {
17 |     delay := 10 * time.Millisecond
18 |     start := time.Now()
19 |     var since time.Duration
20 | 
21 |     w := NewWorker()
22 |     w.Add(JobID(""), func() error {
23 |         since = time.Since(start)
24 |         return nil
25 |     }, delay)
26 | 
27 |     time.Sleep(delay * 2)
28 |     c.Assert(since > delay, Equals, true)
29 | }
30 | 
31 | func (s *WorkerSuite) TestAddAndDelete(c *C) {
32 |     id := JobID("foo")
33 |     delay := 10 * time.Millisecond
34 |     start := time.Now()
35 |     var since time.Duration
36 | 
37 |     w := NewWorker()
38 |     w.Add(id, func() error {
39 |         since = time.Since(start)
40 |         return nil
41 |     }, delay)
42 | 
43 |     w.Delete(id)
44 | 
45 |     time.Sleep(delay * 2)
46 | 
47 |     c.Assert(since, Equals, time.Duration(0))
48 | }
49 | 
--------------------------------------------------------------------------------
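Putting the pieces together, a watcher is constructed from a Docker client and an OAuth2-authenticated HTTP client, as the (skipped) `TestStart` above does. A minimal wiring sketch, assuming the same environment variables the test suite reads; error handling is reduced to `log.Fatal` for brevity:

```go
package main

import (
	"encoding/base64"
	"log"
	"os"

	"github.com/fsouza/go-dockerclient"
	"github.com/mcuadros/gce-docker/watcher"
	"golang.org/x/net/context"
	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
	"google.golang.org/api/compute/v1"
)

func main() {
	// Docker client from DOCKER_HOST / the usual environment defaults
	client, err := docker.NewClientFromEnv()
	if err != nil {
		log.Fatal(err)
	}

	// GCE credentials, decoded from the same base64 JSON key the tests use
	key, err := base64.StdEncoding.DecodeString(os.Getenv("GCP_JSON_KEY"))
	if err != nil {
		log.Fatal(err)
	}

	ctx := context.Background()
	jwt, err := google.JWTConfigFromJSON(key, compute.ComputeScope)
	if err != nil {
		log.Fatal(err)
	}

	w, err := watcher.NewWatcher(
		client, oauth2.NewClient(ctx, jwt.TokenSource(ctx)),
		os.Getenv("GCP_DEFAULT_PROJECT"),
		os.Getenv("GCP_DEFAULT_ZONE"),
		os.Getenv("GCP_DEFAULT_INSTANCE"),
	)
	if err != nil {
		log.Fatal(err)
	}

	// blocks, dispatching start/die events to the network provider
	if err := w.Watch(); err != nil {
		log.Fatal(err)
	}
}
```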