├── .gitignore ├── .travis.yml ├── LICENSE ├── README.md ├── Vagrantfile ├── contrib └── upstart │ ├── README.md │ └── etc │ ├── default │ └── docker-volume-glusterfs │ └── init │ └── docker-volume-glusterfs.conf ├── driver.go ├── main.go └── rest └── client.go /.gitignore: -------------------------------------------------------------------------------- 1 | .vagrant 2 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | --- 2 | language: go 3 | sudo: false 4 | notifications: 5 | email: false 6 | go: 7 | - 1.5 8 | - tip 9 | install: 10 | - go get -t ./... 11 | - go get github.com/golang/lint/golint 12 | script: 13 | - go vet ./... 14 | - test -z "$(golint ./... | tee /dev/stderr)" 15 | - test -z "$(gofmt -s -l . | tee /dev/stderr)" 16 | - go test -v ./... 17 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2015 David Calavera 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining 4 | a copy of this software and associated documentation files (the 5 | "Software"), to deal in the Software without restriction, including 6 | without limitation the rights to use, copy, modify, merge, publish, 7 | distribute, sublicense, and/or sell copies of the Software, and to 8 | permit persons to whom the Software is furnished to do so, subject to 9 | the following conditions: 10 | 11 | The above copyright notice and this permission notice shall be 12 | included in all copies or substantial portions of the Software. 13 | 14 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 15 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 16 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
17 | IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 18 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 19 | TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 20 | SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 21 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Docker volume plugin for GlusterFS 2 | 3 | # UNMAINTAINED: This library is not maintained anymore. Fork it, copy it or do what you please with the code, but this repository won't get updates and fixes. 4 | 5 | This plugin uses GlusterFS as distributed data storage for containers. 6 | 7 | [![TravisCI](https://travis-ci.org/calavera/docker-volume-glusterfs.svg)](https://travis-ci.org/calavera/docker-volume-glusterfs) 8 | 9 | ## Installation 10 | 11 | Using go (until we get proper binaries): 12 | 13 | ``` 14 | $ go get github.com/calavera/docker-volume-glusterfs 15 | ``` 16 | 17 | ## Usage 18 | 19 | This plugin doesn't create volumes in your GlusterFS cluster yet, so you'll have to create them yourself first. 20 | 21 | 1 - Start the plugin using this command: 22 | 23 | ``` 24 | $ sudo docker-volume-glusterfs -servers gfs-1:gfs-2:gfs-3 25 | ``` 26 | 27 | We use the flag `-servers` to specify where to find the GlusterFS servers. The server names are separated by colon. 
28 | 29 | 2 - Start your docker containers with the option `--volume-driver=glusterfs` and use the first part of `--volume` to specify the remote volume that you want to connect to: 30 | 31 | ``` 32 | $ sudo docker run --volume-driver glusterfs --volume datastore:/data alpine touch /data/helo 33 | ``` 34 | 35 | See this video for a slightly longer usage explanation: 36 | 37 | https://youtu.be/SVtsT9WVujs 38 | 39 | ### Volume creation on demand 40 | 41 | This extension can create volumes on the remote cluster if you install https://github.com/aravindavk/glusterfs-rest in one of the nodes of the cluster. 42 | 43 | You need to set two extra flags when you start the extension if you want to let containers to create their volumes on demand: 44 | 45 | - rest: is the URL address to the remote api. 46 | - gfs-base: is the base path where the volumes will be created. 47 | 48 | This is an example of the command line to start the plugin: 49 | 50 | ``` 51 | $ docker-volume-glusterfs -servers gfs-1:gfs2 \ 52 | -rest http://gfs-1:9000 -gfs-base /var/lib/gluster/volumes 53 | ``` 54 | 55 | These volumes are replicated among all the peers in the cluster that you specify in the `-servers` flag. 
56 | 57 | ## LICENSE 58 | 59 | MIT 60 | -------------------------------------------------------------------------------- /Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | user_config = """ 5 | adduser --disabled-password david 6 | adduser david sudo 7 | chown -R david /home/david/go 8 | 9 | mkdir -p /home/david/.ssh 10 | chown -R david /home/david 11 | cat << END > /home/david/.ssh/authorized_keys 12 | ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDFoY7zn6ZP4EgovBHnVsqPeJ16LuRw4u0Yv8ScHGCMIRMTUM1vW+hO8VIH7DjgabhvzV/OJ4/BEFAJ8NYVouTsW89+vPHqJtWpMUqUN1iCGahYKwXgTXNuHCv+NUMc2rrHP+hizDc/s64djxdGT6iMNKHg9uLv7HLGQFjVSXmCK9Mrdg+d/H3Yhrsoqavdn61Y/H7CxMCvaGsnFIDPsI/BkG4p28GsNPyFpIZoPXdbBXwyaU6EGTPgQgpizbZ1HkMTKNYJeLQLP05Uwa/5KHLZAp74UVYfaSXTqsZrDtGZ8Q4pbKsQ11jrOj99vIDSs9el/9FT0pYaqEMPKbur/5wD david.calavera@gmail.com 13 | END 14 | 15 | cat << END > /etc/sudoers.d/david 16 | david ALL=(ALL) NOPASSWD:ALL 17 | END 18 | """ 19 | 20 | hosts_config = """cat << END >> /etc/hosts 21 | 172.21.12.11 gfs-server-1 22 | 172.21.12.12 gfs-server-2 23 | 172.21.12.13 gfs-server-3 24 | 25 | 172.21.12.10 gfs-client-1 26 | 172.21.12.20 gfs-client-2 27 | END 28 | """ 29 | 30 | server_shell = """ 31 | DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -yq python-software-properties 32 | DEBIAN_FRONTEND=noninteractive add-apt-repository ppa:semiosis/ubuntu-glusterfs-3.5 33 | DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -yq glusterfs-server 34 | 35 | #{user_config} 36 | 37 | #{hosts_config} 38 | """ 39 | 40 | client_shell = %Q{ 41 | DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -yq python-software-properties 42 | DEBIAN_FRONTEND=noninteractive add-apt-repository ppa:semiosis/ubuntu-glusterfs-3.5 43 | DEBIAN_FRONTEND=noninteractive apt-get update && apt-get install -yq glusterfs-client 44 | 45 | #{user_config} 46 | 47 | cd /home/david 48 | curl -z 
go1.4.2.linux-amd64.tar.gz -L -O https://storage.googleapis.com/golang/go1.4.2.linux-amd64.tar.gz 49 | tar -C /usr/local -zxf /home/david/go1.4.2.linux-amd64.tar.gz 50 | 51 | cat << END > /etc/profile.d/go.sh 52 | export GOPATH=\\/home/david/go 53 | export PATH=\\$GOPATH/bin:/usr/local/go/bin:\\$PATH 54 | END 55 | 56 | cat << END > /etc/sudoers.d/go 57 | Defaults env_keep += "GOPATH" 58 | END 59 | 60 | #{hosts_config} 61 | } 62 | 63 | Vagrant.configure("2") do |config| 64 | config.vm.box = "ubuntu/trusty64" 65 | # We setup three nodes to be gluster hosts, and two gluster client to mount the volume 66 | 3.times do |i| 67 | id = i+1 68 | config.vm.define vm_name = "gfs-server-#{id}" do |config| 69 | config.vm.hostname = vm_name 70 | ip = "172.21.12.#{id+10}" 71 | config.vm.network :private_network, ip: ip 72 | config.vm.provision :shell, :inline => server_shell, :privileged => true 73 | end 74 | end 75 | 76 | 2.times do |i| 77 | id = i+1 78 | config.vm.define vm_name = "gfs-client-#{id}" do |config| 79 | config.ssh.forward_agent = true 80 | config.vm.synced_folder ".", "/home/david/go/src/github.com/calavera/docker-volume-glusterfs", create: true 81 | 82 | config.vm.hostname = vm_name 83 | ip = "172.21.12.#{id * 10}" 84 | config.vm.network :private_network, ip: ip 85 | config.vm.provision :shell, :inline => client_shell, :privileged => true 86 | end 87 | end 88 | end 89 | -------------------------------------------------------------------------------- /contrib/upstart/README.md: -------------------------------------------------------------------------------- 1 | # Upstart script for docker-volume-glusterfs 2 | ## configure your glusterfs nodes 3 | Similar to the docker service you can pass parameters thru a config file 4 | ```bash 5 | vi etc/default/docker-volume-glusterfs 6 | ``` 7 | 8 | ## copy the config and init script inside of your /etc/ folder 9 | ```bash 10 | sudo cp ./etc/default/docker-volume-glusterfs /etc/default/docker-volume-glusterfs 11 | sudo cp 
./etc/init/docker-volume-glusterfs 12 | ``` 13 | ## reload the upstart configuration 14 | ```bash 15 | sudo initctl reload-configuration 16 | ``` 17 | 18 | ## start the docker-volume-glusterfs 19 | ```bash 20 | sudo service docker-volume-glusterfs start 21 | sudo service docker-volume-glusterfs status 22 | ``` 23 | 24 | ## check the logs 25 | ```bash 26 | sudo tail -f /var/log/upstart/docker-volume-glusterfs.log 27 | ``` 28 | -------------------------------------------------------------------------------- /contrib/upstart/etc/default/docker-volume-glusterfs: -------------------------------------------------------------------------------- 1 | GLUSTERFS_DRIVER_OPTS=' 2 | -servers server-01:server-02 3 | ' 4 | -------------------------------------------------------------------------------- /contrib/upstart/etc/init/docker-volume-glusterfs.conf: -------------------------------------------------------------------------------- 1 | #docker-volume-glusterfs - docker-volume-glusterfs job file 2 | 3 | description "Docker GlusterFS volume driver" 4 | author "Angel Dimitrov angel@sourcestream.de" 5 | 6 | start on starting docker 7 | stop on stopped docker 8 | respawn 9 | 10 | script 11 | GLUSTERFS_DRIVER_OPTS= 12 | if [ -f /etc/default/$UPSTART_JOB ]; then 13 | . 
/etc/default/$UPSTART_JOB 14 | fi 15 | exec docker-volume-glusterfs $GLUSTERFS_DRIVER_OPTS 16 | end script 17 | -------------------------------------------------------------------------------- /driver.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "os" 7 | "os/exec" 8 | "path/filepath" 9 | "strings" 10 | "sync" 11 | 12 | "github.com/calavera/docker-volume-glusterfs/rest" 13 | "github.com/docker/go-plugins-helpers/volume" 14 | ) 15 | 16 | type volumeName struct { 17 | name string 18 | connections int 19 | } 20 | 21 | type glusterfsDriver struct { 22 | root string 23 | restClient *rest.Client 24 | servers []string 25 | volumes map[string]*volumeName 26 | m *sync.Mutex 27 | } 28 | 29 | func newGlusterfsDriver(root, restAddress, gfsBase string, servers []string) glusterfsDriver { 30 | d := glusterfsDriver{ 31 | root: root, 32 | servers: servers, 33 | volumes: map[string]*volumeName{}, 34 | m: &sync.Mutex{}, 35 | } 36 | if len(restAddress) > 0 { 37 | d.restClient = rest.NewClient(restAddress, gfsBase) 38 | } 39 | return d 40 | } 41 | 42 | func (d glusterfsDriver) Create(r volume.Request) volume.Response { 43 | log.Printf("Creating volume %s\n", r.Name) 44 | d.m.Lock() 45 | defer d.m.Unlock() 46 | m := d.mountpoint(r.Name) 47 | 48 | if _, ok := d.volumes[m]; ok { 49 | return volume.Response{} 50 | } 51 | 52 | if d.restClient != nil { 53 | exist, err := d.restClient.VolumeExist(r.Name) 54 | if err != nil { 55 | return volume.Response{Err: err.Error()} 56 | } 57 | 58 | if !exist { 59 | if err := d.restClient.CreateVolume(r.Name, d.servers); err != nil { 60 | return volume.Response{Err: err.Error()} 61 | } 62 | } 63 | } 64 | return volume.Response{} 65 | } 66 | 67 | func (d glusterfsDriver) Remove(r volume.Request) volume.Response { 68 | log.Printf("Removing volume %s\n", r.Name) 69 | d.m.Lock() 70 | defer d.m.Unlock() 71 | m := d.mountpoint(r.Name) 72 | 73 | if s, ok := 
d.volumes[m]; ok { 74 | if s.connections <= 1 { 75 | if d.restClient != nil { 76 | if err := d.restClient.StopVolume(r.Name); err != nil { 77 | return volume.Response{Err: err.Error()} 78 | } 79 | } 80 | delete(d.volumes, m) 81 | } 82 | } 83 | return volume.Response{} 84 | } 85 | 86 | func (d glusterfsDriver) Path(r volume.Request) volume.Response { 87 | return volume.Response{Mountpoint: d.mountpoint(r.Name)} 88 | } 89 | 90 | func (d glusterfsDriver) Mount(r volume.Request) volume.Response { 91 | d.m.Lock() 92 | defer d.m.Unlock() 93 | m := d.mountpoint(r.Name) 94 | log.Printf("Mounting volume %s on %s\n", r.Name, m) 95 | 96 | s, ok := d.volumes[m] 97 | if ok && s.connections > 0 { 98 | s.connections++ 99 | return volume.Response{Mountpoint: m} 100 | } 101 | 102 | fi, err := os.Lstat(m) 103 | 104 | if os.IsNotExist(err) { 105 | if err := os.MkdirAll(m, 0755); err != nil { 106 | return volume.Response{Err: err.Error()} 107 | } 108 | } else if err != nil { 109 | return volume.Response{Err: err.Error()} 110 | } 111 | 112 | if fi != nil && !fi.IsDir() { 113 | return volume.Response{Err: fmt.Sprintf("%v already exist and it's not a directory", m)} 114 | } 115 | 116 | if err := d.mountVolume(r.Name, m); err != nil { 117 | return volume.Response{Err: err.Error()} 118 | } 119 | 120 | d.volumes[m] = &volumeName{name: r.Name, connections: 1} 121 | 122 | return volume.Response{Mountpoint: m} 123 | } 124 | 125 | func (d glusterfsDriver) Unmount(r volume.Request) volume.Response { 126 | d.m.Lock() 127 | defer d.m.Unlock() 128 | m := d.mountpoint(r.Name) 129 | log.Printf("Unmounting volume %s from %s\n", r.Name, m) 130 | 131 | if s, ok := d.volumes[m]; ok { 132 | if s.connections == 1 { 133 | if err := d.unmountVolume(m); err != nil { 134 | return volume.Response{Err: err.Error()} 135 | } 136 | } 137 | s.connections-- 138 | } else { 139 | return volume.Response{Err: fmt.Sprintf("Unable to find volume mounted on %s", m)} 140 | } 141 | 142 | return volume.Response{} 143 | } 144 | 
145 | func (d glusterfsDriver) Get(r volume.Request) volume.Response { 146 | d.m.Lock() 147 | defer d.m.Unlock() 148 | m := d.mountpoint(r.Name) 149 | if s, ok := d.volumes[m]; ok { 150 | return volume.Response{Volume: &volume.Volume{Name: s.name, Mountpoint: d.mountpoint(s.name)}} 151 | } 152 | 153 | return volume.Response{Err: fmt.Sprintf("Unable to find volume mounted on %s", m)} 154 | } 155 | 156 | func (d glusterfsDriver) List(r volume.Request) volume.Response { 157 | d.m.Lock() 158 | defer d.m.Unlock() 159 | var vols []*volume.Volume 160 | for _, v := range d.volumes { 161 | vols = append(vols, &volume.Volume{Name: v.name, Mountpoint: d.mountpoint(v.name)}) 162 | } 163 | return volume.Response{Volumes: vols} 164 | } 165 | 166 | func (d *glusterfsDriver) mountpoint(name string) string { 167 | return filepath.Join(d.root, name) 168 | } 169 | 170 | func (d *glusterfsDriver) mountVolume(name, destination string) error { 171 | var serverNodes []string 172 | for _, server := range d.servers { 173 | serverNodes = append(serverNodes, fmt.Sprintf("-s %s", server)) 174 | } 175 | 176 | cmd := fmt.Sprintf("glusterfs --volfile-id=%s %s %s", name, strings.Join(serverNodes[:], " "), destination) 177 | if out, err := exec.Command("sh", "-c", cmd).CombinedOutput(); err != nil { 178 | log.Println(string(out)) 179 | return err 180 | } 181 | return nil 182 | } 183 | 184 | func (d *glusterfsDriver) unmountVolume(target string) error { 185 | cmd := fmt.Sprintf("umount %s", target) 186 | if out, err := exec.Command("sh", "-c", cmd).CombinedOutput(); err != nil { 187 | log.Println(string(out)) 188 | return err 189 | } 190 | return nil 191 | } 192 | 193 | func (d glusterfsDriver) Capabilities(r volume.Request) volume.Response { 194 | var res volume.Response 195 | res.Capabilities = volume.Capability{Scope: "local"} 196 | return res 197 | } 198 | -------------------------------------------------------------------------------- /main.go: 
-------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | "os" 7 | "path/filepath" 8 | "strings" 9 | 10 | "github.com/docker/go-plugins-helpers/volume" 11 | ) 12 | 13 | const glusterfsID = "_glusterfs" 14 | 15 | var ( 16 | defaultDir = filepath.Join(volume.DefaultDockerRootDirectory, glusterfsID) 17 | serversList = flag.String("servers", "", "List of glusterfs servers") 18 | restAddress = flag.String("rest", "", "URL to glusterfsrest api") 19 | gfsBase = flag.String("gfs-base", "/mnt/gfs", "Base directory where volumes are created in the cluster") 20 | root = flag.String("root", defaultDir, "GlusterFS volumes root directory") 21 | ) 22 | 23 | func main() { 24 | var Usage = func() { 25 | fmt.Fprintf(os.Stderr, "Usage: %s [options]\n", os.Args[0]) 26 | flag.PrintDefaults() 27 | } 28 | 29 | flag.Parse() 30 | if len(*serversList) == 0 { 31 | Usage() 32 | os.Exit(1) 33 | } 34 | 35 | servers := strings.Split(*serversList, ":") 36 | 37 | d := newGlusterfsDriver(*root, *restAddress, *gfsBase, servers) 38 | h := volume.NewHandler(d) 39 | fmt.Println(h.ServeUnix("root", "glusterfs")) 40 | } 41 | -------------------------------------------------------------------------------- /rest/client.go: -------------------------------------------------------------------------------- 1 | package rest 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "net/http" 7 | "net/url" 8 | "path/filepath" 9 | "strconv" 10 | "strings" 11 | ) 12 | 13 | const ( 14 | volumesPath = "/api/1.0/volumes" 15 | volumeCreatePath = "/api/1.0/volume/%s" 16 | volumeStopPath = "/api/1.0/volume/%s/stop" 17 | ) 18 | 19 | type peer struct { 20 | ID string `json:"id"` 21 | Name string `json:"name"` 22 | Status string `json:"status"` 23 | } 24 | 25 | type volume struct { 26 | Name string `json:"name"` 27 | UUID string `json:"uuid"` 28 | Type string `json:"type"` 29 | Status string `json:"status"` 30 | NumBricks int `json:"num_bricks"` 31 
| Distribute int `json:"distribute"` 32 | Stripe int `json:"stripe"` 33 | Replica int `json:"replica"` 34 | Transport string `json:"transport"` 35 | } 36 | 37 | type response struct { 38 | Ok bool `json:"ok"` 39 | Err string `json:"error,omitempty"` 40 | } 41 | 42 | type peerResponse struct { 43 | Data []peer `json:"data,omitempty"` 44 | response 45 | } 46 | 47 | type volumeResponse struct { 48 | Data []volume `json:"data,omitempty"` 49 | response 50 | } 51 | 52 | // Client is the http client that sends requests to the gluster API. 53 | type Client struct { 54 | addr string 55 | base string 56 | } 57 | 58 | // NewClient initializes a new client. 59 | func NewClient(addr, base string) *Client { 60 | return &Client{addr, base} 61 | } 62 | 63 | // VolumeExist returns whether a volume exist in the cluster with a given name or not. 64 | func (r Client) VolumeExist(name string) (bool, error) { 65 | vols, err := r.volumes() 66 | if err != nil { 67 | return false, err 68 | } 69 | 70 | for _, v := range vols { 71 | if v.Name == name { 72 | return true, nil 73 | } 74 | } 75 | 76 | return false, nil 77 | } 78 | 79 | func (r Client) volumes() ([]volume, error) { 80 | u := fmt.Sprintf("%s%s", r.addr, volumesPath) 81 | 82 | res, err := http.Get(u) 83 | if err != nil { 84 | return nil, err 85 | } 86 | 87 | var d volumeResponse 88 | if err := json.NewDecoder(res.Body).Decode(&d); err != nil { 89 | return nil, err 90 | } 91 | 92 | if !d.Ok { 93 | return nil, fmt.Errorf(d.Err) 94 | } 95 | return d.Data, nil 96 | } 97 | 98 | // CreateVolume creates a new volume with the given name in the cluster. 
99 | func (r Client) CreateVolume(name string, peers []string) error { 100 | u := fmt.Sprintf("%s%s", r.addr, fmt.Sprintf(volumeCreatePath, name)) 101 | fmt.Println(u) 102 | 103 | bricks := make([]string, len(peers)) 104 | for i, p := range peers { 105 | bricks[i] = fmt.Sprintf("%s:%s", p, filepath.Join(r.base, name)) 106 | } 107 | 108 | params := url.Values{ 109 | "bricks": {strings.Join(bricks, ",")}, 110 | "replica": {strconv.Itoa(len(peers))}, 111 | "transport": {"tcp"}, 112 | "start": {"true"}, 113 | "force": {"true"}, 114 | } 115 | 116 | resp, err := http.PostForm(u, params) 117 | if err != nil { 118 | return err 119 | } 120 | 121 | return responseCheck(resp) 122 | } 123 | 124 | // StopVolume stops the volume with the given name in the cluster. 125 | func (r Client) StopVolume(name string) error { 126 | u := fmt.Sprintf("%s%s", r.addr, fmt.Sprintf(volumeStopPath, name)) 127 | 128 | req, err := http.NewRequest("PUT", u, nil) 129 | if err != nil { 130 | return err 131 | } 132 | 133 | resp, err := http.DefaultClient.Do(req) 134 | if err != nil { 135 | return err 136 | } 137 | 138 | return responseCheck(resp) 139 | } 140 | 141 | func responseCheck(resp *http.Response) error { 142 | var p response 143 | if err := json.NewDecoder(resp.Body).Decode(&p); err != nil { 144 | return err 145 | } 146 | 147 | if !p.Ok { 148 | return fmt.Errorf(p.Err) 149 | } 150 | 151 | return nil 152 | } 153 | --------------------------------------------------------------------------------