├── .dockerignore ├── .gitignore ├── Dockerfile ├── LICENSE ├── Makefile ├── README.md ├── build-tests.sh ├── config.json ├── doc └── ipo.md ├── docker-compose.yml ├── index.js ├── package-lock.json ├── package.json ├── run-tests.sh └── test ├── Dockerfile ├── docker-compose.yml ├── docker-entrypoint.sh ├── test-environment.sh └── test-run.sh /.dockerignore: -------------------------------------------------------------------------------- 1 | node_modules/ 2 | plugin/ 3 | .git/ 4 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | node_modules/ 2 | plugin/ 3 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:10-alpine 2 | 3 | #### 4 | # Install SeaweedFS Client 5 | #### 6 | 7 | ARG SEAWEEDFS_VERSION=3.43 8 | ENV SEAWEEDFS_VERSION=$SEAWEEDFS_VERSION 9 | ARG GOARCH=amd64 10 | ENV GOARCH=$GOARCH 11 | 12 | RUN apk update && \ 13 | apk add fuse3 && \ 14 | apk add fuse && \ 15 | apk add --no-cache --virtual build-dependencies --update wget curl ca-certificates && \ 16 | wget -qO /tmp/linux_${GOARCH}.tar.gz https://github.com/chrislusf/seaweedfs/releases/download/${SEAWEEDFS_VERSION}/linux_${GOARCH}.tar.gz && \ 17 | tar -C /usr/bin/ -xzvf /tmp/linux_${GOARCH}.tar.gz && \ 18 | apk del build-dependencies && \ 19 | rm -rf /tmp/* 20 | 21 | #### 22 | # Install Docker volume driver API server 23 | #### 24 | 25 | # Create directories for mounts 26 | RUN mkdir -p /mnt/seaweedfs 27 | RUN mkdir -p /mnt/docker-volumes 28 | 29 | # Copy in package.json 30 | COPY package.json package-lock.json /project/ 31 | 32 | # Switch to the project directory 33 | WORKDIR /project 34 | 35 | # Install project dependencies 36 | RUN npm install 37 | 38 | # Set Configuration Defaults 39 | ENV HOST=filer \ 40 | PORT=8888 \ 41 | 
ALIAS=seaweedfs \ 42 | ROOT_VOLUME_NAME="" \ 43 | MOUNT_OPTIONS="" \ 44 | REMOTE_PATH=/docker/volumes \ 45 | LOCAL_PATH="" \ 46 | CONNECT_TIMEOUT=10000 \ 47 | LOG_LEVEL=info 48 | 49 | # Copy in source code 50 | COPY index.js /project 51 | 52 | # Set the Docker entrypoint 53 | ENTRYPOINT ["node", "index.js"] 54 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2018 Kadima Solutions 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | 23 | 24 | Makefile based on a Makefile for vieux/docker-volume-sshfs and is under the 25 | following license. 
26 | 27 | The MIT License (MIT) 28 | 29 | Copyright (c) 2015-2017 Victor Vieux 30 | 31 | Permission is hereby granted, free of charge, to any person obtaining a copy 32 | of this software and associated documentation files (the "Software"), to deal 33 | in the Software without restriction, including without limitation the rights 34 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 35 | copies of the Software, and to permit persons to whom the Software is 36 | furnished to do so, subject to the following conditions: 37 | 38 | The above copyright notice and this permission notice shall be included in all 39 | copies or substantial portions of the Software. 40 | 41 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 42 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 43 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 44 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 45 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 46 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 47 | SOFTWARE. 48 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | PLUGIN_NAME = katharostech/seaweedfs-volume-plugin 2 | PLUGIN_TAG ?= rootfs 3 | PRIVATE_REGISTRY ?= localhost:5000 4 | 5 | all: clean rootfs create 6 | 7 | clean: 8 | @echo "### rm ./plugin" 9 | @rm -rf ./plugin 10 | 11 | config: 12 | @echo "### copy config.json to ./plugin/" 13 | @mkdir -p ./plugin 14 | @cp config.json ./plugin/ 15 | 16 | rootfs: config 17 | @echo "### docker build: rootfs image with" 18 | @docker build -t ${PLUGIN_NAME}:rootfs --build-arg http_proxy=${http_proxy} --build-arg https_proxy=${https_proxy} . 
19 | @echo "### create rootfs directory in ./plugin/rootfs" 20 | @mkdir -p ./plugin/rootfs 21 | @docker create --name tmp ${PLUGIN_NAME}:rootfs 22 | @docker export tmp | tar -x -C ./plugin/rootfs 23 | @docker rm -vf tmp 24 | 25 | create: 26 | @echo "### remove existing plugin ${PLUGIN_NAME}:${PLUGIN_TAG} if exists" 27 | @docker plugin rm -f ${PLUGIN_NAME}:${PLUGIN_TAG} || true 28 | @echo "### create new plugin ${PLUGIN_NAME}:${PLUGIN_TAG} from ./plugin" 29 | @docker plugin create ${PLUGIN_NAME}:${PLUGIN_TAG} ./plugin 30 | 31 | 32 | create_private: 33 | @echo "### remove existing plugin (for private registry) ${PRIVATE_REGISTRY}/${PLUGIN_NAME}:${PLUGIN_TAG} if exists" 34 | @docker plugin rm -f ${PRIVATE_REGISTRY}/${PLUGIN_NAME}:${PLUGIN_TAG} || true 35 | @echo "### create new plugin (for private registry) ${PRIVATE_REGISTRY}/${PLUGIN_NAME}:${PLUGIN_TAG} from ./plugin" 36 | @docker plugin create ${PRIVATE_REGISTRY}/${PLUGIN_NAME}:${PLUGIN_TAG} ./plugin 37 | 38 | enable: 39 | @echo "### enable plugin ${PLUGIN_NAME}:${PLUGIN_TAG}" 40 | @docker plugin enable ${PLUGIN_NAME}:${PLUGIN_TAG} 41 | 42 | disable: 43 | @echo "### disable plugin ${PLUGIN_NAME}:${PLUGIN_TAG}" 44 | @docker plugin disable ${PLUGIN_NAME}:${PLUGIN_TAG} 45 | 46 | push: clean rootfs create enable 47 | @echo "### push plugin ${PLUGIN_NAME}:${PLUGIN_TAG}" 48 | @docker plugin push ${PLUGIN_NAME}:${PLUGIN_TAG} 49 | 50 | push_private: clean rootfs create_private 51 | @echo "### push plugin ${PRIVATE_REGISTRY}/${PLUGIN_NAME}:${PLUGIN_TAG}" 52 | @docker plugin push ${PRIVATE_REGISTRY}/${PLUGIN_NAME}:${PLUGIN_TAG} 53 | 54 | 55 | 56 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # SeaweedFS Docker Plugin 2 | 3 | > :warning: **Warning:** This project is not maintained and may not be properly functioning. 
PRs and forks are welcome, but there are no guarantees to the current state of this plugin or its effectiveness. You may be interested in checking out the @onaci fork: https://github.com/onaci/docker-plugin-seaweedfs. 4 | 5 | > **Note:** This plugin was forked from a [LizardFS Docker Plugin](https://github.com/kadimasolutions/docker-plugin_lizardfs) so there may still be references to LizardFS somewhere in here that I haven't found and replaced yet. 6 | 7 | A Docker volume driver plugin for mounting a [SeaweedFS](https://github.com/chrislusf/seaweedfs) filesystem. Allows you to transparently provide storage for your Docker containers using SeaweedFS. This plugin can be used in combination with the [SeaweedFS Docker Image](https://github.com/chrislusf/seaweedfs/tree/master/docker) to create a fully containerized, clustered storage solution for Docker Swarm. Documentation and development are still in progress. 8 | 9 | ## Usage 10 | 11 | ### Prerequisites 12 | 13 | Before you can use the plugin you must have: 14 | 15 | * A running SeaweedFS cluster with a [Filer](https://github.com/chrislusf/seaweedfs/wiki/Directories-and-Files) that your Docker host can access. 16 | * A directory on the SeaweedFS filesystem that can be used by the plugin to store Docker volumes. This can be any normal directory. By default the plugin will use `/docker/volumes`, but this can be changed to something else like the root directory, for example ( see [REMOTE_PATH](#remote-path) ). 17 | 18 | Once these conditions are met you are ready to install the plugin. 19 | 20 | ### Installation 21 | 22 | The plugin is simple to use and can be installed as a Docker container without having to install any other system dependencies. 23 | 24 | $ docker plugin install --alias seaweedfs katharostech/seaweedfs-volume-plugin HOST=localhost:8888 25 | 26 | Docker will prompt asking if you want to grant the permissions required to run the plugin. Select yes and the plugin will download and install.
27 | 28 | > **Note:** We set the plugin alias to `seaweedfs`. This is completely optional, but it allows us to refer to the plugin with a much shorter name. Throughout this readme, when reference is made to the `seaweedfs` driver, it is referring to this alias. 29 | 30 | That's it! You can now see your newly installed Docker plugin by running `docker plugin ls`. 31 | 32 | $ docker plugin ls 33 | ID NAME DESCRIPTION ENABLED 34 | 4a08a23cf2eb seaweedfs:latest SeaweedFS volume plugin for Docker true 35 | 36 | You should now be able to create a Docker volume using our new `seaweedfs` driver. 37 | 38 | $ docker volume create --driver seaweedfs weed-vol 39 | weed-vol 40 | 41 | You can see it by running `docker volume ls`. 42 | 43 | $ docker volume ls 44 | DRIVER VOLUME NAME 45 | seaweedfs:latest weed-vol 46 | 47 | Now that you have created the volume you can mount it into a container using its name. Lets mount it into an alpine container and put some data in it. 48 | 49 | ```sh 50 | $ docker run -it --rm -v weed-vol:/data alpine sh 51 | / $ cd /data # Switch to our volume mountpoint 52 | /data $ cp -R /etc . # Copy the whole container /etc directory to it 53 | /data $ ls # See that the copy was successful 54 | etc 55 | /data $ exit # Exit ( the container will be removed because of the --rm ) 56 | ``` 57 | 58 | We should now have a copy of the alpine container's whole `/etc` directory on our `weed-vol` volume. You can verify this by checking the `/docker/volumes/weed-vol/` directory on your SeaweedFS installation. You should see the `etc` folder with all of its files and folders in it. Congratulations! You have successfully mounted your SeaweedFS filesytem into a docker container and stored data in it! 59 | 60 | If you run another container, you can mount the same volume into it and that container will also see the data. Your data will stick around as long as that volume exists. When you are done with it, you can remove the volume by running `docker volume rm weed-vol`. 
61 | 62 | ### Features 63 | 64 | #### Shared Mounts 65 | 66 | Any number of containers on any number of hosts can mount the same volume at the same time. The only requirement is that each Docker host has the SeaweedFS plugin installed on it. 67 | 68 | #### Transparent Data Storage ( No Hidden Metadata ) 69 | 70 | Each SeaweedFS Docker volume maps 1-to-1 to a directory on the SeaweedFS filesystem. All directories in the [REMOTE_PATH](#remote-path) on the SeaweedFS filesystem will be exposed as a Docker volume regardless of whether or not the directory was created by running `docker volume create`. There is no special metadata or any other extra information used by the plugin to keep track of what volumes exist. If there is a directory there, it is a Docker volume and it can be mounted ( and removed ) by the SeaweedFS plugin. This makes it easy to understand and allows you to manage your Docker volumes directly on the filesystem, if necessary, for things like backup and restore. 71 | 72 | #### Multiple SeaweedFS Clusters 73 | 74 | It is also possible, if you have multiple SeaweedFS clusters, to install the plugin multiple times with different settings for the different clusters. For example, if you have two SeaweedFS clusters, one at `host1` and another at `host2`, you can install the plugin two times, with different aliases, to allow you to create volumes on both clusters. 75 | 76 | $ docker plugin install --alias seaweedfs1 --grant-all-permissions katharostech/seaweedfs-volume-plugin HOST=host1:8888 77 | $ docker plugin install --alias seaweedfs2 --grant-all-permissions katharostech/seaweedfs-volume-plugin HOST=host2:8888 78 | 79 | This gives you the ability to create volumes for both clusters by specifying either `seaweedfs1` or `seaweedfs2` as the volume driver when creating a volume. 80 | 81 | #### Root Mount Option 82 | 83 | The plugin has the ability to provide a volume that contains *all* of the SeaweedFS Docker volumes in it.
This is called the Root Volume and is identical to mounting the configured `REMOTE_PATH` on your SeaweedFS filesystem into your container. This volume does not exist by default. The Root Volume is enabled by setting the `ROOT_VOLUME_NAME` to the name that you want the volume to have. You should pick a name that does not conflict with any other volume. If there is a volume with the same name as the Root Volume, the Root Volume will take precedence over the other volume. 84 | 85 | There are a few different uses for the Root Volume. Katharos Technology designed the Root Volume feature to accommodate for containerized backup solutions. By mounting the Root Volume into a container that manages your Backups, you can backup *all* of your SeaweedFS Docker volumes without having to manually add a mount to the container every time you create a new volume that needs to be backed up. 86 | 87 | The Root Volume also give you the ability to have containers create and remove SeaweedFS volumes without having to mount the Docker socket and make Docker API calls. Volumes can be added, removed, and otherwise manipulated simply by mounting the Root Volume and making the desired changes. 88 | 89 | ## Configuration 90 | 91 | ### Plugin Configuration 92 | 93 | You can configure the plugin through plugin variables. You may set these variables at installation time by putting `VARIABLE_NAME=value` after the plugin name, or you can set them after the plugin has been installed using `docker plugin set katharostech/seaweedfs-volume-plugin VARIABLE_NAME=value`. 94 | 95 | > **Note:** When configuring the plugin after installation, the plugin must first be disabled before you can set variables. There is no danger of accidentally setting variables while the plugin is enabled, though. Docker will simply tell you that it is not possible. 96 | 97 | #### HOST 98 | 99 | The hostname/ip address and port that will be used when connecting to the SeaweedFS filer. 
Concatenate multiple host and port combinations with a comma, to connect to multiple filer instances. For example `HOST=host1:8888,host2:8888` 100 | 101 | > **Note:** The plugin runs in `host` networking mode. This means that even though it is in a container, it shares its network configuration with the host and should resolve all network addresses as the host system would. 102 | 103 | **Default:** `localhost:8888` 104 | 105 | #### MOUNT_OPTIONS 106 | 107 | Options passed to the `weed mount` command when mounting SeaweedFS volumes. 108 | 109 | **Default:** empty string 110 | 111 | #### REMOTE_PATH 112 | 113 | The path on the SeaweedFS filesystem that Docker volumes will be stored in. This path will be mounted for volume storage by the plugin and must exist on the SeaweedFS filesystem. 114 | 115 | **Default:** `/docker/volumes` 116 | 117 | #### ROOT_VOLUME_NAME 118 | 119 | The name of the Root Volume. If specified, a special volume of the given name will be created that will contain all of the SeaweedFS volumes. It is equivalent to mounting the whole of `REMOTE_PATH` on the SeaweedFS filesystem. See [Root Mount Option](#root-mount-option). 120 | 121 | **Default:** empty string 122 | 123 | #### LOG_LEVEL 124 | 125 | Plugin logging level. Set to `debug` to get more verbose log messages. Logs from Docker plugins can be found in the Docker log and will be suffixed with the plugin ID. 126 | 127 | **Default:** `info` 128 | 129 | ## Log files 130 | 131 | The log files are in the folder `/run/docker/plugins/$your_plugin_id`. 132 | The Id `$your_plugin_id` of the plugin can be obtained via `docker plugin inspect katharostech/seaweedfs-volume-plugin:rootfs | grep "\"Id\""`. 133 | 134 | Log file can be seen with `cat < init-stdout` or `cat < init-stderr`. 135 | 136 | ## Development 137 | 138 | Docker plugins are made up of a `config.json` file and `rootfs` directory.
The `config.json` has all of the metadata and information about the plugin that Docker needs when installing and configuring the plugin. The `rootfs` is the root filesystem of the plugin container. Unfortunately the Docker CLI doesn't allow you to create Docker plugins using a Dockerfile so we use a Makefile to automate the process of creating the plugin `rootfs` from a Dockerfile. 139 | 140 | ### Building the Plugin 141 | 142 | To build the plugin simply run `make rootfs` in the project directory. 143 | 144 | $ make rootfs 145 | 146 | This will build the Dockerfile, export the new Docker image's rootfs, and copy the rootfs and the config.json file to the `plugin` directory. When it is done you should have a new plugin directory with a config.json file and a rootfs folder in it. 147 | 148 | ``` 149 | plugin/ 150 | config.json 151 | rootfs/ 152 | ``` 153 | 154 | After that is finished you can run `make create`. 155 | 156 | $ make create 157 | 158 | This will install the Docker plugin from the `plugin` directory with the name `katharostech/seaweedfs-volume-plugin`. 159 | 160 | Finally run `make enable` to start the plugin.
161 | 162 | $ make enable 163 | 164 | Here is a list of the `make` targets: 165 | 166 | * **clean**: Remove the `plugin` directory 167 | * **config**: Copy the `config.json` file to the `plugin` directory 168 | * **rootfs**: Generate the plugin rootfs from the Dockerfile and put it in the `plugin` directory with the `config.json` 169 | * **create**: Install the plugin from the `plugin` directory 170 | * **enable**: Enable the plugin 171 | * **disable**: Disable the plugin 172 | * **push**: Run the `clean`, `rootfs`, `create`, and `enable` targets, and push the plugin to DockerHub 173 | 174 | ### Push to private registry 175 | 176 | **Optional**: to make the plugin available to a private registry `PRIVATE_REGISTRY` you can run 177 | 178 | `make PRIVATE_REGISTRY=<registry-host:port> push_private` 179 | 180 | 181 | ### Running the tests 182 | 183 | > **Note:** The tests have not been migrated from the LizardFS version of this plugin. The information in this section about tests is straight from the LizardFS version and hasn't been tested after porting the plugin. 184 | 185 | The automated tests for the plugin are run using a Docker-in-Docker container that creates a Dockerized SeaweedFS cluster to test the plugin against. When you run the test container, it will install the plugin inside the Docker-in-Docker container and proceed to create a Dockerized LizardFS cluster in it as well. A shell script is run that manipulates the plugin and runs containers to ensure the plugin behaves as expected. 186 | 187 | Before you can run the tests, the test Docker image must first be built. This is done by running the `build-tests.sh` script. 188 | 189 | $ ./build-tests.sh 190 | 191 | This will build a Docker image, `lizardfs-volume-plugin_test`, using the Dockerfile in the `test` directory. After the image has been built, you can use it to run the tests against the plugin. This is done with the `run-tests.sh` script.
192 | 193 | $ ./run-tests.sh 194 | 195 | By default running `run-tests.sh` will install the plugin from the `plugin` directory before running the tests against it. This means that you must first build the plugin by running `make rootfs`, if you have not already done so. Alternatively, you can also run the tests against a version of the plugin from DockerHub by passing in the plugin tag as a parameter to the `run-tests.sh` script. 196 | 197 | $ ./run-tests.sh kadimasolutions/lizardfs-volume-plugin:latest 198 | 199 | This will download the plugin from DockerHub and run the tests against that version of the plugin. 200 | 201 | ### Tips & Tricks 202 | 203 | If you don't have a fast disk on your development machine, developing Docker plugins can be somewhat tricky, because it can take some time to build and install the plugin every time you need to make a change. Here are some tricks that you can use to help maximize your development time. 204 | 205 | #### Patching the Plugin Rootfs 206 | 207 | All of the plugin logic is in the `index.js` file. During development it can take a long time to rebuild the entire plugin every time you need to test a change to `index.js`. To get around this, it is possible to copy just that file into the installed plugin without having to reinstall the entire plugin. 208 | 209 | When you install a Docker plugin, it is given a plugin ID. You can see the first 12 characters of the plugin ID by running `docker plugin ls`. 210 | 211 | ``` 212 | $ docker plugin ls 213 | ID NAME DESCRIPTION ENABLED 214 | 2f5b68535b92 katharostech/seaweedfs-volume-plugin:latest SeaweedFS volume plugin for Docker false 215 | ``` 216 | 217 | Using that ID you can find where the plugin's rootfs was installed. By default, it should be located in `/var/lib/docker/plugins/[pluginID]/rootfs`. For our particular plugin, the file that we need to replace is the `/project/index.js` file in the plugin's rootfs. 
By replacing that file with an updated version and restarting ( disabling and re-enabling ) the plugin, you can update the plugin without having to re-install it. 218 | 219 | #### Exec-ing Into the Plugin Container 220 | 221 | It may be useful during development to exec into the plugin container while it is running. You can find out how in the [Docker Documentation](https://docs.docker.com/engine/extend/#debugging-plugins). 222 | 223 | #### Test Case Development 224 | 225 | > **Note:** The tests have not been migrated from the LizardFS version of this plugin. The information in this section about tests is straight from the LizardFS version and hasn't been tested after porting the plugin. 226 | 227 | Writing new automated test cases for the plugin can also be difficult because of the time required for the test container to start. When writing new test cases for the plugin, it may be useful to start the container and interactively run the tests. If you make a mistake that causes a test to fail, even though the plugin *is* working, you can still edit and re-run the tests without having to restart the test container completely. 228 | 229 | Once you have built the test image using the `build-tests.sh` script, you need to run the test container as a daemon that you can exec into. We override the entrypoint of the container so that it won't run the test script as soon as it starts. We want it just to sit there and wait for us to run commands in it. 230 | 231 | $ docker run -it --rm -d --name lizardfs-test --privileged \ 232 | -v $(pwd)/plugin:/plugin \ 233 | -v $(pwd)/test/test-run.sh:/test-run.sh \ 234 | --entrypoint=sh \ 235 | lizardfs-volume-plugin_test 236 | 237 | > **Note:** We also mount our `test-run.sh` script into the container so that updates to the script are reflected immediately in the container. 238 | 239 | After the container is running we can shell into it and run the script that starts up Docker.
240 | 241 | $ docker exec -it lizardfs-test sh 242 | /project # /test-environment.sh 243 | 244 | This will start Docker, load the LizardFS image used for creating the test LizardFS environment, and install the plugin from the plugin directory. Once this is done you can run the tests. 245 | 246 | /project # sh /test-run.sh 247 | 248 | This will run through all of the tests. If the tests fail, you can still edit and re-run the `test-run.sh` script without having to re-install the plugin. 249 | 250 | When you are done writing your test cases, you can `exit` the shell and `docker stop lizardfs-test`. The container will be automatically removed after it stops. You should make sure that your tests still run correctly in a completely fresh environment by rebuilding and re-running the tests using the `build-tests.sh` and `run-tests.sh` scripts. 251 | -------------------------------------------------------------------------------- /build-tests.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | pushd test 3 | docker build \ 4 | --build-arg http_proxy="$http_proxy" \ 5 | --build-arg https_proxy="$https_proxy" \ 6 | -t lizardfs-volume-plugin_test . 
7 | popd 8 | -------------------------------------------------------------------------------- /config.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "A Docker volume plugin for SeaweedFS", 3 | "documentation": "https://docs.docker.com/engine/extend/plugins/", 4 | "workdir": "/project", 5 | "entrypoint": [ 6 | "node", 7 | "index.js" 8 | ], 9 | "env": [ 10 | { 11 | "name": "HOST", 12 | "settable": [ 13 | "value" 14 | ], 15 | "value": "localhost:8888" 16 | }, 17 | { 18 | "name": "ROOT_VOLUME_NAME", 19 | "settable": [ 20 | "value" 21 | ], 22 | "value": "" 23 | }, 24 | { 25 | "name": "MOUNT_OPTIONS", 26 | "settable": [ 27 | "value" 28 | ], 29 | "value": "" 30 | }, 31 | { 32 | "name": "REMOTE_PATH", 33 | "settable": [ 34 | "value" 35 | ], 36 | "value": "/docker/volumes" 37 | }, 38 | { 39 | "name": "LOG_LEVEL", 40 | "settable": [ 41 | "value" 42 | ], 43 | "value": "info" 44 | } 45 | ], 46 | "interface": { 47 | "socket": "seaweedfs.sock", 48 | "types": [ 49 | "docker.volumedriver/2.0" 50 | ] 51 | }, 52 | "linux": { 53 | "capabilities": [ 54 | "CAP_SYS_ADMIN" 55 | ], 56 | "devices": [ 57 | { 58 | "path": "/dev/fuse" 59 | } 60 | ] 61 | }, 62 | "network": { 63 | "type": "host" 64 | }, 65 | "propagatedMount": "/mnt/docker-volumes" 66 | } 67 | -------------------------------------------------------------------------------- /doc/ipo.md: -------------------------------------------------------------------------------- 1 | # IPO Outline 2 | 3 | This document outlines the basic Input-Process-Output flow of the volume plugin. 4 | 5 | ## Environment 6 | 7 | The SeaweedFS Docker plugin implements the [Docker Plugin API](https://docs.docker.com/engine/extend/plugin_api/). The Inputs to the program are requests made by the Docker daemon to the plugin. 
Requests such as `Plugin.Activate`, and `VolumeDriver.Create`, will be sent by the Docker daemon to the unix socket, `/run/docker/plugins/$PLUGIN_ID/seaweedfs.sock`, and the SeaweedFS Docker plugin will process the request, take the required actions, and respond with an appropriate response. 8 | 9 | ## Requests 10 | 11 | These are the requests that Docker will make to the plugin over the Unix socket. All requests will be HTTP POST requests and may contain a JSON payload. The plugin's response to the request should also be a JSON payload if applicable. Details about these requests can be found in the Docker documentation for the [Plugins API](https://docs.docker.com/engine/extend/plugin_api/) and the [Volume Plugin API](https://docs.docker.com/engine/extend/plugins_volume/#volumedrivercapabilities). 12 | 13 | ### /Plugin.Activate 14 | 15 | #### Input 16 | 17 | Empty payload. 18 | 19 | #### Process 20 | 21 | * Mount a subpath of the SeaweedFS filesystem specified by the `REMOTE_PATH` environment variable ( `/docker/volumes` by default) to `/mnt/seaweedfs`. This is where the docker volumes will be stored. The `/mnt/seaweedfs` directory will be referred to as the "volume root" throughout this document. 22 | 23 | #### Output 24 | 25 | ```json 26 | { 27 | "Implements": ["VolumeDriver"] 28 | } 29 | ``` 30 | 31 | ### /VolumeDriver.Create 32 | 33 | #### Input 34 | 35 | ```json 36 | { 37 | "Name": "volume_name" 38 | } 39 | ``` 40 | 41 | #### Process 42 | 43 | * Create sub-directory of volume root with the given `Name`. For example, `/mnt/seaweedfs/volume_name`. 44 | 45 | #### Output 46 | 47 | Error message ( if one occurred ). 48 | 49 | ```json 50 | { 51 | "Err": "" 52 | } 53 | ``` 54 | 55 | ### /VolumeDriver.Remove 56 | 57 | #### Input 58 | 59 | ```json 60 | { 61 | "Name": "volume_name" 62 | } 63 | ``` 64 | 65 | #### Process 66 | 67 | * Delete the directory in the volume root with the given `Name`. For example, `/mnt/seaweedfs/volume_name`.
68 | 69 | #### Output 70 | 71 | Error message ( if one occurred ). 72 | 73 | ```json 74 | { 75 | "Err": "" 76 | } 77 | ``` 78 | 79 | ### /VolumeDriver.Mount 80 | 81 | #### Input 82 | 83 | ```json 84 | { 85 | "Name": "volume_name", 86 | "ID": "b87d7442095999a92b65b3d9691e697b61713829cc0ffd1bb72e4ccd51aa4d6c" 87 | } 88 | ``` 89 | 90 | #### Process 91 | 92 | * Create a directory outside of the SeaweedFS root mountpoint using the given `Name`, such as `/mnt/docker-volumes/volume_name`. 93 | * Mount the subpath of the SeaweedFS filesystem ( for example, `/docker/volumes/volume_name` ) to the newly created mountpoint. 94 | * Add the `ID` to the list of containers that have mounted `Name` in the `mounted_volumes` Javascript object. This variable is used to keep track of which containers have mounted the volume. 95 | 96 | #### Output 97 | 98 | We need to tell Docker where we mounted the volume or give an error message if there was a problem. 99 | 100 | ```json 101 | { 102 | "Mountpoint": "/mnt/docker-volumes/volume_name", 103 | "Err": "" 104 | } 105 | ``` 106 | 107 | ### /VolumeDriver.Path 108 | 109 | #### Input 110 | 111 | ```json 112 | { 113 | "Name": "volume_name" 114 | } 115 | ``` 116 | 117 | #### Process 118 | 119 | * Determine the path at which the volume is mounted based on the `Name`. 120 | 121 | #### Output 122 | 123 | Error message ( if one occurred ). 124 | 125 | ```json 126 | { 127 | "Mountpoint": "/mnt/docker-volumes/volume_name", 128 | "Err": "" 129 | } 130 | ``` 131 | 132 | ### /VolumeDriver.Unmount 133 | 134 | #### Input 135 | 136 | ```json 137 | { 138 | "Name": "volume_name", 139 | "ID": "b87d7442095999a92b65b3d9691e697b61713829cc0ffd1bb72e4ccd51aa4d6c" 140 | } 141 | ``` 142 | 143 | #### Process 144 | 145 | * Remove the `ID` from the list of containers that have mounted `Name` in `mounted_volumes` Javascript variable. 
146 | * If there are no containers in the list anymore, unmount the `/mnt/docker-volumes/volume_name` because it no longer needs to be mounted. 147 | 148 | #### Output 149 | 150 | Error message ( if one occurred ). 151 | 152 | ```json 153 | { 154 | "Err": "" 155 | } 156 | ``` 157 | 158 | ### /VolumeDriver.Get 159 | 160 | #### Input 161 | 162 | ```json 163 | { 164 | "Name": "volume_name" 165 | } 166 | ``` 167 | 168 | #### Process 169 | 170 | * Make sure the volume exists: check that the directory of the name `volume_name` exists and that the process has read-write access. 171 | * If the volume is mounted, return the mountpoint as well as the name. 172 | 173 | #### Output 174 | 175 | Return the volume name 176 | 177 | ```json 178 | { 179 | "Volume": { 180 | "Name": "volume_name", 181 | "Mountpoint": "/mnt/docker-volumes/volume_name" 182 | }, 183 | "Err": "Error if directory doesn't exist or we don't have read-write access to it." 184 | } 185 | ``` 186 | 187 | ### /VolumeDriver.List 188 | 189 | #### Input 190 | 191 | ```json 192 | {} 193 | ``` 194 | 195 | #### Process 196 | 197 | * Get a list of the directories in the volume root: `/mnt/seaweedfs/`. 198 | * If the volume is mounted on the host, provide the `Mountpoint`. 199 | 200 | #### Output 201 | 202 | Error message ( if one occurred ). 203 | 204 | ```json 205 | { 206 | "Volumes": [ 207 | { 208 | "Name": "volume_name", 209 | "Mountpoint": "/mnt/docker-volumes/volume_name" 210 | } 211 | ], 212 | "Err": "" 213 | } 214 | ``` 215 | 216 | ### /VolumeDriver.Capabilities 217 | 218 | #### Input 219 | 220 | ```json 221 | {} 222 | ``` 223 | 224 | #### Process 225 | 226 | Not applicable.
227 | 228 | #### Output 229 | 230 | ```json 231 | { 232 | "Capabilities": { 233 | "Scope": "global" 234 | } 235 | } 236 | ``` 237 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | # 2 | # WIP docker-compose file for running SeaweedFS cluster 3 | # 4 | # For quick testing of the plugin it is easier to start weed like this: 5 | # `weed server -filer=true` 6 | # 7 | version: "3.3" 8 | 9 | services: 10 | master: 11 | networks: 12 | - hostnet 13 | image: chrislusf/seaweedfs:latest 14 | ports: 15 | # - 9333:9333 16 | # - 19333:19333 17 | command: "master" 18 | 19 | volume: 20 | image: chrislusf/seaweedfs:latest 21 | networks: 22 | - hostnet 23 | ports: 24 | # - 8080:8080 25 | # - 18080:18080 26 | command: 'volume -max=15 -mserver="master:9333" -port=8080' 27 | 28 | filer: 29 | image: chrislusf/seaweedfs:latest 30 | networks: 31 | - hostnet 32 | ports: 33 | # - 8888:8888 34 | # - 18888:18888 35 | command: 'filer -master="master:9333"' 36 | tty: true 37 | stdin_open: true 38 | 39 | s3: 40 | image: chrislusf/seaweedfs:latest 41 | networks: 42 | - hostnet 43 | ports: 44 | # - 8333:8333 45 | command: 's3 -filer="filer:8888"' 46 | 47 | networks: 48 | hostnet: 49 | external: true 50 | name: host 51 | -------------------------------------------------------------------------------- /index.js: -------------------------------------------------------------------------------- 1 | // 2 | // Imports 3 | // 4 | const fs = require('fs-extra') 5 | const ls = require('ls') 6 | const path = require('path') 7 | const { execFileSync, spawn } = require('child_process') 8 | 9 | const http = require('http') 10 | const terminus = require('@godaddy/terminus') 11 | const express = require('express') 12 | 13 | // 14 | // Globals 15 | // 16 | 17 | // Path on remote SeaweedFS filesystem that will be used for volume storage 18 | const remote_path = 
process.env['REMOTE_PATH'] 19 | // Used when not running as a Docker plugin to set the driver alias 20 | var plugin_alias = process.env['ALIAS'] 21 | if (plugin_alias == undefined || plugin_alias == '') { 22 | plugin_alias = 'seaweedfs' 23 | } 24 | // The name of the "root" volume ( if specified ) 25 | const root_volume_name = process.env['ROOT_VOLUME_NAME'] 26 | // Mountpoint for remote SeaweedFS filesystem 27 | const volume_root = '/mnt/seaweedfs' 28 | // Directory to mount volumes to inside the container 29 | const container_volume_path = '/mnt/docker-volumes' 30 | // Address that the webserver will listen on 31 | const bind_address = `/run/docker/plugins/${plugin_alias}.sock` 32 | 33 | // The directory that volumes are mounted to on the host system 34 | var host_volume_path = process.env['LOCAL_PATH'] 35 | 36 | // If the `host_volume_basedir` is not set by the user, assume that API server 37 | // running as a Docker plugin and that the host volume path is handled by Docker 38 | // under the propagated mount: /mnt/docker-volumes. 39 | if (host_volume_path == undefined || host_volume_path == '') { 40 | host_volume_path = container_volume_path 41 | } 42 | 43 | // Options to the `mfsmount` command 44 | var mount_options = [] 45 | if (process.env['MOUNT_OPTIONS'].length != 0) { 46 | mount_options = process.env['MOUNT_OPTIONS'].split(' ') 47 | } 48 | 49 | /* 50 | * Used to keep track of which volumes are in use by containers. 
For example: 51 | * { 52 | * "volume_name": [ 53 | * "mount_id1", 54 | * "mount_id2" 55 | * ] 56 | * } 57 | */ 58 | var mounted_volumes = {} 59 | 60 | // Records whether or not we have mounted the SeaweedFS volume root 61 | var has_mounted_volume_root = false 62 | 63 | // 64 | // Logging 65 | // 66 | 67 | const log = require('loglevel-message-prefix')(require('loglevel'), { 68 | prefixes: ['level'], 69 | }) 70 | 71 | // Log level set by plugin config 72 | log.setLevel(process.env['LOG_LEVEL']) 73 | 74 | log.info('Starting up SeaweedFS volume plugin') 75 | 76 | // 77 | // Express webserver and middleware 78 | // 79 | 80 | var app = express() 81 | // JSON body parser 82 | app.use(express.json({type: () => true})) 83 | 84 | // Plugin activation 85 | app.use(function (req, res, next) { 86 | log.debug(container_volume_path) 87 | log.debug(host_volume_path) 88 | // If this is an activation request 89 | if (req.method == 'POST' && req.path == '/Plugin.Activate') { 90 | log.debug('/Plugin.Activate') 91 | res.json({ 92 | Implements: ['VolumeDriver'] 93 | }) 94 | return 95 | } else { 96 | next() 97 | } 98 | }) 99 | 100 | /* 101 | * Custom middleware that makes sure the SeaweedFS remote filesystem is mounted 102 | * before any other plugin functions are executed. 
 */
app.use(function (req, res, next) {
  // If we haven't mounted the SeaweedFS remote yet, mount it on first request
  if (has_mounted_volume_root == false) {
    log.info('Mounting SeaweedFS remote path')

    try {
      // Mount SeaweedFS remote path.
      // NOTE(review): `spawn` is asynchronous — a failure such as a missing
      // `weed` binary surfaces as an 'error' event on `proc`, not as an
      // exception, so the catch below likely never sees it. — TODO confirm
      const proc = spawn(
        'weed',
        [
          'mount',
          `-dir=${volume_root}`,
          `-filer=${process.env['HOST']}`,
          `-filer.path=${remote_path}`,
          ...mount_options
        ]
      )

      // Forward the mount process output into our own log
      proc.stdout.on('data', (data) => {
        log.info(data.toString());
      });

      proc.stderr.on('data', (data) => {
        log.error(data.toString());
      });

      // Success
      // NOTE(review): this flag is set before the mount has been confirmed;
      // the spawned process may still fail afterwards.
      has_mounted_volume_root = true

      // Pass traffic on to the next handler
      next()

    } catch (err) {
      // Failure
      res.json({
        Err: err.toString()
      })
      return
    }

  // If we have already mounted SeaweedFS remote
  } else {
    // Nothing to do, pass traffic to the next handler
    next()
  }
})

//
// Helper Functions
//

/*
 * Determine whether or not a volume is mounted by a container based on our
 * `mounted_volumes` object. Returns true when at least one container mount
 * ID is recorded for `volume_name`.
 */
function volume_is_mounted(volume_name) {
  if (mounted_volumes[volume_name] != undefined &&
    mounted_volumes[volume_name].length != 0) {
    return true
  } else {
    return false
  }
}

//
// Implement the Docker volume plugin API
//

// Create a volume: a directory under the SeaweedFS volume root.
app.post('/VolumeDriver.Create', function (req, res) {
  var volume_name = req.body.Name
  var volume_path = path.join(volume_root, volume_name)

  log.info(`/VolumeDriver.Create: ${volume_name}`)

  if (volume_name == root_volume_name) {
    // You cannot create a volume with the same name as the root volume.
    log.warn("Tried to create a volume with same name as root volume. Ignoring request.")

    // Return without doing anything.
    res.json({})
    return
  }

  try {
    // Create volume on SeaweedFS filesystem
    fs.ensureDirSync(volume_path)

    // Success
    res.json({})
    return

  } catch (err) {
    // Failure
    res.json({
      Err: err.toString()
    })
    return
  }
})

// Remove a volume's backing directory from the SeaweedFS filesystem.
app.post('/VolumeDriver.Remove', function (req, res) {
  var volume_name = req.body.Name
  var volume_path = path.join(volume_root, volume_name)

  log.info(`/VolumeDriver.Remove: ${volume_name}`)

  if (volume_name == root_volume_name) {
    // You cannot delete the root volume.
    // Return an error.
    res.json({
      Err: 'You cannot delete the SeaweedFS root volume.'
    })
    return
  }

  try{
    // Remove volume on SeaweedFS filesystem
    fs.removeSync(volume_path)

    // Success
    res.json({})
    return

  } catch (err) {
    // Failure
    res.json({
      Err: err.toString()
    })
    return
  }

})

// Mount a volume for a container identified by `ID`.
app.post('/VolumeDriver.Mount', function (req, res) {
  var volume_name = req.body.Name
  var mount_id = req.body.ID
  var container_mountpoint = path.join(container_volume_path, volume_name)
  var host_mountpoint = path.join(host_volume_path, volume_name)

  log.debug(`/VolumeDriver.Mount: ${volume_name}`)
  log.debug(` Mount ID: ${mount_id}`)

  // If the volume is already mounted
  if (volume_is_mounted(volume_name)) {
    // Add the container to the list of containers that have mounted this volume
    mounted_volumes[volume_name].push(mount_id)

    // Return the mountpoint
    res.json({
      Mountpoint: host_mountpoint
    })
    return

  // If the volume has not been mounted yet
  } else {
    try {
      // Create volume mountpoint
fs.ensureDirSync(container_mountpoint) 262 | 263 | var mount_remote_path = "" 264 | // If we are mounting the root volume 265 | if (volume_name == root_volume_name) { 266 | // We mount the directory containing *all* of the volumes 267 | mount_remote_path = remote_path 268 | } else { 269 | // We mount the specified volume 270 | mount_remote_path = path.join(remote_path, volume_name) 271 | } 272 | 273 | // Mount volume 274 | const proc = spawn( 275 | 'weed', 276 | [ 277 | 'mount', 278 | `-dir=${container_mountpoint}`, 279 | `-filer=${process.env['HOST']}`, 280 | `-filer.path=${mount_remote_path}`, 281 | ...mount_options 282 | ] 283 | ) 284 | 285 | proc.stdout.on('data', (data) => { 286 | log.info(data.toString()); 287 | }); 288 | 289 | proc.stderr.on('data', (data) => { 290 | log.error(data.toString()); 291 | }); 292 | 293 | // Start a list of containers that have mounted this volume 294 | mounted_volumes[volume_name] = [mount_id] 295 | 296 | // Success: Return the mountpoint 297 | res.json({ 298 | Mountpoint: host_mountpoint 299 | }) 300 | return 301 | 302 | } catch (err) { 303 | // Failure 304 | res.json({ 305 | Err: err.toString() 306 | }) 307 | return 308 | } 309 | } 310 | }) 311 | 312 | app.post('/VolumeDriver.Path', function (req, res) { 313 | var volume_name = req.body.Name 314 | var host_mountpoint = path.join(host_volume_path, volume_name) 315 | 316 | log.debug(`/VolumeDriver.Path: ${volume_name}`) 317 | 318 | // If the volume is mounted 319 | if (volume_is_mounted(volume_name)) { 320 | // Return the Mountpoint 321 | res.json({ 322 | Mountpoint: host_mountpoint 323 | }) 324 | return 325 | 326 | } else { 327 | // Nothing to return 328 | res.json({}) 329 | return 330 | } 331 | }) 332 | 333 | app.post('/VolumeDriver.Unmount', function (req, res) { 334 | var volume_name = req.body.Name 335 | var mount_id = req.body.ID 336 | var container_mountpoint = path.join(container_volume_path, volume_name) 337 | 338 | log.debug(`/VolumeDriver.Unmount: ${volume_name}`) 339 
| 340 | // Remove this from the list of mounted volumes 341 | mounted_volumes[volume_name].pop(mount_id) 342 | 343 | // If there are no longer any containers that are mounting this volume 344 | if (mounted_volumes[volume_name].length == 0) { 345 | try { 346 | // Unmount the volume 347 | execFileSync('umount', [container_mountpoint]) 348 | 349 | // Success 350 | res.json({}) 351 | return 352 | 353 | } catch (err) { 354 | // Failure 355 | res.json({ 356 | Err: err.toString() 357 | }) 358 | return 359 | } 360 | 361 | } else { 362 | // Success 363 | res.json({}) 364 | return 365 | } 366 | }) 367 | 368 | app.post('/VolumeDriver.Get', function (req, res) { 369 | var volume_name = req.body.Name 370 | var host_mountpoint = path.join(host_volume_path, volume_name) 371 | 372 | log.debug(`/VolumeDriver.Get: ${volume_name}`) 373 | 374 | // If the volume is the root volume 375 | if (volume_name == root_volume_name) { 376 | // If the root volume is mounted 377 | if (volume_is_mounted(root_volume_name)) { 378 | // Return the volume name and the mountpoint 379 | res.json({ 380 | Volume: { 381 | Name: root_volume_name, 382 | Mountpoint: host_mountpoint 383 | } 384 | }) 385 | return 386 | 387 | // If the root volume is not mounted 388 | } else { 389 | // Return the volume name 390 | res.json({ 391 | Volume: { 392 | Name: root_volume_name 393 | } 394 | }) 395 | return 396 | } 397 | } 398 | 399 | try { 400 | // Check directory access on SeaweedFS directory 401 | fs.accessSync(path.join(volume_root, req.body.Name), 402 | fs.constants.R_OK | fs.constants.W_OK) 403 | 404 | log.debug(`Found Volume: ${volume_name}`) 405 | 406 | // If the volume is mounted 407 | if (volume_is_mounted(volume_name)) { 408 | // Return volume name and mountpoint 409 | res.json({ 410 | Volume: { 411 | Name: volume_name, 412 | Mountpoint: host_mountpoint 413 | } 414 | }) 415 | return 416 | 417 | // If volume is not mounted 418 | } else { 419 | // Return volume name 420 | res.json({ 421 | Volume: { 422 | Name: 
volume_name 423 | } 424 | }) 425 | return 426 | } 427 | 428 | } catch (err) { 429 | // Failure 430 | log.warn(`Cannot Access Volume: ${volume_name}`) 431 | 432 | res.json({ 433 | Err: err.toString() 434 | }) 435 | return 436 | } 437 | }) 438 | 439 | app.post('/VolumeDriver.List', function (req, res) { 440 | var volumes = [] 441 | 442 | log.debug('/VolumeDriver.List') 443 | 444 | // If the root volume name has been specified 445 | if (root_volume_name != "") { 446 | // If the root volume has been mounted 447 | if (volume_is_mounted(root_volume_name)) { 448 | // Add the volume name and mountpoint 449 | volumes.push({ 450 | Name: root_volume_name, 451 | Mountpoint: path.join(host_volume_path, root_volume_name) 452 | }) 453 | 454 | // If the root volume has not been mounted 455 | } else { 456 | // Add the volume name 457 | volumes.push({ 458 | Name: root_volume_name 459 | }) 460 | } 461 | } 462 | 463 | // For every file or folder in the volume root directory 464 | for (var file of ls(volume_root + "/*")) { 465 | // If it is a directory 466 | if (file.stat.isDirectory()) { 467 | // If the directory has the same name as the root volume 468 | if (file.name == root_volume_name) { 469 | // Skip this volume, the root volume takes precedence 470 | log.warn('Found volume with same name as root volume: ' + 471 | `'${root_volume_name}' Skipping volume, root volume takes precedence.`) 472 | continue 473 | } 474 | 475 | // If the volume is mounted 476 | if (volume_is_mounted(file.name)) { 477 | // Add the volume name and mountpoint 478 | volumes.push({ 479 | Name: file.name, 480 | Mountpoint: path.join(host_volume_path, file.name) 481 | }) 482 | 483 | // If the volume is not mounted 484 | } else { 485 | // Add the volume name 486 | volumes.push({ 487 | Name: file.name 488 | }) 489 | } 490 | } 491 | } 492 | 493 | // Return the volume list 494 | res.json({ 495 | Volumes: volumes 496 | }) 497 | return 498 | }) 499 | 500 | app.post('/VolumeDriver.Capabilities', function (req, res) { 
501 | log.debug('/VolumeDriver.Capabilities') 502 | res.json({ 503 | Capabilities: { 504 | Scope: 'global' 505 | } 506 | }) 507 | return 508 | }) 509 | 510 | // 511 | // Shutdown sequence 512 | // 513 | 514 | function onSignal() { 515 | log.info('Termination signal detected, shutting down') 516 | 517 | // For each volume 518 | for (volume_name in mounted_volumes) { 519 | // If the volume is mounted 520 | if (volume_is_mounted(volume_name)) { 521 | try { 522 | log.debug(`Unmounting volume: ${volume_name}`) 523 | 524 | // Unmount the volume 525 | execFileSync('umount', [path.join(container_volume_path, volume_name)]) 526 | 527 | } catch (err) { 528 | // Failure 529 | log.warn(`Couldn't unmount volume: ${volume_name}: ${err.toString()}`) 530 | } 531 | } 532 | } 533 | 534 | // Unmount volume root 535 | if (has_mounted_volume_root) { 536 | try { 537 | log.debug(`Unmounting volume root: ${volume_root}`) 538 | 539 | // Unmount volume root 540 | execFileSync('umount', [volume_root]) 541 | 542 | } catch (err) { 543 | // Failure 544 | log.warn(`Couldn't unmount volume root '${volume_root}': ${err.toString()}`) 545 | } 546 | } 547 | } 548 | 549 | // 550 | // Start Server 551 | // 552 | 553 | log.info(`Starting plugin API server at ${bind_address}`) 554 | 555 | // Start webserver using terminus for lifecycle management 556 | terminus(http.createServer(app), { 557 | logger: log.error, 558 | onSignal, 559 | onShutdown: () => { 560 | log.info("Server shutdown complete") 561 | } 562 | }).listen(bind_address) 563 | -------------------------------------------------------------------------------- /package-lock.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "docker-plugin_seaweedfs", 3 | "version": "1.0.0", 4 | "lockfileVersion": 1, 5 | "requires": true, 6 | "dependencies": { 7 | "@godaddy/terminus": { 8 | "version": "2.2.0", 9 | "resolved": "https://registry.npmjs.org/@godaddy/terminus/-/terminus-2.2.0.tgz", 10 | "integrity": 
"sha512-olNnWo38hBYEGGGUp1YLpZSNpyjk8iywrF5KpWNcVcywy4CeD+sO/TwO/kEfRw75g6oRw1TmN8tDaL8r4kcY0w==", 11 | "requires": { 12 | "es6-promisify": "5.0.0", 13 | "stoppable": "1.0.6" 14 | } 15 | }, 16 | "accepts": { 17 | "version": "1.3.5", 18 | "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.5.tgz", 19 | "integrity": "sha1-63d99gEXI6OxTopywIBcjoZ0a9I=", 20 | "requires": { 21 | "mime-types": "2.1.18", 22 | "negotiator": "0.6.1" 23 | } 24 | }, 25 | "array-flatten": { 26 | "version": "1.1.1", 27 | "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz", 28 | "integrity": "sha1-ml9pkFGx5wczKPKgCJaLZOopVdI=" 29 | }, 30 | "balanced-match": { 31 | "version": "1.0.0", 32 | "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.0.tgz", 33 | "integrity": "sha1-ibTRmasr7kneFk6gK4nORi1xt2c=" 34 | }, 35 | "body-parser": { 36 | "version": "1.18.2", 37 | "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.18.2.tgz", 38 | "integrity": "sha1-h2eKGdhLR9hZuDGZvVm84iKxBFQ=", 39 | "requires": { 40 | "bytes": "3.0.0", 41 | "content-type": "1.0.4", 42 | "debug": "2.6.9", 43 | "depd": "1.1.2", 44 | "http-errors": "1.6.3", 45 | "iconv-lite": "0.4.19", 46 | "on-finished": "2.3.0", 47 | "qs": "6.5.1", 48 | "raw-body": "2.3.2", 49 | "type-is": "1.6.16" 50 | } 51 | }, 52 | "brace-expansion": { 53 | "version": "1.1.11", 54 | "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", 55 | "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", 56 | "requires": { 57 | "balanced-match": "1.0.0", 58 | "concat-map": "0.0.1" 59 | } 60 | }, 61 | "bytes": { 62 | "version": "3.0.0", 63 | "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.0.0.tgz", 64 | "integrity": "sha1-0ygVQE1olpn4Wk6k+odV3ROpYEg=" 65 | }, 66 | "concat-map": { 67 | "version": "0.0.1", 68 | "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", 69 | 
"integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=" 70 | }, 71 | "content-disposition": { 72 | "version": "0.5.2", 73 | "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.2.tgz", 74 | "integrity": "sha1-DPaLud318r55YcOoUXjLhdunjLQ=" 75 | }, 76 | "content-type": { 77 | "version": "1.0.4", 78 | "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.4.tgz", 79 | "integrity": "sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA==" 80 | }, 81 | "cookie": { 82 | "version": "0.3.1", 83 | "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.3.1.tgz", 84 | "integrity": "sha1-5+Ch+e9DtMi6klxcWpboBtFoc7s=" 85 | }, 86 | "cookie-signature": { 87 | "version": "1.0.6", 88 | "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz", 89 | "integrity": "sha1-4wOogrNCzD7oylE6eZmXNNqzriw=" 90 | }, 91 | "debug": { 92 | "version": "2.6.9", 93 | "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", 94 | "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", 95 | "requires": { 96 | "ms": "2.0.0" 97 | } 98 | }, 99 | "depd": { 100 | "version": "1.1.2", 101 | "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz", 102 | "integrity": "sha1-m81S4UwJd2PnSbJ0xDRu0uVgtak=" 103 | }, 104 | "destroy": { 105 | "version": "1.0.4", 106 | "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.0.4.tgz", 107 | "integrity": "sha1-l4hXRCxEdJ5CBmE+N5RiBYJqvYA=" 108 | }, 109 | "ee-first": { 110 | "version": "1.1.1", 111 | "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", 112 | "integrity": "sha1-WQxhFWsK4vTwJVcyoViyZrxWsh0=" 113 | }, 114 | "encodeurl": { 115 | "version": "1.0.2", 116 | "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", 117 | "integrity": "sha1-rT/0yG7C0CkyL1oCw6mmBslbP1k=" 118 | }, 119 | "es6-object-assign": { 120 | "version": "1.1.0", 121 | 
"resolved": "https://registry.npmjs.org/es6-object-assign/-/es6-object-assign-1.1.0.tgz", 122 | "integrity": "sha1-wsNYJlYkfDnqEHyx5mUrb58kUjw=" 123 | }, 124 | "es6-polyfills": { 125 | "version": "2.0.0", 126 | "resolved": "https://registry.npmjs.org/es6-polyfills/-/es6-polyfills-2.0.0.tgz", 127 | "integrity": "sha1-fzWP04jYyIjQDPyaHuqJ+XFoOTE=", 128 | "requires": { 129 | "es6-object-assign": "1.1.0", 130 | "es6-promise-polyfill": "1.2.0" 131 | } 132 | }, 133 | "es6-promise": { 134 | "version": "4.2.4", 135 | "resolved": "https://registry.npmjs.org/es6-promise/-/es6-promise-4.2.4.tgz", 136 | "integrity": "sha512-/NdNZVJg+uZgtm9eS3O6lrOLYmQag2DjdEXuPaHlZ6RuVqgqaVZfgYCepEIKsLqwdQArOPtC3XzRLqGGfT8KQQ==" 137 | }, 138 | "es6-promise-polyfill": { 139 | "version": "1.2.0", 140 | "resolved": "https://registry.npmjs.org/es6-promise-polyfill/-/es6-promise-polyfill-1.2.0.tgz", 141 | "integrity": "sha1-84kl8jyz4+jObNqP93T867sJDN4=" 142 | }, 143 | "es6-promisify": { 144 | "version": "5.0.0", 145 | "resolved": "https://registry.npmjs.org/es6-promisify/-/es6-promisify-5.0.0.tgz", 146 | "integrity": "sha1-UQnWLz5W6pZ8S2NQWu8IKRyKUgM=", 147 | "requires": { 148 | "es6-promise": "4.2.4" 149 | } 150 | }, 151 | "escape-html": { 152 | "version": "1.0.3", 153 | "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", 154 | "integrity": "sha1-Aljq5NPQwJdN4cFpGI7wBR0dGYg=" 155 | }, 156 | "etag": { 157 | "version": "1.8.1", 158 | "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", 159 | "integrity": "sha1-Qa4u62XvpiJorr/qg6x9eSmbCIc=" 160 | }, 161 | "express": { 162 | "version": "4.16.3", 163 | "resolved": "https://registry.npmjs.org/express/-/express-4.16.3.tgz", 164 | "integrity": "sha1-avilAjUNsyRuzEvs9rWjTSL37VM=", 165 | "requires": { 166 | "accepts": "1.3.5", 167 | "array-flatten": "1.1.1", 168 | "body-parser": "1.18.2", 169 | "content-disposition": "0.5.2", 170 | "content-type": "1.0.4", 171 | "cookie": "0.3.1", 172 | "cookie-signature": "1.0.6", 173 | 
"debug": "2.6.9", 174 | "depd": "1.1.2", 175 | "encodeurl": "1.0.2", 176 | "escape-html": "1.0.3", 177 | "etag": "1.8.1", 178 | "finalhandler": "1.1.1", 179 | "fresh": "0.5.2", 180 | "merge-descriptors": "1.0.1", 181 | "methods": "1.1.2", 182 | "on-finished": "2.3.0", 183 | "parseurl": "1.3.2", 184 | "path-to-regexp": "0.1.7", 185 | "proxy-addr": "2.0.3", 186 | "qs": "6.5.1", 187 | "range-parser": "1.2.0", 188 | "safe-buffer": "5.1.1", 189 | "send": "0.16.2", 190 | "serve-static": "1.13.2", 191 | "setprototypeof": "1.1.0", 192 | "statuses": "1.4.0", 193 | "type-is": "1.6.16", 194 | "utils-merge": "1.0.1", 195 | "vary": "1.1.2" 196 | } 197 | }, 198 | "finalhandler": { 199 | "version": "1.1.1", 200 | "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.1.1.tgz", 201 | "integrity": "sha512-Y1GUDo39ez4aHAw7MysnUD5JzYX+WaIj8I57kO3aEPT1fFRL4sr7mjei97FgnwhAyyzRYmQZaTHb2+9uZ1dPtg==", 202 | "requires": { 203 | "debug": "2.6.9", 204 | "encodeurl": "1.0.2", 205 | "escape-html": "1.0.3", 206 | "on-finished": "2.3.0", 207 | "parseurl": "1.3.2", 208 | "statuses": "1.4.0", 209 | "unpipe": "1.0.0" 210 | } 211 | }, 212 | "forwarded": { 213 | "version": "0.1.2", 214 | "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.1.2.tgz", 215 | "integrity": "sha1-mMI9qxF1ZXuMBXPozszZGw/xjIQ=" 216 | }, 217 | "fresh": { 218 | "version": "0.5.2", 219 | "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz", 220 | "integrity": "sha1-PYyt2Q2XZWn6g1qx+OSyOhBWBac=" 221 | }, 222 | "fs-extra": { 223 | "version": "6.0.0", 224 | "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-6.0.0.tgz", 225 | "integrity": "sha512-lk2cUCo8QzbiEWEbt7Cw3m27WMiRG321xsssbcIpfMhpRjrlC08WBOVQqj1/nQYYNnPtyIhP1oqLO3QwT2tPCw==", 226 | "requires": { 227 | "graceful-fs": "4.1.11", 228 | "jsonfile": "4.0.0", 229 | "universalify": "0.1.1" 230 | } 231 | }, 232 | "fs.realpath": { 233 | "version": "1.0.0", 234 | "resolved": 
"https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", 235 | "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=" 236 | }, 237 | "glob": { 238 | "version": "7.0.5", 239 | "resolved": "https://registry.npmjs.org/glob/-/glob-7.0.5.tgz", 240 | "integrity": "sha1-tCAqaQmbu00pKnwblbZoK2fr3JU=", 241 | "requires": { 242 | "fs.realpath": "1.0.0", 243 | "inflight": "1.0.6", 244 | "inherits": "2.0.3", 245 | "minimatch": "3.0.4", 246 | "once": "1.4.0", 247 | "path-is-absolute": "1.0.1" 248 | } 249 | }, 250 | "graceful-fs": { 251 | "version": "4.1.11", 252 | "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.1.11.tgz", 253 | "integrity": "sha1-Dovf5NHduIVNZOBOp8AOKgJuVlg=" 254 | }, 255 | "http-errors": { 256 | "version": "1.6.3", 257 | "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz", 258 | "integrity": "sha1-i1VoC7S+KDoLW/TqLjhYC+HZMg0=", 259 | "requires": { 260 | "depd": "1.1.2", 261 | "inherits": "2.0.3", 262 | "setprototypeof": "1.1.0", 263 | "statuses": "1.4.0" 264 | } 265 | }, 266 | "iconv-lite": { 267 | "version": "0.4.19", 268 | "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.19.tgz", 269 | "integrity": "sha512-oTZqweIP51xaGPI4uPa56/Pri/480R+mo7SeU+YETByQNhDG55ycFyNLIgta9vXhILrxXDmF7ZGhqZIcuN0gJQ==" 270 | }, 271 | "inflight": { 272 | "version": "1.0.6", 273 | "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", 274 | "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=", 275 | "requires": { 276 | "once": "1.4.0", 277 | "wrappy": "1.0.2" 278 | } 279 | }, 280 | "inherits": { 281 | "version": "2.0.3", 282 | "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", 283 | "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4=" 284 | }, 285 | "ipaddr.js": { 286 | "version": "1.6.0", 287 | "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.6.0.tgz", 288 | "integrity": "sha1-4/o1e3c9phnybpXwSdBVxyeW+Gs=" 289 | }, 290 | "jsonfile": { 291 | "version": "4.0.0", 292 | 
"resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-4.0.0.tgz", 293 | "integrity": "sha1-h3Gq4HmbZAdrdmQPygWPnBDjPss=", 294 | "requires": { 295 | "graceful-fs": "4.1.11" 296 | } 297 | }, 298 | "loglevel": { 299 | "version": "1.6.1", 300 | "resolved": "https://registry.npmjs.org/loglevel/-/loglevel-1.6.1.tgz", 301 | "integrity": "sha1-4PyVEztu8nbNyIh82vJKpvFW+Po=" 302 | }, 303 | "loglevel-message-prefix": { 304 | "version": "3.0.0", 305 | "resolved": "https://registry.npmjs.org/loglevel-message-prefix/-/loglevel-message-prefix-3.0.0.tgz", 306 | "integrity": "sha1-ER/bltlPlh2PyLiqv7ZrBqw+dq0=", 307 | "requires": { 308 | "es6-polyfills": "2.0.0", 309 | "loglevel": "1.6.1" 310 | } 311 | }, 312 | "ls": { 313 | "version": "0.2.1", 314 | "resolved": "https://registry.npmjs.org/ls/-/ls-0.2.1.tgz", 315 | "integrity": "sha1-DZbMhwYAgG+ua9iSl9xcZkVMv3E=", 316 | "requires": { 317 | "glob": "7.0.5" 318 | } 319 | }, 320 | "media-typer": { 321 | "version": "0.3.0", 322 | "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", 323 | "integrity": "sha1-hxDXrwqmJvj/+hzgAWhUUmMlV0g=" 324 | }, 325 | "merge-descriptors": { 326 | "version": "1.0.1", 327 | "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz", 328 | "integrity": "sha1-sAqqVW3YtEVoFQ7J0blT8/kMu2E=" 329 | }, 330 | "methods": { 331 | "version": "1.1.2", 332 | "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz", 333 | "integrity": "sha1-VSmk1nZUE07cxSZmVoNbD4Ua/O4=" 334 | }, 335 | "mime": { 336 | "version": "1.4.1", 337 | "resolved": "https://registry.npmjs.org/mime/-/mime-1.4.1.tgz", 338 | "integrity": "sha512-KI1+qOZu5DcW6wayYHSzR/tXKCDC5Om4s1z2QJjDULzLcmf3DvzS7oluY4HCTrc+9FiKmWUgeNLg7W3uIQvxtQ==" 339 | }, 340 | "mime-db": { 341 | "version": "1.33.0", 342 | "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.33.0.tgz", 343 | "integrity": "sha512-BHJ/EKruNIqJf/QahvxwQZXKygOQ256myeN/Ew+THcAa5q+PjyTTMMeNQC4DZw5AwfvelsUrA6B67NKMqXDbzQ==" 
344 | }, 345 | "mime-types": { 346 | "version": "2.1.18", 347 | "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.18.tgz", 348 | "integrity": "sha512-lc/aahn+t4/SWV/qcmumYjymLsWfN3ELhpmVuUFjgsORruuZPVSwAQryq+HHGvO/SI2KVX26bx+En+zhM8g8hQ==", 349 | "requires": { 350 | "mime-db": "1.33.0" 351 | } 352 | }, 353 | "minimatch": { 354 | "version": "3.0.4", 355 | "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", 356 | "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", 357 | "requires": { 358 | "brace-expansion": "1.1.11" 359 | } 360 | }, 361 | "ms": { 362 | "version": "2.0.0", 363 | "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", 364 | "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" 365 | }, 366 | "negotiator": { 367 | "version": "0.6.1", 368 | "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.1.tgz", 369 | "integrity": "sha1-KzJxhOiZIQEXeyhWP7XnECrNDKk=" 370 | }, 371 | "on-finished": { 372 | "version": "2.3.0", 373 | "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.3.0.tgz", 374 | "integrity": "sha1-IPEzZIGwg811M3mSoWlxqi2QaUc=", 375 | "requires": { 376 | "ee-first": "1.1.1" 377 | } 378 | }, 379 | "once": { 380 | "version": "1.4.0", 381 | "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", 382 | "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=", 383 | "requires": { 384 | "wrappy": "1.0.2" 385 | } 386 | }, 387 | "parseurl": { 388 | "version": "1.3.2", 389 | "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.2.tgz", 390 | "integrity": "sha1-/CidTtiZMRlGDBViUyYs3I3mW/M=" 391 | }, 392 | "path-is-absolute": { 393 | "version": "1.0.1", 394 | "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", 395 | "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=" 396 | }, 397 | "path-to-regexp": { 398 | "version": "0.1.7", 399 | "resolved": 
"https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz", 400 | "integrity": "sha1-32BBeABfUi8V60SQ5yR6G/qmf4w=" 401 | }, 402 | "proxy-addr": { 403 | "version": "2.0.3", 404 | "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.3.tgz", 405 | "integrity": "sha512-jQTChiCJteusULxjBp8+jftSQE5Obdl3k4cnmLA6WXtK6XFuWRnvVL7aCiBqaLPM8c4ph0S4tKna8XvmIwEnXQ==", 406 | "requires": { 407 | "forwarded": "0.1.2", 408 | "ipaddr.js": "1.6.0" 409 | } 410 | }, 411 | "qs": { 412 | "version": "6.5.1", 413 | "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.1.tgz", 414 | "integrity": "sha512-eRzhrN1WSINYCDCbrz796z37LOe3m5tmW7RQf6oBntukAG1nmovJvhnwHHRMAfeoItc1m2Hk02WER2aQ/iqs+A==" 415 | }, 416 | "range-parser": { 417 | "version": "1.2.0", 418 | "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.0.tgz", 419 | "integrity": "sha1-9JvmtIeJTdxA3MlKMi9hEJLgDV4=" 420 | }, 421 | "raw-body": { 422 | "version": "2.3.2", 423 | "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.3.2.tgz", 424 | "integrity": "sha1-vNYMd9Prk83gBQKVw/N5OJvIj4k=", 425 | "requires": { 426 | "bytes": "3.0.0", 427 | "http-errors": "1.6.2", 428 | "iconv-lite": "0.4.19", 429 | "unpipe": "1.0.0" 430 | }, 431 | "dependencies": { 432 | "depd": { 433 | "version": "1.1.1", 434 | "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.1.tgz", 435 | "integrity": "sha1-V4O04cRZ8G+lyif5kfPQbnoxA1k=" 436 | }, 437 | "http-errors": { 438 | "version": "1.6.2", 439 | "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.6.2.tgz", 440 | "integrity": "sha1-CgAsyFcHGSp+eUbO7cERVfYOxzY=", 441 | "requires": { 442 | "depd": "1.1.1", 443 | "inherits": "2.0.3", 444 | "setprototypeof": "1.0.3", 445 | "statuses": "1.4.0" 446 | } 447 | }, 448 | "setprototypeof": { 449 | "version": "1.0.3", 450 | "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.0.3.tgz", 451 | "integrity": "sha1-ZlZ+NwQ+608E2RvWWMDL77VbjgQ=" 452 | } 453 | } 454 | }, 455 | 
"safe-buffer": { 456 | "version": "5.1.1", 457 | "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.1.tgz", 458 | "integrity": "sha512-kKvNJn6Mm93gAczWVJg7wH+wGYWNrDHdWvpUmHyEsgCtIwwo3bqPtV4tR5tuPaUhTOo/kvhVwd8XwwOllGYkbg==" 459 | }, 460 | "send": { 461 | "version": "0.16.2", 462 | "resolved": "https://registry.npmjs.org/send/-/send-0.16.2.tgz", 463 | "integrity": "sha512-E64YFPUssFHEFBvpbbjr44NCLtI1AohxQ8ZSiJjQLskAdKuriYEP6VyGEsRDH8ScozGpkaX1BGvhanqCwkcEZw==", 464 | "requires": { 465 | "debug": "2.6.9", 466 | "depd": "1.1.2", 467 | "destroy": "1.0.4", 468 | "encodeurl": "1.0.2", 469 | "escape-html": "1.0.3", 470 | "etag": "1.8.1", 471 | "fresh": "0.5.2", 472 | "http-errors": "1.6.3", 473 | "mime": "1.4.1", 474 | "ms": "2.0.0", 475 | "on-finished": "2.3.0", 476 | "range-parser": "1.2.0", 477 | "statuses": "1.4.0" 478 | } 479 | }, 480 | "serve-static": { 481 | "version": "1.13.2", 482 | "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.13.2.tgz", 483 | "integrity": "sha512-p/tdJrO4U387R9oMjb1oj7qSMaMfmOyd4j9hOFoxZe2baQszgHcSWjuya/CiT5kgZZKRudHNOA0pYXOl8rQ5nw==", 484 | "requires": { 485 | "encodeurl": "1.0.2", 486 | "escape-html": "1.0.3", 487 | "parseurl": "1.3.2", 488 | "send": "0.16.2" 489 | } 490 | }, 491 | "setprototypeof": { 492 | "version": "1.1.0", 493 | "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.0.tgz", 494 | "integrity": "sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ==" 495 | }, 496 | "statuses": { 497 | "version": "1.4.0", 498 | "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.4.0.tgz", 499 | "integrity": "sha512-zhSCtt8v2NDrRlPQpCNtw/heZLtfUDqxBM1udqikb/Hbk52LK4nQSwr10u77iopCW5LsyHpuXS0GnEc48mLeew==" 500 | }, 501 | "stoppable": { 502 | "version": "1.0.6", 503 | "resolved": "https://registry.npmjs.org/stoppable/-/stoppable-1.0.6.tgz", 504 | "integrity": 
"sha512-d1B/3QXeT2+MixdC+EqQ9/llq3yvZkdsh8hrML52NmememiIAus0MBsnebYmzojJ2Ls5drhDqo2PFH1FLx2DWA==" 505 | }, 506 | "type-is": { 507 | "version": "1.6.16", 508 | "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.16.tgz", 509 | "integrity": "sha512-HRkVv/5qY2G6I8iab9cI7v1bOIdhm94dVjQCPFElW9W+3GeDOSHmy2EBYe4VTApuzolPcmgFTN3ftVJRKR2J9Q==", 510 | "requires": { 511 | "media-typer": "0.3.0", 512 | "mime-types": "2.1.18" 513 | } 514 | }, 515 | "universalify": { 516 | "version": "0.1.1", 517 | "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.1.1.tgz", 518 | "integrity": "sha1-+nG63UQ3r0wUiEHjs7Fl+enlkLc=" 519 | }, 520 | "unpipe": { 521 | "version": "1.0.0", 522 | "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", 523 | "integrity": "sha1-sr9O6FFKrmFltIF4KdIbLvSZBOw=" 524 | }, 525 | "utils-merge": { 526 | "version": "1.0.1", 527 | "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz", 528 | "integrity": "sha1-n5VxD1CiZ5R7LMwSR0HBAoQn5xM=" 529 | }, 530 | "vary": { 531 | "version": "1.1.2", 532 | "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", 533 | "integrity": "sha1-IpnwLG3tMNSllhsLn3RSShj2NPw=" 534 | }, 535 | "wrappy": { 536 | "version": "1.0.2", 537 | "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", 538 | "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=" 539 | } 540 | } 541 | } 542 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "docker-plugin_seaweedfs", 3 | "version": "1.0.0", 4 | "description": "Docker volume driver plugin for SeaweedFS", 5 | "main": "index.js", 6 | "scripts": { 7 | "start": "node index.js", 8 | "test": "echo \"Error: no test specified\" && exit 1" 9 | }, 10 | "repository": { 11 | "type": "git", 12 | "url": "https://phab.katharostech.com/diffusion/DPSFS/docker-plugin_seaweedfs.git" 13 | }, 14 | 
"keywords": [ 15 | "SeaweedFS", 16 | "Docker" 17 | ], 18 | "author": "Zicklag ", 19 | "license": "MIT", 20 | "dependencies": { 21 | "@godaddy/terminus": "^2.2.0", 22 | "express": "^4.16.3", 23 | "fs-extra": "^6.0.0", 24 | "loglevel": "^1.6.1", 25 | "loglevel-message-prefix": "^3.0.0", 26 | "ls": "^0.2.1" 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /run-tests.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | docker run -it --rm --privileged \ 3 | -e http_proxy="$http_proxy" \ 4 | -e https_proxy="$https_proxy" \ 5 | -e no_proxy="$no_proxy" \ 6 | -v $(pwd)/plugin:/plugin \ 7 | lizardfs-volume-plugin_test $@ 8 | -------------------------------------------------------------------------------- /test/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM docker:stable-dind 2 | 3 | # Install dependencies 4 | RUN apk add --no-cache bash curl jq python3 wget 5 | 6 | # Install Docker compose 7 | RUN pip3 install docker-compose 8 | 9 | # Create our working directory 10 | RUN mkdir /project 11 | 12 | # Switch to our working directory 13 | WORKDIR /project 14 | 15 | # Pull the LizardFS image used for creating the test environment 16 | RUN wget https://raw.githubusercontent.com/moby/moby/master/contrib/download-frozen-image-v2.sh -O /download-image.sh 17 | RUN chmod 744 /download-image.sh 18 | RUN mkdir -p /images/lizardfs 19 | RUN /download-image.sh /images/lizardfs kadimasolutions/lizardfs:latest 20 | 21 | # Copy in the docker compose file that we will use to create test LizardFS 22 | # clusters 23 | COPY ./docker-compose.yml /project/ 24 | 25 | # Copy in the test scripts 26 | COPY ./test-environment.sh /test-environment.sh 27 | RUN chmod 744 /test-environment.sh 28 | COPY ./test-run.sh /test-run.sh 29 | RUN chmod 744 /test-run.sh 30 | 31 | # Copy in our entrypoint script 32 | COPY ./docker-entrypoint.sh 
/docker-entrypoint.sh 33 | RUN chmod 744 /docker-entrypoint.sh 34 | 35 | # Set the entrypoint 36 | ENTRYPOINT ["/docker-entrypoint.sh"] 37 | -------------------------------------------------------------------------------- /test/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | mfsmaster: 4 | image: kadimasolutions/lizardfs 5 | command: master 6 | restart: on-failure 7 | volumes: 8 | - /var/lib/mfs 9 | ports: 10 | - ${MASTER_PORT}:9421 11 | chunkserver: 12 | image: kadimasolutions/lizardfs 13 | command: chunkserver 14 | restart: on-failure 15 | environment: 16 | # This lets you run the chunkserver with less available disk space 17 | MFSCHUNKSERVER_HDD_LEAVE_SPACE_DEFAULT: 20Mi # 4Gi is the default 18 | MFSHDD_1: /mnt/mfshdd 19 | volumes: 20 | - /mnt/mfshdd 21 | client: 22 | image: kadimasolutions/lizardfs 23 | command: client /mnt/mfs 24 | restart: on-failure 25 | # Required permissions and devices for container to mount filesystem 26 | cap_add: 27 | - SYS_ADMIN 28 | devices: 29 | - /dev/fuse:/dev/fuse 30 | security_opt: 31 | - apparmor:unconfined 32 | -------------------------------------------------------------------------------- /test/docker-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | image_tag=$1 4 | 5 | log_prefix="[Root]" 6 | 7 | echo "$log_prefix Creating Test Environment" 8 | /test-environment.sh $image_tag 9 | 10 | echo "$log_prefix Running Tests" 11 | /test-run.sh 12 | 13 | echo "$log_prefix All done. 
"Stopping Docker" 14 | kill -SIGTERM $(cat /run/dockerd-entrypoint.pid) 15 | -------------------------------------------------------------------------------- /test/test-environment.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | image_tag=$1 4 | 5 | log_prefix="[Plugin Environment]" 6 | 7 | echo "$log_prefix Starting Docker" 8 | dockerd-entrypoint.sh 2> /var/log/docker.log & 9 | echo $! > /run/dockerd-entrypoint.pid 10 | 11 | # Wait for Docker to startup 12 | while ! docker ps > /var/log/docker.log; do 13 | sleep 1 14 | done 15 | echo "$log_prefix Docker finished startup" 16 | 17 | echo "$log_prefix Loading baked LizardFS image" 18 | tar -cC '/images/lizardfs' . | docker load 19 | 20 | # Install plugin 21 | if [ -z "$image_tag" ]; then 22 | echo "$log_prefix Installing plugin from local dir" 23 | docker plugin create lizardfs /plugin 24 | else 25 | echo "$log_prefix Installing Plugin from DockerHub: $image_tag" 26 | docker plugin install --alias lizardfs --grant-all-permissions --disable $image_tag 27 | fi 28 | -------------------------------------------------------------------------------- /test/test-run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | #### 4 | # Plugin Test Cases 5 | #### 6 | 7 | log_prefix="[Plugin Test]" 8 | 9 | # Start a LizardFS cluster for the plugin to connect to 10 | 11 | # Set the LizardFS master port 12 | echo "MASTER_PORT=9421" > .env 13 | 14 | echo "$log_prefix Starting up local LizardFS cluster" 15 | docker-compose down -v 16 | docker-compose up -d 17 | 18 | echo "$log_prefix Creating volume directory on LizardFS filesystem" 19 | docker-compose exec client mkdir -p /mnt/mfs/docker/volumes 20 | 21 | # Configure and enable plugin 22 | 23 | echo "$log_prefix Configuring plugin to connect to 127.0.0.1:9421" 24 | docker plugin disable lizardfs 2> /dev/null 25 | docker plugin set lizardfs HOST=127.0.0.1 && \ 26 | docker plugin 
set lizardfs PORT=9421 && \ 27 | docker plugin set lizardfs REMOTE_PATH=/docker/volumes && \ 28 | docker plugin set lizardfs ROOT_VOLUME_NAME="" && \ 29 | docker plugin set lizardfs MOUNT_OPTIONS="" && \ 30 | docker plugin set lizardfs CONNECT_TIMEOUT=10000 && \ 31 | docker plugin set lizardfs LOG_LEVEL=info && \ 32 | docker plugin enable lizardfs 33 | 34 | if [ ! $? -eq 0 ]; then echo "TEST FAILED"; exit $?; fi 35 | 36 | # Create volumes and make sure that they exist 37 | 38 | echo "$log_prefix Create volume: lizardfs-volume-1" && \ 39 | docker volume create --driver lizardfs lizardfs-volume-1 && \ 40 | \ 41 | echo "$log_prefix Make sure lizardfs-volume-1 exists in volume list" && \ 42 | docker volume ls | grep "lizardfs.*lizardfs-volume-1" && \ 43 | \ 44 | echo "$log_prefix Make sure lizardfs-volume-1 exists on LizardFS filesystem" && \ 45 | docker-compose exec client ls /mnt/mfs/docker/volumes | grep lizardfs-volume-1 && \ 46 | \ 47 | echo "$log_prefix Create a second volume: lizardfs-volume-2" && \ 48 | docker volume create --driver lizardfs lizardfs-volume-2 && \ 49 | \ 50 | echo "$log_prefix Make sure lizardfs-volume-2 exists" && \ 51 | docker volume ls | grep "lizardfs.*lizardfs-volume-2" && \ 52 | \ 53 | echo "$log_prefix Make sure lizardfs-volume-1 still exists" && \ 54 | docker volume ls | grep "lizardfs.*lizardfs-volume-1" 55 | 56 | if [ ! $? 
-eq 0 ]; then echo "TEST FAILED"; exit $?; fi 57 | 58 | # Store data in a volume and make sure that the data is persisted 59 | 60 | echo "$log_prefix Create test data on lizardfs-volume-1" && \ 61 | docker run -it --rm -v lizardfs-volume-1:/data --entrypoint=bash \ 62 | kadimasolutions/lizardfs -c 'echo "Hello World" > /data/test-data.txt' && \ 63 | \ 64 | echo "$log_prefix Make sure data exists in volume" && \ 65 | docker run -it --rm -v lizardfs-volume-1:/data --entrypoint=cat \ 66 | kadimasolutions/lizardfs /data/test-data.txt | grep "Hello World" && \ 67 | \ 68 | echo "$log_prefix Make sure data exists on LizardFS filesystem" && \ 69 | docker-compose exec client cat \ 70 | /mnt/mfs/docker/volumes/lizardfs-volume-1/test-data.txt | grep "Hello World" 71 | 72 | if [ ! $? -eq 0 ]; then echo "TEST FAILED"; exit $?; fi 73 | 74 | # Mount a volume into multiple containers, then remove the containers, 75 | # and remount 76 | 77 | echo "$log_prefix Mount lizardfs-volume-1 into container1 and container2" && \ 78 | docker run -d --name container1 -it --rm -v lizardfs-volume-1:/data --entrypoint=bash \ 79 | kadimasolutions/lizardfs && \ 80 | \ 81 | docker run -d --name container2 -it --rm -v lizardfs-volume-1:/data --entrypoint=bash \ 82 | kadimasolutions/lizardfs && \ 83 | \ 84 | echo "$log_prefix Make sure data exists in container1" && \ 85 | docker exec -it container1 cat /data/test-data.txt | grep "Hello World" && \ 86 | \ 87 | echo "$log_prefix Make sure data exists in container2" && \ 88 | docker exec -it container2 cat /data/test-data.txt | grep "Hello World" && \ 89 | \ 90 | echo "$log_prefix Remove container1" && \ 91 | docker stop container1 && \ 92 | \ 93 | echo "$log_prefix Make sure data still exists in container2" && \ 94 | docker exec -it container2 cat /data/test-data.txt | grep "Hello World" && \ 95 | \ 96 | echo "$log_prefix Remove container2" && \ 97 | docker stop container2 && \ 98 | \ 99 | echo "$log_prefix Make sure lizardfs-volume-1 can still be 
mounted into a new container" && \ 100 | docker run -it --rm -v lizardfs-volume-1:/data --entrypoint=cat \ 101 | kadimasolutions/lizardfs /data/test-data.txt | grep "Hello World" 102 | 103 | if [ ! $? -eq 0 ]; then echo "TEST FAILED"; exit $?; fi 104 | 105 | # Create a volume with a specified replication goal and check that it is set 106 | # when the volume is created 107 | 108 | echo "$log_prefix Create lizardfs-volume-3 with a replication goal of '3'" && \ 109 | docker volume create --driver lizardfs lizardfs-volume-3 -o ReplicationGoal=3 && \ 110 | \ 111 | echo "$log_prefix Make sure that the volume has a replication goal of '3'" && \ 112 | docker-compose exec \ 113 | client lizardfs getgoal /mnt/mfs/docker/volumes/lizardfs-volume-3 | \ 114 | grep ".*lizardfs-volume-3: 3" 115 | 116 | if [ ! $? -eq 0 ]; then echo "TEST FAILED"; exit $?; fi 117 | 118 | # Bring down the cluster 119 | 120 | echo "$log_prefix Bringing down LizardFS cluster" && \ 121 | echo "$log_prefix Remove volumes" && \ 122 | docker volume rm lizardfs-volume-1 && \ 123 | docker volume rm lizardfs-volume-2 && \ 124 | docker volume rm lizardfs-volume-3 && \ 125 | echo "$log_prefix Remove LizardFS cluster" && \ 126 | docker-compose down -v && \ 127 | echo "$log_prefix Disable plugin" && \ 128 | docker plugin disable -f lizardfs 129 | 130 | if [ ! $? 
-eq 0 ]; then echo "TEST FAILED"; exit $?; fi 131 | 132 | # Test connecting to cluster on a different port, storage directory, with mount 133 | # options, and with the root volume name set 134 | 135 | echo "MASTER_PORT=9900" > .env 136 | 137 | echo "$log_prefix Creating cluster with master port 9900" && \ 138 | docker-compose up -d && \ 139 | \ 140 | echo "$log_prefix Creating storage directory, /alternate-volumes, on LizardFS filesystem" && \ 141 | docker-compose exec client mkdir -p /mnt/mfs/alternate-volumes && \ 142 | \ 143 | echo "$log_prefix Enabling plugin with PORT=9900, REMOTE_PATH=/alternate-volumes," && \ 144 | echo "$log_prefix MOUNT_OPTIONS='-o allow_other', and ROOT_VOLUME_NAME=lizardfs" && \ 145 | docker plugin set lizardfs PORT=9900 REMOTE_PATH=/alternate-volumes \ 146 | MOUNT_OPTIONS='-o allow_other' ROOT_VOLUME_NAME=lizardfs && \ 147 | docker plugin enable lizardfs && \ 148 | \ 149 | echo "$log_prefix Create volume 'volume-on-different-port' to test connection" && \ 150 | docker volume create --driver lizardfs volume-on-different-port && \ 151 | \ 152 | echo "$log_prefix Make sure volume-on-different-port exists in volume list" && \ 153 | docker volume ls | grep "lizardfs.*volume-on-different-port" && \ 154 | \ 155 | echo "$log_prefix Make sure that the mount options are getting set" && \ 156 | ps -ef | grep "allow_other" | grep -v "grep" && \ 157 | \ 158 | echo "$log_prefix Remove volume: volume-on-different-port" && \ 159 | docker volume rm volume-on-different-port 160 | 161 | if [ ! $? 
-eq 0 ]; then echo "TEST FAILED"; exit $?; fi 162 | 163 | # Run tests for the Root Volume 164 | 165 | echo "$log_prefix Create volumes: liz-1, liz-2" && \ 166 | docker volume create --driver lizardfs liz-1 && \ 167 | docker volume create --driver lizardfs liz-2 && \ 168 | \ 169 | echo "$log_prefix Add test-files liz-1, liz-2" && \ 170 | docker run -it --rm -v liz-1:/data --entrypoint=touch \ 171 | kadimasolutions/lizardfs /data/liz-1.txt && \ 172 | docker run -it --rm -v liz-2:/data --entrypoint=touch \ 173 | kadimasolutions/lizardfs /data/liz-2.txt && \ 174 | \ 175 | echo "$log_prefix Mount Root Volume and make sure liz-1, liz-2, and their files are in it" && \ 176 | docker run -it --rm -v lizardfs:/lizardfs --entrypoint=ls \ 177 | kadimasolutions/lizardfs /lizardfs/liz-1 | grep "liz-1.txt" && \ 178 | docker run -it --rm -v lizardfs:/lizardfs --entrypoint=ls \ 179 | kadimasolutions/lizardfs /lizardfs/liz-2 | grep "liz-2.txt" && \ 180 | \ 181 | echo "$log_prefix Create a new directory, liz-3, in the Root Volume" && \ 182 | docker run -it --rm -v lizardfs:/lizardfs --entrypoint=mkdir \ 183 | kadimasolutions/lizardfs /lizardfs/liz-3 && \ 184 | \ 185 | echo "$log_prefix Make sure the new directory registers in the volume list" && \ 186 | docker volume ls | grep "lizardfs.*liz-3" && \ 187 | \ 188 | echo "$log_prefix Create a volume with the same name as the Root Volume" && \ 189 | docker run -it --rm -v lizardfs:/lizardfs --entrypoint=mkdir \ 190 | kadimasolutions/lizardfs /lizardfs/lizardfs && \ 191 | \ 192 | echo "$log_prefix Make sure that the Root Volume takes precedence when mounting" && \ 193 | docker run -it --rm -v lizardfs:/lizardfs --entrypoint=ls \ 194 | kadimasolutions/lizardfs /lizardfs/liz-1 | grep "liz-1.txt" 195 | 196 | if [ ! $? -eq 0 ]; then echo "TEST FAILED"; exit $?; fi 197 | 198 | echo "$log_prefix Make sure you can't delete the Root Volume" && 199 | docker volume rm lizardfs 200 | 201 | if [ $? 
-eq 0 ]; then echo "TEST FAILED"; exit $?; fi 202 | 203 | echo "$log_prefix Make sure all volumes still exist after attempting to delete the Root Volume" 204 | docker volume ls | grep "lizardfs.*liz-1" && \ 205 | docker volume ls | grep "lizardfs.*liz-2" && \ 206 | docker volume ls | grep "lizardfs.*liz-3" && \ 207 | \ 208 | echo "$log_prefix Delete the volumes" && \ 209 | docker volume rm liz-1 && \ 210 | docker volume rm liz-2 && \ 211 | docker volume rm liz-3 212 | 213 | if [ ! $? -eq 0 ]; then echo "TEST FAILED"; exit $?; fi 214 | 215 | # Test setting the log level 216 | 217 | plugin_id=$(docker plugin ls | grep lizardfs | awk '{print $1}') 218 | 219 | echo "$log_prefix Test a 'docker volume ls'" && \ 220 | docker volume ls 221 | if [ ! $? -eq 0 ]; then echo "TEST FAILED"; exit $?; fi 222 | 223 | echo "$log_prefix Make sure plugin is not logging DEBUG messages" 224 | cat /var/log/docker.log | grep $plugin_id | tail -n 1 | grep DEBUG 225 | if [ $? -eq 0 ]; then echo "TEST FAILED"; exit $?; fi 226 | 227 | echo "$log_prefix Set log level to 'DEBUG'" && \ 228 | docker plugin disable -f lizardfs && \ 229 | docker plugin set lizardfs LOG_LEVEL=DEBUG && \ 230 | docker plugin enable lizardfs && \ 231 | \ 232 | echo "$log_prefix Test a 'docker volume ls'" && \ 233 | docker volume ls && \ 234 | \ 235 | echo "$log_prefix Make Sure that the plugin does log a DEBUG message" && \ 236 | cat /var/log/docker.log | grep $plugin_id | tail -n 1 | grep DEBUG 237 | 238 | if [ ! $? -eq 0 ]; then echo "TEST FAILED"; exit $?; fi 239 | 240 | # Bring down the cluster 241 | 242 | echo "$log_prefix Remove LizardFS cluster" && \ 243 | docker-compose down -v 244 | 245 | if [ ! $? 
-eq 0 ]; then echo "TEST FAILED"; exit $?; fi 246 | 247 | # Test setting the CONNECT_TIMEOUT 248 | 249 | echo "$log_prefix Setting the plugin HOST='not-a-cluster' CONNECT_TIMEOUT='3000'" && \ 250 | docker plugin disable lizardfs && \ 251 | docker plugin set lizardfs HOST=not-a-cluster && \ 252 | docker plugin set lizardfs CONNECT_TIMEOUT=3000 && \ 253 | docker plugin enable lizardfs 254 | 255 | if [ ! $? -eq 0 ]; then echo "TEST FAILED"; exit $?; fi 256 | 257 | echo "$log_prefix Check timeout when connecting to non-existent cluster" 258 | time -f %e -o /tmp/elapsed docker volume ls 259 | elapsed=$(cat /tmp/elapsed | awk -F . '{print $1}') 260 | 261 | if [ $elapsed -gt 4 -o $elapsed -lt 2 ]; then echo "TEST FAILED"; exit 1; fi 262 | 263 | echo "$log_prefix Setting the plugin HOST='not-a-cluster' CONNECT_TIMEOUT='10000'" && \ 264 | docker plugin disable lizardfs && \ 265 | docker plugin set lizardfs HOST=not-a-cluster && \ 266 | docker plugin set lizardfs CONNECT_TIMEOUT=10000 && \ 267 | docker plugin enable lizardfs 268 | 269 | if [ ! $? -eq 0 ]; then echo "TEST FAILED"; exit $?; fi 270 | 271 | echo "$log_prefix Check timeout when connecting to non-existent cluster" 272 | time -f %e -o /tmp/elapsed docker volume ls 273 | elapsed=$(cat /tmp/elapsed | awk -F . '{print $1}') 274 | 275 | if [ $elapsed -gt 11 -o $elapsed -lt 9 ]; then echo "TEST FAILED"; exit 1; fi 276 | 277 | echo "$log_prefix ALL DONE. SUCCESS!" 278 | --------------------------------------------------------------------------------