├── .papr.yml ├── .travis.yml ├── LICENSE ├── Makefile ├── NEWS.md ├── README.md ├── container-storage-setup.1 ├── container-storage-setup.conf ├── container-storage-setup.sh ├── container-storage-setup.spec ├── css-child-read-write.sh ├── docker-storage-setup-override.conf ├── docker-storage-setup.1 ├── docker-storage-setup.service ├── libcss.sh └── tests ├── 001-test-use-devs-to-create-thin-pool.sh ├── 002-test-reject-disk-with-lvm-signature.sh ├── 003-test-override-signature-wipes-existing-signatures.sh ├── 004-test-non-absolute-disk-name-support.sh ├── 005-test-devmapper-cleanup.sh ├── 006-test-overlay-cleanup.sh ├── 007-test-setting-extra-opts.sh ├── 008-test-overlay2-setup-cleanup.sh ├── 009-test-follow-symlinked-devices.sh ├── 010-test-use-devs-to-create-docker-root-volume.sh ├── 011-test-docker-root-volume-cleanup.sh ├── 012-test-use-devs-to-create-container-root-volume.sh ├── 013-test-container-root-volume-cleanup.sh ├── 014-test-use-loop-to-create-thin-pool.sh ├── 015-test-fail-loop-to-create-thin-pool.sh ├── 016-test-gpt-partition-table-creation-2tb.sh ├── 017-test-storage-driver-change-to-overlay2-in-atomic.sh ├── 018-test-thinpool-reset-after-driver-change-to-overlay2-in-atomic.sh ├── 101-test-use-devs-to-create-thin-pool.sh ├── 102-test-reject-disk-with-lvm-signature.sh ├── 103-test-override-signature-wipes-existing-signatures.sh ├── 105-test-devmapper-cleanup.sh ├── 106-test-overlay-cleanup.sh ├── 107-test-setting-extra-opts.sh ├── 108-test-overlay2-setup-cleanup.sh ├── 109-test-follow-symlinked-devices.sh ├── 110-test-fail-if-no-container-thinpool.sh ├── 112-test-use-devs-to-create-container-root-volume.sh ├── 113-test-container-root-volume-cleanup.sh ├── 114-test-use-loop-to-create-thin-pool.sh ├── 115-test-fail-loop-to-create-thin-pool.sh ├── 116-test-storage-driver-nil-create-remove.sh ├── 117-test-devmapper-activation-deactivation.sh ├── 118-test-container-root-lv-activation-deactivation.sh ├── 119-test-gpt-partition-table-creation-2tb.sh ├── 
README ├── css-test-config ├── libtest.sh └── run-tests.sh /.papr.yml: -------------------------------------------------------------------------------- 1 | branches: 2 | - master 3 | - auto 4 | - try 5 | 6 | host: 7 | distro: fedora/28/atomic 8 | specs: 9 | secondary-disk: 10 10 | 11 | context: fedora/28/atomic 12 | 13 | required: true 14 | 15 | tests: 16 | - mkdir -p /srv/install 17 | - docker run --rm --privileged -v $PWD:/code -v /srv/install:/srv/install --workdir /code registry.fedoraproject.org/fedora:28 sh -c 'dnf install -y make && make install DESTDIR=/srv/install' 18 | - systemctl stop docker 19 | - rm -rf /var/lib/docker/* 20 | - ostree admin unlock 21 | - rsync -rlv /srv/install/usr/ /usr/ 22 | - rsync -rlv /srv/install/etc/ /etc/ 23 | - rm -f /etc/sysconfig/docker-storage-setup 24 | - rm -f /etc/sysconfig/docker-storage 25 | - if findmnt /dev/vdb; then umount /dev/vdb; fi 26 | - wipefs -a /dev/vdb 27 | - echo "DEVS=/dev/vdb" >> tests/css-test-config 28 | - tests/run-tests.sh 29 | 30 | timeout: 15m 31 | 32 | artifacts: 33 | - temp/logs.txt 34 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | # The language is a lie but eh, maybe in the future we'll use some C 2 | # code. 3 | language: c 4 | addons: 5 | apt: 6 | packages: 7 | - automake 8 | - autotools-dev 9 | script: 10 | # For now, just a syntax check. 
11 | - bash -n container-storage-setup.sh 12 | 13 | notifications: 14 | webhooks: http://escher.verbum.org:54856/travis 15 | email: false 16 | 17 | branches: 18 | only: 19 | - auto 20 | 21 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | https://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 
35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. 
In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | Copyright 2013-2016 Docker, Inc. 180 | 181 | Licensed under the Apache License, Version 2.0 (the "License"); 182 | you may not use this file except in compliance with the License. 
183 | You may obtain a copy of the License at 184 | 185 | https://www.apache.org/licenses/LICENSE-2.0 186 | 187 | Unless required by applicable law or agreed to in writing, software 188 | distributed under the License is distributed on an "AS IS" BASIS, 189 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 190 | See the License for the specific language governing permissions and 191 | limitations under the License. 192 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | DOCKER ?= docker 2 | # Installation directories. 3 | PREFIX ?= $(DESTDIR)/usr 4 | BINDIR ?= $(PREFIX)/bin 5 | MANDIR ?= $(PREFIX)/share/man 6 | UNITDIR ?= $(PREFIX)/lib/systemd/system 7 | CSSLIBDIR ?= $(PREFIX)/share/container-storage-setup 8 | SYSCONFDIR ?= $(DESTDIR)/etc/sysconfig 9 | 10 | .PHONY: clean 11 | clean: 12 | -rm -rf *~ \#* .#* 13 | 14 | .PHONY: install 15 | install: install-docker install-core 16 | 17 | .PHONY: install-docker 18 | install-docker: 19 | install -D -m 644 docker-storage-setup.service ${UNITDIR}/${DOCKER}-storage-setup.service 20 | if [ ! 
-f ${SYSCONFDIR}/${DOCKER}-storage-setup ]; then \ 21 | install -D -m 644 docker-storage-setup-override.conf ${SYSCONFDIR}/${DOCKER}-storage-setup; \ 22 | echo "STORAGE_DRIVER=overlay2" >> ${SYSCONFDIR}/${DOCKER}-storage-setup; \ 23 | fi 24 | install -d -m 755 ${BINDIR} 25 | (cd ${BINDIR}; ln -sf /usr/bin/container-storage-setup ${DOCKER}-storage-setup) 26 | install -D -m 644 docker-storage-setup.1 ${MANDIR}/man1/${DOCKER}-storage-setup.1 27 | 28 | .PHONY: install-core 29 | install-core: 30 | install -D -m 755 container-storage-setup.sh ${BINDIR}/container-storage-setup 31 | install -D -m 644 container-storage-setup.conf ${CSSLIBDIR}/container-storage-setup 32 | install -D -m 755 libcss.sh ${CSSLIBDIR}/libcss.sh 33 | install -D -m 755 css-child-read-write.sh ${CSSLIBDIR}/css-child-read-write 34 | install -D -m 644 container-storage-setup.1 ${MANDIR}/man1/container-storage-setup.1 35 | 36 | dist: container-storage-setup.spec 37 | spectool -g container-storage-setup.spec 38 | 39 | srpm: dist 40 | rpmbuild --define "_sourcedir `pwd`" --define "_specdir `pwd`" \ 41 | --define "_rpmdir `pwd`" --define "_srcrpmdir `pwd`" -bs container-storage-setup.spec 42 | rpm: dist 43 | rpmbuild --define "_sourcedir `pwd`" --define "_specdir `pwd`" \ 44 | --define "_rpmdir `pwd`" --define "_srcrpmdir `pwd`" -ba container-storage-setup.spec 45 | -------------------------------------------------------------------------------- /NEWS.md: -------------------------------------------------------------------------------- 1 | 0.5 2 | --- 3 | 4 | This release has many changes contributed primarily by Vivek Goyal. 
5 | 6 | ### Docker pool just uses 60%, and will auto-grow using LVM 7 | 8 | - docker-storage-setup: Reserve 60% of free space for data volume 9 | - docker-storage-setup: Enable automatic pool extension using lvm facilities 10 | - docker-storage-setup: Do not grow data volumes upon restart 11 | 12 | These three changes mean that storage is now more dynamic in a 13 | more reliable fashion. 14 | 15 | Previously, the pool would use all configured space, which meant 16 | things like Docker volumes or regular host storage would be limited to 17 | the OS default (for Project Atomic, 3G). With this change, the root 18 | LV can be grown by the system administrator dynamically. 19 | 20 | The growing of the Docker pool is now managed by LVM dynamically, and 21 | will not be automatically resized whenever d-s-s runs (normally once 22 | on boot). 23 | 24 | ### Growpart logic reworked 25 | 26 | In cloud environments, a "growpart" logic is common where the partition 27 | table is changed on first boot with extra storage provided by the hypervisor. 28 | 29 | However, one essentially never wants to do this with real physical 30 | disks. 31 | 32 | The growpart logic is disabled by default, and virtualization images 33 | should be tweaked to turn it on. For example, the Fedora 34 | spin-kickstarts git module has a kickstart file with a %post that 35 | would be an appropriate place. 36 | 37 | ### Performance optimizations 38 | 39 | - docker-storage-setup: Skip block zeroing in thin pool 40 | - docker-storage-setup: Use chunk size 512K by default 41 | 42 | Will make Docker devicemapper usage faster. 
43 | 44 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Container Storage Setup 2 | 3 | ## Tool for setting up container runtimes storage 4 | 5 | `container-storage-setup` is part of the [Project Atomic](http://www.projectatomic.io/) suite of container projects, formerly known as docker-storage-setup. 6 | 7 | 8 | A crucial aspect to container runtimes is the concept of the copy-on-write (COW) layered filesystems. The [Docker Engine Storage docs](https://docs.docker.com/engine/userguide/storagedriver/) site explains how the docker daemon uses COW file systems 9 | 10 | `container-storage-setup` is a script to configure COW File systems like devicemapper and overlayfs. It is usually run via a systemd service. For example `docker-storage-setup.service`, runs `container-storage-setup` before the docker.service script starts the docker daemon. 11 | 12 | The `container-storage-setup` script takes an input file and an output file as parameters. The input file is usually provided by the distribution and 13 | is expected to be modified by administrators. The script generates the specified output file as a configuration file bash script which sets environment variables to be used by the container runtime service script. 14 | 15 | For example if I configured a runtime-storage-setup to look like 16 | 17 | ``` 18 | cat /etc/sysconfig/runtime-storage-setup 19 | STORAGE_DRIVER="overlay2" 20 | ``` 21 | 22 | If I then executed 23 | 24 | ``` 25 | container-storage-setup /etc/sysconfig/runtime-storage-setup /etc/sysconfig/runtime-storage 26 | ``` 27 | 28 | I will end up with a runtime storage file which looks like. 
29 | 30 | ``` 31 | cat /etc/sysconfig/runtime-storage 32 | STORAGE_OPTIONS="--storage-driver overlay2 " 33 | ``` 34 | 35 | The service script of the container runtime should have something like 36 | 37 | ``` 38 | EnvironmentFile=-/etc/sysconfig/runtime-storage 39 | ... 40 | ExecStart=/usr/bin/container-runtime $STORAGE_OPTIONS 41 | ... 42 | ``` 43 | 44 | Obviously the container runtime must handle the --storage-driver option. 45 | 46 | NOTE: `container-storage-setup` has legacy support for docker-storage-setup. If you execute the script without specifying an input file and an output file, it will default to an input file of `/etc/sysconfig/docker-storage-setup` and an output file of `/etc/sysconfig/docker-storage`. The Environment name in the output file will be set to DOCKER_STORAGE_OPTIONS. 47 | 48 | ``` 49 | cat /etc/sysconfig/docker-storage-setup 50 | STORAGE_DRIVER="overlay2" 51 | ``` 52 | 53 | If I then executed 54 | 55 | ``` 56 | container-storage-setup 57 | ``` 58 | 59 | I will end up with a runtime storage file which looks like. 60 | 61 | ``` 62 | cat /etc/sysconfig/docker-storage 63 | DOCKER_STORAGE_OPTIONS="--storage-driver overlay2 " 64 | ``` 65 | 66 | #### Input File 67 | 68 | The input file should be setup by distributions or by the packagers of the 69 | container runtimes. The contents can also be set during system 70 | bootstrap, e.g. in a `cloud-init` `bootcmd:` hook, or via 71 | kickstart `%post`. 72 | 73 | 74 | For more information on configuration, see 75 | [man container-storage-setup](container-storage-setup.1). 76 | -------------------------------------------------------------------------------- /container-storage-setup.1: -------------------------------------------------------------------------------- 1 | .TH "CONTAINER-STORAGE-SETUP" "1" "FEBRUARY 2017" "Helper Script for Container Storage Setup" "" 2 | .SH NAME 3 | .PP 4 | container\-storage\-setup - Tool for setting up storage for container runtimes. 
5 | .SH SYNOPSIS 6 | .PP 7 | \f[B]container-storage-setup\f[] [OPTIONS] 8 | 9 | \f[B]container-storage-setup\f[] [OPTIONS] COMMAND [args] 10 | 11 | .SH DESCRIPTION 12 | container-storage-setup configures storage for use by container 13 | runtimes. 14 | 15 | container-storage-setup without specifying a command defaults to 16 | using docker config files /etc/sysconfig/docker-storage-setup for 17 | input and /etc/sysconfig/docker-storage for output. 18 | 19 | container-storage-setup with a command creates and manages storage 20 | configurations. 21 | 22 | container-storage-setup can configure multiple backends: 23 | devicemapper, overlay, and overlay2. 24 | 25 | .SH OPTIONS 26 | .PP 27 | \f[B]--help\f[] 28 | Print usage statement 29 | 30 | \f[B]--reset\f[] 31 | Reset your container storage to init state. Reset does not remove 32 | volume groups or remove any of the disks added previously. 33 | 34 | Note: The \f[B]--reset\f[] 35 | command is not always sufficient to cleanup your 36 | container runtime environment. Other tools (\f[B]atomic storage reset\f[]) 37 | use this command to cleanup all storage. 38 | 39 | \f[B]--version\f[] 40 | Print version information 41 | 42 | .SH COMMANDS 43 | Following commands are supported. 44 | 45 | \f[B]create\f[] Create storage configuration 46 | 47 | \f[B]remove\f[] Remove storage configuration 48 | 49 | \f[B]list\f[] List currently created storage configurations 50 | 51 | \f[B]activate\f[] Activate storage configuration 52 | 53 | \f[B]deactivate\f[] Deactivate storage configuration 54 | 55 | \f[B]add-dev\f[] Add block device to storage configuration 56 | 57 | \f[B]export\f[] Export file which can be used to set environment variables for use by container runtimes 58 | 59 | .SH EXAMPLES 60 | Run \f[B]container-storage-setup\f[] after setting up your configuration in 61 | the INPUTFILE or /etc/sysconfig/docker-storage-setup. 
One can look at 62 | /usr/share/container-storage-setup/container-storage-setup for various options and 63 | their default settings. Anything that the user wants to change, should be 64 | changed in the INPUTFILE. This is the file which will override any 65 | settings specified in /usr/share/container-storage-setup/container-storage-setup. 66 | 67 | Create storage configuration example-config. 68 | 69 | .B container-storage-setup create -o OUTPUTFILE example-config INPUTFILE 70 | 71 | Above will create a storage configuration named example-config as 72 | specified in INPUTFILE and will put the output in OUTPUTFILE. OUTPUTFILE is 73 | a file which can be parsed by container runtime for various config 74 | options. In a typical form, output file can be passed in the 75 | \f[B]EnvironmentFile\f[] directive of container runtime systemd unit file. 76 | That will set STORAGE_OPTIONS environment variable which in turn can 77 | be parsed by container runtime process. 78 | 79 | To cleanup storage configuration, execute remove command. 80 | 81 | .B container-storage-setup remove example-config 82 | 83 | lvm2 version should be same or higher than lvm2-2.02.112 for lvm thin pool 84 | functionality to work properly. 85 | 86 | \f[B]Supported options for the configuration file\f[]: 87 | 88 | STORAGE_DRIVER: 89 | Specify a storage driver to be used with container runtime. 90 | Default: "devicemapper". 91 | Valid values are overlay, overlay2 and "". 92 | "" tells container-storage-setup to not perform any storage setup. 93 | 94 | CONTAINER_THINPOOL: 95 | Specify the thinpool name for the lvm thinpool. This is required 96 | when using the devicemapper STORAGE_DRIVER. CONTAINER_THINPOOL 97 | is logical volume name passed to \f[B]lvcreate\f[] when creating 98 | the thin pool volume. 99 | 100 | EXTRA_STORAGE_OPTIONS: 101 | A set of extra options that should be passed to the container 102 | runtime daemon. 
103 | Note: EXTRA_STORAGE_OPTIONS replaces EXTRA_DOCKER_STORAGE_OPTIONS 104 | which has been deprecated 105 | 106 | DEVS: A quoted, space-separated list of devices to be used. 107 | If a drive is partitioned and contains a ${dev}1 partition, 108 | that partition will be configured for use. Unpartitioned 109 | drives will be partitioned and configured for use. If "VG" 110 | is not specified, then use of the root disk's extra space 111 | is implied. 112 | 113 | VG: The volume group to use for container storage. Defaults to the 114 | volume group where the root filesystem resides. If VG is 115 | specified and the volume group does not exist, it will be 116 | created (which requires that "DEVS" be nonempty, since we don't 117 | currently support putting a second partition on the root disk). 118 | 119 | Note: lvm2 needs to be lvm2-2.02.112 or later for lvm thin pool functionality to work properly. 120 | 121 | GROWPART: 122 | One can use this option to enable/disable growing of partition 123 | table backing root volume group. This is intended for 124 | virtualization and cloud installations. By default it is 125 | disabled. Use GROWPART=true to enable automatic partition 126 | table resizing. 127 | 128 | AUTO_EXTEND_POOL: 129 | Enable automatic extension of pool by lvm. lvm can monitor 130 | the pool and automatically extend it when pool is getting full. 131 | 132 | POOL_AUTOEXTEND_THRESHOLD: 133 | Determines the pool extension threshold in terms of percentage 134 | of pool size. For example, if threshold is 60, that means when 135 | pool is 60% full, threshold has been hit. 136 | 137 | POOL_AUTOEXTEND_PERCENT: 138 | Determines the amount by which pool needs to be grown. This is 139 | specified in terms of % of pool size. So a value of 20 means 140 | that when threshold is hit, pool will be grown by 20% of existing 141 | pool size. 142 | 143 | CHUNK_SIZE: 144 | Controls the chunk size/block size of thin pool. 
CHUNK_SIZE value 145 | must be suitable for passing to \f[B]lvconvert --chunk-size\f[]. 146 | 147 | DEVICE_WAIT_TIMEOUT: 148 | Specifies a device wait timeout value in seconds. In certain 149 | cases required devices might not be immediately available and 150 | container-storage-setup might decide to wait for it. This timeout 151 | specifies how long one should wait for the device. 152 | Default is 60 seconds. 0 disables wait. 153 | 154 | WIPE_SIGNATURES: 155 | Wipe any signatures found on disk. Valid values are 156 | true/false and default value is false. By default if any 157 | signatures are found on disk operation is aborted. If this value 158 | is set to true, then signatures will either be wiped or 159 | overwritten as suitable. This also means that if there is any 160 | data on disk, it will be lost. 161 | 162 | CONTAINER_ROOT_LV_NAME: 163 | Name of the logical volume that will be mounted on 164 | CONTAINER_ROOT_LV_MOUNT_PATH. If a user is setting 165 | CONTAINER_ROOT_LV_MOUNT_PATH, he/she must set 166 | CONTAINER_ROOT_LV_NAME. 167 | 168 | CONTAINER_ROOT_LV_MOUNT_PATH: 169 | Creates a logical volume named CONTAINER_ROOT_LV_NAME and mounts 170 | it at the specified path. By default no new logical volume will 171 | be created. For example: 172 | \f[B]CONTAINER_ROOT_LV_MOUNT_PATH=/var/lib/containers/container-runtime\f[] 173 | would carve out a logical volume, format it with an XFS filesystem 174 | and mount it on /var/lib/containers/container-runtime. 175 | 176 | Note: DOCKER_ROOT_VOLUME is deprecated. Specifying 177 | DOCKER_ROOT_VOLUME and CONTAINER_ROOT_LV_MOUNT_PATH at the same 178 | time is not allowed. 179 | 180 | CONTAINER_ROOT_LV_SIZE: 181 | Specify the desired size for CONTAINER_ROOT_LV_MOUNT_PATH 182 | root volume. It defaults to 40% of all free space. 183 | 184 | CONTAINER_ROOT_LV_SIZE can take values acceptable to 185 | \f[B]lvcreate -L\f[] as well as some values acceptable to 186 | \f[B]lvcreate -l\f[]. 
If user intends to pass values acceptable 187 | to \f[B]lvcreate -l\f[], then only those values which contains "%" 188 | in syntax are acceptable. If value does not contain "%" it 189 | is assumed value is suitable for \f[B]lvcreate -L\f[]. 190 | 191 | Note: If both STORAGE_DRIVER=devicemapper and 192 | CONTAINER_ROOT_LV_MOUNT_PATH is set, container-storage-setup 193 | would set up the thin pool for devicemapper first, 194 | followed by extra volume. e.g if free space in the 195 | volume group is 10G, devicemapper thin pool size 196 | would be 4G (40% of 10G) and extra volume would be 197 | 2.4G (40% of 6G). 198 | 199 | Note: DOCKER_ROOT_VOLUME_SIZE is deprecated. Specifying 200 | DOCKER_ROOT_VOLUME_SIZE and CONTAINER_ROOT_LV_SIZE at the same 201 | time is not allowed. 202 | 203 | 204 | Options below should be specified as values acceptable to \f[B]lvextend -L\f[]. 205 | 206 | ROOT_SIZE: The size to which the root filesystem should be grown. 207 | 208 | ROOT_SIZE can take values acceptable to \f[B]lvcreate -L\f[] as well as 209 | some values acceptable to \f[B]lvcreate -l\f[]. If user intends to pass 210 | values acceptable to \f[B]lvcreate -l\f[], then only those values which 211 | contains "%" in syntax are acceptable. If value does not contain 212 | "%" it is assumed value is suitable for \f[B]lvcreate -L\f[]. 213 | 214 | DATA_SIZE: The desired size for container runtime thin pool data LV. 215 | Defaults: 40% free space in the VG after the root LV and container 216 | runtime metadata LV have been allocated/grown. 217 | 218 | DATA_SIZE can take values acceptable to \f[B]lvcreate -L\f[] as well as 219 | some values acceptable to \f[B]lvcreate -l\f[]. If user intends to pass 220 | values acceptable to \f[B]lvcreate -l\f[], then only those values which 221 | contains "%" in syntax are acceptable. If value does not contain 222 | "%" it is assumed value is suitable for \f[B]lvcreate -L\f[]. 
223 | 224 | MIN_DATA_SIZE: Specifies the minimum size of the thin pool data LV. If 225 | sufficient free space is not available, the pool creation will 226 | fail. 227 | 228 | Value should be a number followed by a optional suffix. 229 | "bBsSkKmMgGtTpPeE" are valid suffixes. If no suffix is specified 230 | then value will be considered as megabyte unit. 231 | 232 | Both upper and lower case suffix represent same unit of size. 233 | Use suffix B for Bytes, S for sectors as 512 bytes, K for 234 | kibibytes (1024 bytes), M for mebibytes (1024 kibibytes), G for 235 | gibibytes, T for tebibytes, P for pebibytes and E for exbibytes. 236 | 237 | POOL_META_SIZE: Specifies the size of thin pool metadata LV. If 238 | sufficient free space is not available, the pool creation will 239 | fail. 240 | 241 | Value should be a number followed by a optional suffix. 242 | "bBsSkKmMgGtTpPeE" are valid suffixes. If no suffix is specified 243 | then value will be considered as megabyte unit. 244 | 245 | Both upper and lower case suffix represent same unit of size. 246 | Use suffix B for Bytes, S for sectors as 512 bytes, K for 247 | kibibytes (1024 bytes), M for mebibytes (1024 kibibytes), G for 248 | gibibytes, T for tebibytes, P for pebibytes and E for exbibytes. 249 | 250 | \f[B]Sample\f[] 251 | 252 | A simple, sample INPUTFILE: 253 | 254 | DEVS=/dev/vdb 255 | 256 | DATA_SIZE=8GB 257 | 258 | .fi 259 | 260 | .SH "SEE ALSO" 261 | .BR atomic "(1)" 262 | 263 | .SH HISTORY 264 | 265 | .PP 266 | November 2014, originally compiled by Joe Brockmeier 267 | based on comments in Andy Grimm's script. 268 | February 2017, Modified by Dan Walsh . 269 | .SH AUTHORS 270 | Joe Brockmeier 271 | Andy Grimm 272 | Dan Walsh 273 | -------------------------------------------------------------------------------- /container-storage-setup.conf: -------------------------------------------------------------------------------- 1 | # Specify storage driver one wants to use with container runtimes. 
2 | # Default is devicemapper. 3 | # Other possible options are overlay, overlay2 and "". Empty string means do 4 | # not do any storage setup. 5 | STORAGE_DRIVER=devicemapper 6 | 7 | # Set extra options that will be appended to the generated STORAGE_OPTIONS 8 | # variable. These options will be passed to the container runtime daemon 9 | # as-is and should be valid container runtime storage options. 10 | # EXTRA_STORAGE_OPTIONS="--storage-opt dm.fs=ext4" 11 | 12 | # A quoted, space-separated list of devices to be used. This currently 13 | # expects the devices to be unpartitioned drives. If "VG" is not specified, 14 | # then use of the root disk's extra space is implied. 15 | # 16 | # DEVS=/dev/vdb 17 | 18 | # Specify the thinpool name for the lvm thinpool, when using the 19 | # devicemapper STORAGE_DRIVER. This is the logical volume name 20 | # for the newly created thin pool volume. 21 | # 22 | # CONTAINER_THINPOOL=container-thinpool 23 | 24 | # The volume group to use for container runtime storage. Defaults to the 25 | # volume group where the root filesystem resides. If VG is specified and the 26 | # volume group does not exist, it will be created (which requires that "DEVS" 27 | # be nonempty, since we don't currently support putting a second partition on 28 | # the root disk). 29 | # 30 | # VG= 31 | 32 | # The size to which the root filesystem should be grown. 33 | # Value should be acceptable to -L option of lvextend. 34 | # 35 | # ROOT_SIZE can take values acceptable to "lvcreate -L" as well as some 36 | # values acceptable to "lvcreate -l". If user intends to pass values 37 | # acceptable to "lvcreate -l", then only those values which contains "%" 38 | # in syntax are acceptable. If value does not contain "%" it is assumed 39 | # value is suitable for "lvcreate -L". 40 | # 41 | # ROOT_SIZE=8G 42 | 43 | # The desired size for the container runtime data LV. Defaults to using 40% 44 | # of FREE space. 
45 | # 46 | # DATA_SIZE can take values acceptable to "lvcreate -L" as well as some 47 | # values acceptable to "lvcreate -l". If user intends to pass values 48 | # acceptable to "lvcreate -l", then only those values which contains "%" 49 | # in syntax are acceptable. If value does not contain "%" it is assumed 50 | # value is suitable for "lvcreate -L". 51 | # 52 | DATA_SIZE=40%FREE 53 | 54 | # MIN_DATA_SIZE specifies the minimum size of data volume otherwise pool 55 | # creation fails. 56 | # 57 | # Value should be a number followed by a optional suffix. "bBsSkKmMgGtTpPeE" 58 | # are valid suffixes. If no suffix is specified then value will be considered 59 | # as mebibyte unit. 60 | # 61 | # Both upper and lower case suffix represent same unit of size. Use suffix B 62 | # for Bytes, S for sectors as 512 bytes, K for kibibytes (1024 bytes), M for 63 | # mebibytes (1024 kibibytes), G for gibibytes, T for tebibytes, P for 64 | # pebibytes and E for exbibytes. 65 | # 66 | MIN_DATA_SIZE=2G 67 | 68 | # The desired size for the thin pool metadata volume. Defaults to using .1% 69 | # of FREE space in Volume Group. 70 | # 71 | # Values passed should be suitable to be used by --poolmetadatasize option 72 | # of lvcreate. It should be a number followed by a optional suffix. 73 | # "bBsSkKmMgGtTpPeE" are valid suffixes. If no suffix is specified then value 74 | # will be considered as mebibyte unit. 75 | # 76 | # Both upper and lower case suffix represent same unit of size. Use suffix B 77 | # for Bytes, S for sectors as 512 bytes, K for kibibytes (1024 bytes), M for 78 | # mebibytes (1024 kibibytes), G for gibibytes, T for tebibytes, P for 79 | # pebibytes and E for exbibytes. 80 | #POOL_META_SIZE=16M 81 | 82 | # Controls the chunk size/block size of thin pool. Value of CHUNK_SIZE 83 | # be suitable to be passed to --chunk-size option of lvconvert. 84 | # 85 | CHUNK_SIZE=512K 86 | 87 | # Enable resizing partition table backing root volume group. 
By default it 88 | # is disabled until and unless GROWPART=true is specified. 89 | # 90 | GROWPART=false 91 | 92 | # Enable/disable automatic pool extension using lvm 93 | AUTO_EXTEND_POOL=yes 94 | 95 | # Auto pool extension threshold (in % of pool size) 96 | POOL_AUTOEXTEND_THRESHOLD=60 97 | 98 | # Extend the pool by specified percentage when threshold is hit. 99 | POOL_AUTOEXTEND_PERCENT=20 100 | 101 | # Device wait timeout in seconds. This is generic timeout which can be used by 102 | # container storage setup service to wait on various kind of block devices. 103 | # Setting a value of 0 can disable this wait. 104 | DEVICE_WAIT_TIMEOUT=60 105 | 106 | # Wipe any signatures (partition, filesystem, lvm etc) found on disk. 107 | # This could mean wiping the signature explicitly or using force options 108 | # of various commands to wipe/overwrite signatures. By default signatures 109 | # are not wiped and user needs to wipe these. One can change default behavior 110 | # by setting WIPE_SIGNATURES=true. Be careful before using this option 111 | # as this means if there was any leftover data on disk, it will be lost. 112 | WIPE_SIGNATURES=false 113 | 114 | # By default no new volume and filesystem will be setup for container runtime 115 | # root dir. For example the docker engine creates /var/lib/docker/ on top of 116 | # underlying filesystem for storing images and containers. 117 | # 118 | # Logical volume name that will be mounted on CONTAINER_ROOT_LV_MOUNT_PATH. 119 | # Setting CONTAINER_ROOT_LV_MOUNT_PATH requires CONTAINER_ROOT_LV_NAME be set 120 | # CONTAINER_ROOT_LV_NAME="container-root-lv" 121 | 122 | # Specify the desired size for container root lv volume. It defaults to 40% of 123 | # all free space. 124 | # 125 | # CONTAINER_ROOT_LV_SIZE can take values acceptable to "lvcreate -L" as well 126 | # as some values acceptable to "lvcreate -l". 
If user intends to pass 127 | # values acceptable to "lvcreate -l", then only those values which 128 | # contains "%" in syntax are acceptable. If value does not contain "%" it 129 | # is assumed value is suitable for "lvcreate -L". 130 | # 131 | # Note: If both STORAGE_DRIVER=devicemapper and CONTAINER_ROOT_LV_NAME is 132 | # set, container-storage-setup would set up the thin pool for devicemapper 133 | # first, followed by container runtime root volume. e.g if free space in the 134 | # volume group is 10G, devicemapper thin pool size would be 4G (40% of 10G) 135 | # and containe runtime root volume would be 2.4G (40% of 6G). 136 | CONTAINER_ROOT_LV_SIZE=40%FREE 137 | 138 | # Creates a logical volume named $CONTAINER_ROOT_LV_NAME and mount it on 139 | # $CONTAINER_ROOT_LV_MOUNT_PATH. By default no new logical volume will 140 | # be created. e.g. Specifying CONTAINER_ROOT_LV_MOUNT_PATH=/var/lib/containers/container-runtime 141 | # will carve out a logical volume, create a filesystem on it and mount 142 | # it on /var/lib/containers/container-runtime. 143 | # CONTAINER_ROOT_LV_MOUNT_PATH="/var/lib/containers/container-runtime" 144 | -------------------------------------------------------------------------------- /container-storage-setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #-- 4 | # Copyright 2014-2017 Red Hat, Inc. 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | #++ 18 | 19 | # Purpose: This script sets up the storage for container runtimes. 20 | # Author: Andy Grimm 21 | 22 | set -e 23 | 24 | # container-storage-setup version information 25 | _CSS_MAJOR_VERSION="0" 26 | _CSS_MINOR_VERSION="11" 27 | _CSS_SUBLEVEL="0" 28 | _CSS_EXTRA_VERSION="" 29 | 30 | _CSS_VERSION="${_CSS_MAJOR_VERSION}.${_CSS_MINOR_VERSION}.${_CSS_SUBLEVEL}" 31 | [ -n "$_CSS_EXTRA_VERSION" ] && _CSS_VERSION="${_CSS_VERSION}-${_CSS_EXTRA_VERSION}" 32 | 33 | # Locking related 34 | _LOCKFD=300 35 | _LOCKDIR="/var/lock/container-storage-setup" 36 | _LOCKFILE="lock" 37 | 38 | _CONFIG_NAME="" 39 | _CONFIG_DIR="/var/lib/container-storage-setup/" 40 | 41 | # Partition type related 42 | _MAX_MBR_SIZE_BYTES="2199023255040" 43 | 44 | # Metadata related stuff 45 | _METADATA_VERSION=1 46 | _INFILE_NAME="infile" 47 | _OUTFILE_NAME="outfile" 48 | _METAFILE_NAME="metadata" 49 | _STATUSFILE_NAME="status" 50 | 51 | # This section reads the config file $INPUTFILE 52 | # Read man page for a description of currently supported options: 53 | # 'man container-storage-setup' 54 | 55 | _DOCKER_ROOT_LV_NAME="docker-root-lv" 56 | _DOCKER_ROOT_DIR="/var/lib/docker" 57 | _DOCKER_METADATA_DIR="/var/lib/docker" 58 | DOCKER_ROOT_VOLUME_SIZE=40%FREE 59 | 60 | _DOCKER_COMPAT_MODE="" 61 | _STORAGE_IN_FILE="" 62 | _STORAGE_OUT_FILE="" 63 | _STORAGE_DRIVERS="devicemapper overlay overlay2" 64 | 65 | # Command related variables 66 | _COMMAND_LIST="create activate deactivate remove list export add-dev" 67 | _COMMAND="" 68 | 69 | _PIPE1=/run/css-$$-fifo1 70 | _PIPE2=/run/css-$$-fifo2 71 | _TEMPDIR=$(mktemp --tmpdir -d) 72 | 73 | # Keeps track of resolved device paths 74 | _DEVS_RESOLVED="" 75 | 76 | # Will have currently configured storage options in ${_STORAGE_OUT_FILE} 77 | _CURRENT_STORAGE_OPTIONS="" 78 | 79 | _STORAGE_OPTIONS="STORAGE_OPTIONS" 80 | 81 | # Keeps track of if 
# we created a volume group or not.
_VG_CREATED=

# Print the installed docker client version on stdout; prints nothing if
# the docker client is unavailable.
get_docker_version() {
  local version

  # docker version command exits with error as daemon is not running at this
  # point of time. So continue despite the error.
  version=$(docker version --format='{{.Client.Version}}' 2>/dev/null) || true
  echo $version
}

# Echo "--storage-opt dm.use_deferred_removal=true" if the installed docker
# version supports it; echo nothing otherwise.
get_deferred_removal_string() {
  local version major minor

  if ! version=$(get_docker_version);then
    return 0
  fi
  [ -z "$version" ] && return 0

  major=$(echo $version | cut -d "." -f1)
  minor=$(echo $version | cut -d "." -f2)
  [ -z "$major" ] && return 0
  [ -z "$minor" ] && return 0

  # docker 1.7 onwards supports deferred device removal. Enable it.
  if [ $major -gt 1 ] || ([ $major -eq 1 ] && [ $minor -ge 7 ]);then
    echo "--storage-opt dm.use_deferred_removal=true"
  fi
}

# Echo "--storage-opt dm.use_deferred_deletion=true" if both the installed
# docker version and the platform support it; echo nothing otherwise.
get_deferred_deletion_string() {
  local version major minor

  if ! version=$(get_docker_version);then
    return 0
  fi
  [ -z "$version" ] && return 0

  major=$(echo $version | cut -d "." -f1)
  minor=$(echo $version | cut -d "." -f2)
  [ -z "$major" ] && return 0
  [ -z "$minor" ] && return 0

  if should_enable_deferred_deletion $major $minor; then
    echo "--storage-opt dm.use_deferred_deletion=true"
  fi
}

should_enable_deferred_deletion() {
  # docker 1.9 onwards supports deferred device deletion. Enable it.
  local major=$1
  local minor=$2
  if [ $major -lt 1 ] || ([ $major -eq 1 ] && [ $minor -lt 9 ]);then
    return 1
  fi
  if platform_supports_deferred_deletion; then
    return 0
  fi
  return 1
}

# Probe whether the platform supports deferred deletion by coordinating
# with a helper process run in a separate mount namespace over two fifos
# (see css-child-read-write.sh). Returns 0 if supported.
platform_supports_deferred_deletion() {
  local deferred_deletion_supported=1
  trap cleanup_pipes EXIT
  local child_exec="$_SRCDIR/css-child-read-write.sh"

  [ ! -x "$child_exec" ] && child_exec="/usr/share/container-storage-setup/css-child-read-write"

  if [ ! -x "$child_exec" ];then
    return 1
  fi
  mkfifo $_PIPE1
  mkfifo $_PIPE2
  unshare -m ${child_exec} $_PIPE1 $_PIPE2 "$_TEMPDIR" &
  read -t 10 n <>$_PIPE1
  if [ "$n" != "start" ];then
    return 1
  fi
  # If the child's namespace still pins $_TEMPDIR, rmdir fails here and we
  # report "not supported".
  rmdir $_TEMPDIR > /dev/null 2>&1
  deferred_deletion_supported=$?
  echo "finish" > $_PIPE2
  return $deferred_deletion_supported
}

cleanup_pipes(){
  rm -f $_PIPE1
  rm -f $_PIPE2
  rmdir $_TEMPDIR 2>/dev/null
}

# Returns 0 if EXTRA_STORAGE_OPTIONS already carries a dm.fs= option.
extra_options_has_dm_fs() {
  local option
  for option in ${EXTRA_STORAGE_OPTIONS}; do
    if grep -q "dm.fs=" <<< $option; then
      return 0
    fi
  done
  return 1
}

# Given a dm device name in /dev/mapper/ dir
# (ex. /dev/mapper/docker-vg--docker-pool), get associated volume group
get_dmdev_vg() {
  local dmdev=${1##"/dev/mapper/"}
  local vg

  vg=$(dmsetup splitname $dmdev --noheadings | cut -d ":" -f1)
  echo $vg
}

# Wait for a device for certain time interval. If device is found 0 is
# returned otherwise 1. Waiting is skipped when DEVICE_WAIT_TIMEOUT is
# empty or "0".
wait_for_dev() {
  local devpath=$1
  local timeout=$DEVICE_WAIT_TIMEOUT

  if [ -b "$devpath" ];then
    Info "Device node $devpath exists."
    return 0
  fi

  if [ -z "$DEVICE_WAIT_TIMEOUT" ] || [ "$DEVICE_WAIT_TIMEOUT" == "0" ];then
    Info "Not waiting for device $devpath as DEVICE_WAIT_TIMEOUT=${DEVICE_WAIT_TIMEOUT}."
    return 0
  fi

  # Poll in 5 second steps until the block device shows up or time runs out.
  while [ $timeout -gt 0 ]; do
    Info "Waiting for device $devpath to be available. Wait time remaining is $timeout seconds"
    if [ $timeout -le 5 ];then
      sleep $timeout
    else
      sleep 5
    fi
    timeout=$((timeout-5))
    if [ -b "$devpath" ]; then
      Info "Device node $devpath exists."
      return 0
    fi
  done

  Info "Timed out waiting for device $devpath"
  return 1
}

# Emit the STORAGE_OPTIONS line for the devicemapper driver, resolving the
# thin pool lv in $VG to its /dev/mapper device path.
get_devicemapper_config_options() {
  local storage_options
  local dm_fs="--storage-opt dm.fs=xfs"

  # docker expects device mapper device and not lvm device. Do the conversion.
  eval $( lvs --nameprefixes --noheadings -o lv_name,kernel_major,kernel_minor $VG | while read line; do
    eval $line
    if [ "$LVM2_LV_NAME" = "$CONTAINER_THINPOOL" ]; then
      echo _POOL_DEVICE_PATH=/dev/mapper/$( cat /sys/dev/block/${LVM2_LV_KERNEL_MAJOR}:${LVM2_LV_KERNEL_MINOR}/dm/name )
    fi
  done )

  if extra_options_has_dm_fs; then
    # dm.fs option defined in ${EXTRA_STORAGE_OPTIONS}
    dm_fs=""
  fi

  storage_options="${_STORAGE_OPTIONS}=\"--storage-driver devicemapper ${dm_fs} --storage-opt dm.thinpooldev=$_POOL_DEVICE_PATH $(get_deferred_removal_string) $(get_deferred_deletion_string) ${EXTRA_STORAGE_OPTIONS}\""
  echo $storage_options
}

# Emit the STORAGE_OPTIONS line for the given storage driver ($1).
get_config_options() {
  if [ "$1" == "devicemapper" ]; then
    get_devicemapper_config_options
    return $?
  fi
  echo "${_STORAGE_OPTIONS}=\"--storage-driver $1 ${EXTRA_STORAGE_OPTIONS}\""
  return 0
}

# Check if multiple overlay directories are supported and if overlay module
# itself is supported on the system.
can_mount_overlay() {
  local dir="/run/container-storage-setup/"
  local lower1="$dir/lower1"
  local lower2="$dir/lower2"
  local upper="$dir/upper"
  local work="$dir/work"
  local merged="$dir/merged"
  local cmd

  # Create multiple directories in /run
  if ! mkdir -p $dir; then
    Error "Failed to create directory $dir"
    return 1
  fi

  cmd="mkdir -p $lower1 $lower2 $upper $work $merged"
  if ! $cmd; then
    Error "Failed to run $cmd"
    return 1
  fi

  # Try the mount in a private mount namespace so nothing leaks.
  cmd="unshare -m mount -t overlay -o lowerdir=$lower1:$lower2,upperdir=$upper,workdir=$work none $merged"
  if ! $cmd; then
    Error "Failed to run $cmd"
    return 1
  fi

  return 0
}

# Returns 1 if $1 is on an xfs filesystem formatted with ftype=0; returns 0
# for ftype=1 or any non-xfs filesystem.
is_xfs_ftype_enabled() {
  local mountroot=$1
  local fstype

  fstype=$(stat -f -c '%T' $mountroot)

  [ "$fstype" != "xfs" ] && return 0

  # For xfs, see https://bugzilla.redhat.com/show_bug.cgi?id=1288162#c8
  if test "$(xfs_info $mountroot | grep -o 'ftype=[01]')" = "ftype=0"; then
    return 1
  fi

  return 0
}

# Write $2 with the STORAGE_OPTIONS line for driver $1. An empty driver
# produces an empty file. Writes atomically via a .tmp file so readers
# never observe a half-written config.
write_storage_config_file () {
  local storage_driver=$1
  local storage_out_file=$2
  local storage_options

  if [ -z "$storage_driver" ];then
    touch "$storage_out_file"
    return 0
  fi

  if ! storage_options=$(get_config_options $storage_driver); then
    return 1
  fi

  # NOTE(review): restored the '<<EOF' here-doc operator; this copy of the
  # file had lost the '<<' (a bare 'EOF' line followed the cat).
  cat <<EOF > ${storage_out_file}.tmp
$storage_options
EOF

  mv -Z ${storage_out_file}.tmp ${storage_out_file}
}

# Convert $1 (a number with optional b/s/k/m/g/t/p/e suffix, optionally
# followed by "b"/"ib"; bare numbers mean MiB) to bytes on stdout.
# Returns 1 on unparseable input.
convert_size_in_bytes() {
  local size=$1 prefix suffix

  # if it is all numeric, it is valid as by default it will be MiB.
  if [[ $size =~ ^[[:digit:]]+$ ]]; then
    echo $(($size*1024*1024))
    return 0
  fi

  # support G, G[bB] or Gi[bB] inputs.
  prefix=${size%[bBsSkKmMgGtTpPeE]i[bB]}
  prefix=${prefix%[bBsSkKmMgGtTpPeE][bB]}
  prefix=${prefix%[bBsSkKmMgGtTpPeE]}

  # if prefix is not all numeric now, it is an error.
  if ! [[ $prefix =~ ^[[:digit:]]+$ ]]; then
    return 1
  fi

  # BUGFIX: this used ${data_size#$prefix}, a variable that only happened to
  # be in scope (via dynamic scoping) when called from data_size_in_bytes.
  # Any other caller — e.g. check_min_data_size_condition converting a
  # suffixed MIN_DATA_SIZE — got an empty suffix and a spurious failure.
  # Derive the suffix from this function's own argument instead.
  suffix=${size#$prefix}

  case $suffix in
    b*|B*) echo $prefix;;
    s*|S*) echo $(($prefix*512));;
    k*|K*) echo $(($prefix*2**10));;
    m*|M*) echo $(($prefix*2**20));;
    g*|G*) echo $(($prefix*2**30));;
    t*|T*) echo $(($prefix*2**40));;
    p*|P*) echo $(($prefix*2**50));;
    e*|E*) echo $(($prefix*2**60));;
    *) return 1;;
  esac
}

# Convert $1 (DATA_SIZE syntax: an "lvcreate -L" style size, N%FREE, or
# N%VG) to bytes on stdout. Returns 1 on error.
data_size_in_bytes() {
  local data_size=$1
  local bytes vg_size free_space percent

  # Use the function argument consistently rather than the DATA_SIZE global
  # (the old mix of the two only worked because the sole caller passed
  # $DATA_SIZE).

  # -L compatible syntax
  if [[ $data_size != *%* ]]; then
    bytes=$(convert_size_in_bytes $data_size)
    [ $? -ne 0 ] && return 1
    # If integer overflow took place, value is too large to handle.
    if [ $bytes -lt 0 ];then
      Error "DATA_SIZE=$data_size is too large to handle."
      return 1
    fi
    echo $bytes
    return 0
  fi

  if [[ $data_size == *%FREE ]];then
    free_space=$(vgs --noheadings --nosuffix --units b -o vg_free $VG)
    percent=${data_size%\%FREE}
    echo $((percent*free_space/100))
    return 0
  fi

  if [[ $data_size == *%VG ]];then
    vg_size=$(vgs --noheadings --nosuffix --units b -o vg_size $VG)
    percent=${data_size%\%VG}
    echo $((percent*vg_size/100))
  fi
  return 0
}

# Ensure the VG has at least MIN_DATA_SIZE free and bump DATA_SIZE up to
# MIN_DATA_SIZE if the requested size is smaller. No-op when MIN_DATA_SIZE
# is unset.
check_min_data_size_condition() {
  local min_data_size_bytes data_size_bytes free_space

  [ -z "$MIN_DATA_SIZE" ] && return 0

  if ! check_numeric_size_syntax $MIN_DATA_SIZE; then
    Fatal "MIN_DATA_SIZE value $MIN_DATA_SIZE is invalid."
  fi

  if ! min_data_size_bytes=$(convert_size_in_bytes $MIN_DATA_SIZE);then
    Fatal "Failed to convert MIN_DATA_SIZE to bytes"
  fi

  # If integer overflow took place, value is too large to handle.
  if [ $min_data_size_bytes -lt 0 ];then
    Fatal "MIN_DATA_SIZE=$MIN_DATA_SIZE is too large to handle."
  fi

  free_space=$(vgs --noheadings --nosuffix --units b -o vg_free $VG)

  if [ $free_space -lt $min_data_size_bytes ];then
    Fatal "There is not enough free space in volume group $VG to create data volume of size MIN_DATA_SIZE=${MIN_DATA_SIZE}."
  fi

  if ! data_size_bytes=$(data_size_in_bytes $DATA_SIZE);then
    Fatal "Failed to convert desired data size to bytes"
  fi

  if [ $data_size_bytes -lt $min_data_size_bytes ]; then
    # Increasing DATA_SIZE to meet minimum data size requirements.
    Info "DATA_SIZE=${DATA_SIZE} is smaller than MIN_DATA_SIZE=${MIN_DATA_SIZE}. Will create data volume of size specified by MIN_DATA_SIZE."
    DATA_SIZE=$MIN_DATA_SIZE
  fi
}

# Create the lvm thin pool $CONTAINER_THINPOOL in $VG, sizing the data and
# metadata lvs from DATA_SIZE / POOL_META_SIZE / CHUNK_SIZE.
create_lvm_thin_pool () {
  if [ -z "$_DEVS_RESOLVED" ] && [ -z "$_VG_EXISTS" ]; then
    Fatal "Specified volume group $VG does not exist, and no devices were specified"
  fi

  if [ ! -n "$DATA_SIZE" ]; then
    Fatal "DATA_SIZE not specified."
  fi

  if ! check_data_size_syntax $DATA_SIZE; then
    Fatal "DATA_SIZE value $DATA_SIZE is invalid."
  fi

  check_min_data_size_condition

  if [ -n "$POOL_META_SIZE" ]; then
    _META_SIZE_ARG="$POOL_META_SIZE"
  else
    # Calculate size of metadata lv. Reserve 0.1% of the free space in the VG
    # for docker metadata.
    _VG_SIZE=$(vgs --noheadings --nosuffix --units s -o vg_size $VG)
    _META_SIZE=$(( $_VG_SIZE / 1000 + 1 ))
    if [ -z "$_META_SIZE" ];then
      Fatal "Failed to calculate metadata volume size."
    fi
    _META_SIZE_ARG=${_META_SIZE}s
  fi

  if [ -n "$CHUNK_SIZE" ]; then
    _CHUNK_SIZE_ARG="-c $CHUNK_SIZE"
  fi

  # '%' values are extent-relative (-l); plain sizes go to -L.
  if [[ $DATA_SIZE == *%* ]]; then
    _DATA_SIZE_ARG="-l $DATA_SIZE"
  else
    _DATA_SIZE_ARG="-L $DATA_SIZE"
  fi

  lvcreate -y --type thin-pool --zero n $_CHUNK_SIZE_ARG --poolmetadatasize $_META_SIZE_ARG $_DATA_SIZE_ARG -n $CONTAINER_THINPOOL $VG
}

# Extract the dm.thinpooldev value from the currently configured storage
# options (_CURRENT_STORAGE_OPTIONS), if any.
get_configured_thin_pool() {
  local options tpool opt

  options=$_CURRENT_STORAGE_OPTIONS
  [ -z "$options" ] && return 0

  # This assumes that thin pool is specified as dm.thinpooldev=foo. There
  # are no spaces in between.
  for opt in $options; do
    if [[ $opt =~ dm.thinpooldev* ]];then
      tpool=${opt#*=}
      echo "$tpool"
      return 0
    fi
  done
}

# Exit fatally if docker already has devicemapper metadata on disk, since a
# newly created thin pool would not match it.
check_docker_storage_metadata() {
  local docker_devmapper_meta_dir="$_DOCKER_METADATA_DIR/devicemapper/metadata/"

  [ ! -d "$docker_devmapper_meta_dir" ] && return 0

  # Docker seems to be already using devicemapper storage driver. Error out.
  Error "Docker has been previously configured for use with devicemapper graph driver. Not creating a new thin pool as existing docker metadata will fail to work with it. Manual cleanup is required before this will succeed."
  Info "Docker state can be reset by stopping docker and by removing ${_DOCKER_METADATA_DIR} directory. This will destroy existing docker images and containers and all the docker metadata."
  exit 1
}

# Translate a mount path into its systemd mount unit file name: '-' is
# escaped as \x2d, '/' becomes '-', the leading '-' is dropped and ".mount"
# is appended (e.g. /var/lib/docker -> var-lib-docker.mount).
systemd_escaped_filename () {
  local escaped_path filename path=$1
  escaped_path=$(echo ${path}|sed 's|-|\\x2d|g')
  filename=$(echo ${escaped_path}.mount|sed 's|/|-|g' | cut -b 2-)
  echo $filename
}


# Compatibility mode code
run_docker_compatibility_code() {
  # Verify storage options set correctly in input files
  check_storage_options

  # Query and save current storage options
  if ! _CURRENT_STORAGE_OPTIONS=$(get_current_storage_options); then
    return 1
  fi

  determine_rootfs_pvs_vg

  if [ $_RESET -eq 1 ]; then
    reset_storage_compat
    exit 0
  fi

  partition_disks_create_vg
  grow_root_pvs

  # NB: We are growing root here first, because when root and docker share a
  # disk, we'll default to giving some portion of remaining space to docker.
  # Do this operation only if root is on a logical volume.
  [ -n "$_ROOT_VG" ] && grow_root_lv_fs

  if is_old_data_meta_mode; then
    Fatal "Old mode of passing data and metadata logical volumes to docker is not supported. Exiting."
  fi

  setup_storage_compat
}

#
# In the past we created a systemd mount target file, we no longer
# use it, but if one pre-existed we still need to handle it.
#
remove_systemd_mount_target () {
  local mp=$1
  local filename=$(systemd_escaped_filename $mp)
  if [ -f /etc/systemd/system/$filename ]; then
    if [ -x /usr/bin/systemctl ];then
      systemctl disable $filename >/dev/null 2>&1
      systemctl stop $filename >/dev/null 2>&1
      systemctl daemon-reload
    fi
    rm -f /etc/systemd/system/$filename >/dev/null 2>&1
  fi
}

# This is used in compatibility mode.
553 | reset_extra_volume_compat () { 554 | local mp filename 555 | local lv_name=$1 556 | local mount_dir=$2 557 | local vg=$3 558 | 559 | if extra_volume_exists $lv_name $vg; then 560 | mp=$(extra_lv_mountpoint $vg $lv_name $mount_dir) 561 | if [ -n "$mp" ];then 562 | if ! umount $mp >/dev/null 2>&1; then 563 | Fatal "Failed to unmount $mp" 564 | fi 565 | fi 566 | lvchange -an $vg/${lv_name} 567 | lvremove $vg/${lv_name} 568 | else 569 | return 0 570 | fi 571 | # If the user has manually unmounted mount directory, mountpoint (mp) 572 | # will be empty. Extract ${mp} from $(mount_dir) in that case. 573 | if [ -z "$mp" ];then 574 | mp=${mount_dir} 575 | fi 576 | remove_systemd_mount_target $mp 577 | } 578 | 579 | reset_lvm_thin_pool () { 580 | local thinpool_name=$1 581 | local vg=$2 582 | if lvm_pool_exists $thinpool_name $vg; then 583 | lvchange -an $vg/${thinpool_name} 584 | lvremove $vg/${thinpool_name} 585 | fi 586 | } 587 | 588 | # Used in compatibility mode. Determine if already configured thin pool 589 | # is managed by container-storage-setup or not. Returns 0 if tpool is 590 | # managed otherwise 1. 591 | is_managed_tpool_compat() { 592 | local tpool=$1 593 | local thinpool_name=${CONTAINER_THINPOOL} 594 | local escaped_pool_lv_name=`echo $thinpool_name | sed 's/-/--/g'` 595 | 596 | # css generated thin pool device name starts with /dev/mapper/ and 597 | # ends with $thinpool_name 598 | [[ "$tpool" == /dev/mapper/*${escaped_pool_lv_name} ]] && return 0 599 | return 1 600 | } 601 | 602 | # This is used in comatibility mode. 603 | bringup_existing_thin_pool_compat() { 604 | local tpool=$1 605 | 606 | # css generated thin pool device name starts with /dev/mapper/ and 607 | # ends with $thinpool_name 608 | if ! is_managed_tpool_compat "$tpool";then 609 | Fatal "Thin pool ${tpool} does not seem to be managed by container-storage-setup. Exiting." 610 | fi 611 | 612 | if ! 
wait_for_dev "$tpool"; then 613 | Fatal "Already configured thin pool $tpool is not available. If thin pool exists and is taking longer to activate, set DEVICE_WAIT_TIMEOUT to a higher value and retry. If thin pool does not exist any more, remove ${_STORAGE_OUT_FILE} and retry" 614 | fi 615 | } 616 | 617 | # This is used in comatibility mode. Returns 0 if thin pool is already 618 | # configured and wait could find the device. Returns 1 if thin pool is 619 | # not configured and probably needs to be created. Terminates script 620 | # on fatal errors. 621 | check_existing_thinpool_compat() { 622 | local tpool 623 | 624 | # Check if a thin pool is already configured in /etc/sysconfig/docker-storage 625 | # If yes, wait for that thin pool to come up. 626 | tpool=`get_configured_thin_pool` 627 | [ -z "$tpool" ] && return 1 628 | 629 | Info "Found an already configured thin pool $tpool in ${_STORAGE_OUT_FILE}" 630 | bringup_existing_thin_pool_compat "$tpool" 631 | return 632 | } 633 | 634 | # This is used in comatibility mode. 635 | setup_lvm_thin_pool_compat () { 636 | local thinpool_name=${CONTAINER_THINPOOL} 637 | 638 | if check_existing_thinpool_compat; then 639 | process_auto_pool_extenion ${VG} ${thinpool_name} 640 | # We found existing thin pool and waited for it and processed auto 641 | # pool extension changes. There should not be any need to process 642 | # further 643 | return 644 | fi 645 | 646 | # At this point of time, a volume group should exist for lvm thin pool 647 | # operations to succeed. Make that check and fail if that's not the case. 648 | if ! vg_exists "$VG";then 649 | Fatal "No valid volume group found. Exiting." 650 | else 651 | _VG_EXISTS=1 652 | fi 653 | 654 | if ! 
lvm_pool_exists $thinpool_name $VG; then 655 | [ -n "$_DOCKER_COMPAT_MODE" ] && check_docker_storage_metadata 656 | create_lvm_thin_pool 657 | [ -n "$_STORAGE_OUT_FILE" ] && write_storage_config_file $STORAGE_DRIVER "$_STORAGE_OUT_FILE" 658 | else 659 | # At this point /etc/sysconfig/docker-storage file should exist. If user 660 | # deleted this file accidently without deleting thin pool, recreate it. 661 | if [ -n "$_STORAGE_OUT_FILE" -a ! -f "${_STORAGE_OUT_FILE}" ];then 662 | Info "${_STORAGE_OUT_FILE} file is missing. Recreating it." 663 | write_storage_config_file $STORAGE_DRIVER "$_STORAGE_OUT_FILE" 664 | fi 665 | fi 666 | 667 | process_auto_pool_extenion ${VG} ${thinpool_name} 668 | } 669 | 670 | lvm_pool_exists() { 671 | local lv_data 672 | local lvname lv lvsize 673 | local thinpool_name=$1 674 | local vg=$2 675 | 676 | if [ -z "$thinpool_name" ]; then 677 | Fatal "Thin pool name must be specified." 678 | fi 679 | lv_data=$( lvs --noheadings -o lv_name,lv_attr --separator , $vg | sed -e 's/^ *//') 680 | SAVEDIFS=$IFS 681 | for lv in $lv_data; do 682 | IFS=, 683 | read lvname lvattr <<< "$lv" 684 | # pool logical volume has "t" as first character in its attributes 685 | if [ "$lvname" == "$thinpool_name" ] && [[ $lvattr == t* ]]; then 686 | IFS=$SAVEDIFS 687 | return 0 688 | fi 689 | done 690 | IFS=$SAVEDIFS 691 | 692 | return 1 693 | } 694 | 695 | # If a ${_STORAGE_OUT_FILE} file is present and if it contains 696 | # dm.datadev or dm.metadatadev entries, that means we have used old mode 697 | # in the past. 698 | is_old_data_meta_mode() { 699 | if [ ! -f "${_STORAGE_OUT_FILE}" ];then 700 | return 1 701 | fi 702 | 703 | if ! grep -e "^${_STORAGE_OPTIONS}=.*dm\.datadev" -e "^${_STORAGE_OPTIONS}=.*dm\.metadatadev" ${_STORAGE_OUT_FILE} > /dev/null 2>&1;then 704 | return 1 705 | fi 706 | 707 | return 0 708 | } 709 | 710 | grow_root_pvs() { 711 | # If root is not in a volume group, then there are no root pvs and nothing 712 | # to do. 
# Grow the partitions backing the root volume group and resize the PVs.
# NOTE(review): the opening lines of grow_root_pvs() fall before this chunk;
# the header below is reconstructed from context (comments and growpart
# usage) — confirm against the complete file.
grow_root_pvs() {
  [ -z "$_ROOT_PVS" ] && return 0

  # Grow root pvs only if user asked for it through config file.
  [ "$GROWPART" != "true" ] && return

  if [ ! -x "/usr/bin/growpart" ];then
    Error "GROWPART=true is specified and /usr/bin/growpart executable is not available. Install /usr/bin/growpart and try again."
    return 1
  fi

  # Note that growpart is only variable here because we may someday support
  # using separate partitions on the same disk. Today we fail early in that
  # case. Also note that the way we are doing this, it should support LVM
  # RAID for the root device. In the mirrored or striped case, we are growing
  # partitions on all disks, so as long as they match, growing the LV should
  # also work.
  for pv in $_ROOT_PVS; do
    if ! test -b $pv; then
      Error "Not a block device: $pv"
    fi
    local major_hex minor_hex major_minor
    local devpath partition parent_path parent_device
    major_hex=$(stat -c '%t' $pv)
    minor_hex=$(stat -c '%T' $pv)
    major_minor=$((0x${major_hex})):$((0x${minor_hex}))
    devpath=$(realpath /sys/dev/block/$major_minor)
    partition=$(cat $devpath/partition)
    parent_path=$(dirname $devpath)
    parent_device=/dev/$(basename ${parent_path})
    # TODO: Remove the || true here
    growpart ${parent_device} ${partition} || true
    pvresize $pv
  done
}

# Grow the root LV and its filesystem to ROOT_SIZE (no-op when unset).
grow_root_lv_fs() {
  if [ -n "$ROOT_SIZE" ]; then
    # Allow user to pass in an argument that could be provided
    # to -L (like 10G or +5G) or an argument that could be passed
    # to -l (like 80%FREE). Switch based on if '%' is in string.
    local root_size_arg
    if [[ $ROOT_SIZE =~ % ]]; then
      root_size_arg="-l $ROOT_SIZE"
    else
      root_size_arg="-L $ROOT_SIZE"
    fi

    # TODO: Error checking if specified size is <= current size
    lvextend -r $root_size_arg $_ROOT_DEV || true
  fi
}

# Determines if a device is already added in a volume group as pv. Returns
# 0 on success.
is_dev_part_of_vg() {
  local dev=$1
  local vg=$2

  if ! pv_name=$(pvs --noheadings -o pv_name -S pv_name=$dev,vg_name=$vg); then
    Fatal "Error running command pvs. Exiting."
  fi

  [ -z "$pv_name" ] && return 1
  pv_name=$(echo $pv_name | tr -d '[ ]')
  [ "$pv_name" == "$dev" ] && return 0
  return 1
}

# Returns 0 if $1 is a partition, 1 if it is a whole disk or mpath device.
is_block_dev_partition() {
  local bdev=$1 devparent

  if ! disktype=$(lsblk -n --nodeps --output type ${bdev}); then
    Fatal "Failed to run lsblk on device $bdev"
  fi

  if [ "$disktype" == "part" ];then
    return 0
  fi

  if [ "$disktype" == "mpath" ];then
    return 1
  fi

  # For loop device partitions, lsblk reports type as "loop" and not "part".
  # So check if device has a parent in the tree and if it does, there are high
  # chances it is partition (except the case of lvm volumes)
  if ! devparent=$(lsblk -npls -o NAME ${bdev}|tail -n +2); then
    Fatal "Failed to run lsblk on device $bdev"
  fi

  if [ -n "$devparent" ];then
    return 0
  fi

  return 1
}

# Inspect $1 for existing signatures. Wipe them if WIPE_SIGNATURES=true,
# otherwise Fatal with instructions for the user.
check_wipe_block_dev_sig() {
  local bdev=$1
  local sig

  if ! sig=$(wipefs -p $bdev); then
    Fatal "Failed to check signatures on device $bdev"
  fi

  [ "$sig" == "" ] && return 0

  if [ "$WIPE_SIGNATURES" == "true" ];then
    Info "Wipe Signatures is set to true. Any signatures on $bdev will be wiped."
    if ! wipefs -a $bdev; then
      Fatal "Failed to wipe signatures on device $bdev"
    fi
    return 0
  fi

  # wipefs -p output is csv: offset,uuid,label,type. Skip the header row.
  # -r so backslashes in labels are not mangled by read.
  while IFS=, read -r offset uuid label type; do
    [ "$offset" == "# offset" ] && continue
    Fatal "Found $type signature on device ${bdev} at offset ${offset}. Wipe signatures using wipefs or use WIPE_SIGNATURES=true and retry."
  done <<< "$sig"
}

# This is used in compatibility mode
canonicalize_block_devs_compat() {
  local devs=$1 dev
  local devs_abs dev_abs
  local dest_dev

  for dev in ${devs}; do
    # If the device name is a symlink, follow it and use the target
    if [ -h "$dev" ];then
      if ! dest_dev=$(readlink -e $dev);then
        Fatal "Failed to resolve symbolic link $dev"
      fi
      dev=$dest_dev
    fi
    # Looks like we allowed just device name (sda) as valid input. In
    # such cases /dev/$dev should be a valid block device.
    dev_abs=$dev
    [ ! -b "$dev" ] && dev_abs="/dev/$dev"
    [ ! -b "$dev_abs" ] && Fatal "$dev_abs is not a valid block device."

    if is_block_dev_partition ${dev_abs}; then
      Fatal "Partition specification unsupported at this time."
    fi
    devs_abs="$devs_abs $dev_abs"
  done

  # Return list of devices to caller.
  echo "$devs_abs"
}

# This is used in config creation mode
canonicalize_block_devs_generic() {
  local devs=$1 dev
  local devs_resolved resolved_device

  for dev in ${devs}; do
    if ! resolved_device=$(realpath -e $dev);then
      Fatal "Failed to resolve path for device ${dev}"
    fi

    [ ! -b "$resolved_device" ] && Fatal "$resolved_device is not a valid block device."

    if is_block_dev_partition ${resolved_device}; then
      Fatal "Partition specification unsupported at this time."
    fi
    if [ -n "$devs_resolved" ]; then
      devs_resolved="$devs_resolved $resolved_device"
    else
      devs_resolved="$resolved_device"
    fi
  done

  # Return list of devices to caller.
  echo "$devs_resolved"
}

# Make sure passed in devices are valid block devies. Also make sure they
# are not partitions. Names which are of the form "sdb", convert them to
# their absolute path for processing in rest of the script.
canonicalize_block_devs() {
  local input_dev_list="$1"
  local devs_list

  if [ "$_DOCKER_COMPAT_MODE" == "1" ];then
    devs_list=$(canonicalize_block_devs_compat "$input_dev_list") || return 1
  else
    devs_list=$(canonicalize_block_devs_generic "$input_dev_list") || return 1
  fi
  echo $devs_list
}

# Scans all the disks listed in DEVS= and returns the disks which are not
# already part of volume group and are new and require further processing.
scan_disks() {
  local disk_list="$1"
  local vg=$2
  local wipe_signatures=$3
  local new_disks=""

  for dev in $disk_list; do
    local part=$(dev_query_first_child $dev)

    # Fix: message previously interpolated the global $VG instead of the
    # local $vg parameter the function was given.
    if [ -n "$part" ] && is_dev_part_of_vg ${part} $vg; then
      Info "Device ${dev} is already partitioned and is part of volume group $vg"
      continue
    fi

    # If signatures are being overridden, then simply return the disk as new
    # disk. Even if it is partitioned, partition signatures will be wiped.
    if [ "$wipe_signatures" == "true" ];then
      new_disks="$new_disks $dev"
      continue
    fi

    # If device does not have partitions, it is a new disk requiring processing.
    if [[ -z "$part" ]]; then
      new_disks="$dev $new_disks"
      continue
    fi

    Fatal "Device $dev is already partitioned and cannot be added to volume group $vg"
  done

  echo $new_disks
}
930 | if [[ -z "$part" ]]; then 931 | new_disks="$dev $new_disks" 932 | continue 933 | fi 934 | 935 | Fatal "Device $dev is already partitioned and cannot be added to volume group $vg" 936 | done 937 | 938 | echo $new_disks 939 | } 940 | 941 | determine_partition_type() { 942 | local dev="$1" size_bytes part_type 943 | 944 | if ! size_bytes=$(blockdev --getsize64 "$dev"); then 945 | Fatal "Failed to determine size of disk $dev" 946 | fi 947 | 948 | if [ $size_bytes -gt $_MAX_MBR_SIZE_BYTES ];then 949 | part_type="gpt" 950 | else 951 | part_type="dos" 952 | fi 953 | echo $part_type 954 | } 955 | 956 | create_partition_sfdisk(){ 957 | local dev="$1" part_type="$2" size part_label 958 | # Use a single partition of a whole device 959 | # TODO: 960 | # * Consider gpt, or unpartitioned volumes 961 | # * Error handling when partition(s) already exist 962 | # * Deal with loop/nbd device names. See growpart code 963 | if [ "$part_type" == "gpt" ];then 964 | # Linux LVM GUID for GPT. Taken from Wiki. 965 | part_label="E6D6D379-F507-44C2-A23C-238F2A3DF928" 966 | # Create as big a partition as possible. 967 | size="" 968 | else 969 | part_label="8e" 970 | size=$(( $( awk "\$4 ~ /"$( basename $dev )"/ { print \$3 }" /proc/partitions ) * 2 - 2048 )) 971 | fi 972 | cat </dev/null 1056 | else 1057 | sfdisk --delete "$dev" 1 >/dev/null 1058 | fi 1059 | } 1060 | 1061 | # Remove disk pvs and partitions. This is called in reset storage path. 1062 | # If partition or pv does not exist, it will still return success. Error 1063 | # will be returned only if pv or partition exists and removal fails. 1064 | remove_disk_pvs_parts() { 1065 | local devs="$1" part 1066 | 1067 | for dev in $devs; do 1068 | part=$(dev_query_first_child $dev) 1069 | [ -z "$part" ] && continue 1070 | 1071 | if ! remove_pv_if_exists $part; then 1072 | Error "Failed to remove physical volume label on device $part" 1073 | return 1 1074 | fi 1075 | 1076 | if ! 
remove_partition $dev; then 1077 | Error "Failed to remove partition on device $dev" 1078 | return 1 1079 | fi 1080 | done 1081 | } 1082 | 1083 | create_extend_volume_group() { 1084 | if [ -z "$_VG_EXISTS" ]; then 1085 | vgcreate $VG $_PVS 1086 | _VG_CREATED=1 1087 | _VG_EXISTS=1 1088 | else 1089 | # TODO: 1090 | # * Error handling when PV is already part of a VG 1091 | vgextend $VG $_PVS 1092 | fi 1093 | } 1094 | 1095 | # Auto extension logic. Create a profile for pool and attach that profile 1096 | # the pool volume. 1097 | enable_auto_pool_extension() { 1098 | local volume_group=$1 1099 | local pool_volume=$2 1100 | local profileName="${volume_group}--${pool_volume}-extend" 1101 | local profileFile="${profileName}.profile" 1102 | local profileDir 1103 | local tmpFile=`mktemp -p /run -t tmp.XXXXX` 1104 | 1105 | profileDir=$(lvm dumpconfig --type full | grep "profile_dir" | cut -d "=" -f2 | sed 's/"//g') 1106 | [ -n "$profileDir" ] || return 1 1107 | 1108 | if [ ! -n "$POOL_AUTOEXTEND_THRESHOLD" ];then 1109 | Error "POOL_AUTOEXTEND_THRESHOLD not specified" 1110 | return 1 1111 | fi 1112 | 1113 | if [ ! 
# Auto extension logic. Create a profile for pool and attach that profile
# the pool volume.
enable_auto_pool_extension() {
  local volume_group=$1
  local pool_volume=$2
  local profileName="${volume_group}--${pool_volume}-extend"
  local profileFile="${profileName}.profile"
  local profileDir
  local tmpFile=$(mktemp -p /run -t tmp.XXXXX)

  profileDir=$(lvm dumpconfig --type full | grep "profile_dir" | cut -d "=" -f2 | sed 's/"//g')
  [ -n "$profileDir" ] || return 1

  if [ ! -n "$POOL_AUTOEXTEND_THRESHOLD" ];then
    Error "POOL_AUTOEXTEND_THRESHOLD not specified"
    return 1
  fi

  if [ ! -n "$POOL_AUTOEXTEND_PERCENT" ];then
    Error "POOL_AUTOEXTEND_PERCENT not specified"
    return 1
  fi

  # Write the lvm profile into the temp file, then move it into place.
  # (The heredoc redirection was garbled in the reviewed rendering; writing
  # to $tmpFile is the only reading consistent with the mv below.)
  cat > $tmpFile <<EOF
activation {
  thin_pool_autoextend_threshold=${POOL_AUTOEXTEND_THRESHOLD}
  thin_pool_autoextend_percent=${POOL_AUTOEXTEND_PERCENT}
}
EOF
  mv -Z $tmpFile ${profileDir}/${profileFile}
  lvchange --metadataprofile ${profileName} ${volume_group}/${pool_volume}
}

# Detach the auto-extension profile from the pool and delete the profile file.
disable_auto_pool_extension() {
  local volume_group=$1
  local pool_volume=$2
  local profileName="${volume_group}--${pool_volume}-extend"
  local profileFile="${profileName}.profile"
  local profileDir

  profileDir=$(lvm dumpconfig --type full | grep "profile_dir" | cut -d "=" -f2 | sed 's/"//g')
  [ -n "$profileDir" ] || return 1

  lvchange --detachprofile ${volume_group}/${pool_volume}
  rm -f ${profileDir}/${profileFile}
}

# Enable or disable automatic pool extension based on $AUTO_EXTEND_POOL.
# NOTE(review): name is misspelled ("extenion") but kept — callers elsewhere
# in the file use this exact name.
process_auto_pool_extenion() {
  local vg=$1 thinpool_name=$2

  # Enable or disable automatic pool extension
  if [ "$AUTO_EXTEND_POOL" == "yes" ];then
    enable_auto_pool_extension ${vg} ${thinpool_name}
  else
    disable_auto_pool_extension ${vg} ${thinpool_name}
  fi
}

# Gets the current ${_STORAGE_OPTIONS}= string.
get_current_storage_options() {
  local options

  if [ ! -f "${_STORAGE_OUT_FILE}" ];then
    return 0
  fi

  if options=$(grep -e "^${_STORAGE_OPTIONS}=" ${_STORAGE_OUT_FILE} | sed "s/${_STORAGE_OPTIONS}=//" | sed 's/^ *//' | sed 's/^"//' | sed 's/"$//');then
    echo $options
    return 0
  fi

  return 1
}

# Returns 0 when $1 is empty or one of the drivers in $_STORAGE_DRIVERS.
is_valid_storage_driver() {
  local driver=$1 d

  # Empty driver is valid. That means user does not want us to setup any
  # storage.
  [ -z "$driver" ] && return 0

  for d in $_STORAGE_DRIVERS;do
    [ "$driver" == "$d" ] && return 0
  done

  return 1
}

# Gets the existing storage driver configured in /etc/sysconfig/docker-storage
get_existing_storage_driver() {
  local options driver

  options=$_CURRENT_STORAGE_OPTIONS

  [ -z "$options" ] && return 0

  # Check if -storage-driver is there.
  if ! driver=$(echo $options | sed -n 's/.*\(--storage-driver [ ]*[a-z0-9]*\).*/\1/p' | sed 's/--storage-driver *//');then
    return 1
  fi

  # If pattern does not match then driver == options.
  if [ -n "$driver" ] && [ ! "$driver" == "$options" ];then
    echo $driver
    return 0
  fi

  # Check if -s is there.
  # Fix: the character class was "[a-z][0-9]*" which matches exactly one
  # letter (it would extract "o" from "-s overlay2"); use [a-z0-9]* to
  # match the full driver name, consistent with the --storage-driver case.
  if ! driver=$(echo $options | sed -n 's/.*\(-s [ ]*[a-z0-9]*\).*/\1/p' | sed 's/-s *//');then
    return 1
  fi

  # If pattern does not match then driver == options.
  if [ -n "$driver" ] && [ ! "$driver" == "$options" ];then
    echo $driver
    return 0
  fi

  # We shipped some versions where we did not specify -s devicemapper.
  # If dm.thinpooldev= is present driver is devicemapper.
  if echo $options | grep -q -e "--storage-opt dm.thinpooldev=";then
    echo "devicemapper"
    return 0
  fi

  #Failed to determine existing storage driver.
  return 1
}

# Returns 0 if logical volume $2/$1 exists.
extra_volume_exists() {
  local lv_name=$1
  local vg=$2

  lvs $vg/$lv_name > /dev/null 2>&1 && return 0
  return 1
}

# This returns the mountpoint of $vg/$lv_name under $mount_dir (if mounted).
extra_lv_mountpoint() {
  local mounts
  local vg=$1
  local lv_name=$2
  local mount_dir=$3
  mounts=$(findmnt -n -o TARGET --source /dev/$vg/$lv_name | grep "^$mount_dir")
  echo $mounts
}

# Mount $vg/$lv_name on $mount_dir with project quota, unless already mounted.
# NOTE: $mounts is deliberately not local — preserved from original code.
mount_extra_volume() {
  local vg=$1
  local lv_name=$2
  local mount_dir=$3
  remove_systemd_mount_target $mount_dir
  mounts=$(extra_lv_mountpoint $vg $lv_name $mount_dir)
  if [ -z "$mounts" ]; then
    mount -o pquota /dev/$vg/$lv_name $mount_dir
  fi
}

# Create a logical volume of size specified by first argument. Name of the
# volume is specified using second argument.
create_lv() {
  local data_size=$1
  local data_lv_name=$2

  # TODO: Error handling when data_size > available space.
  if [[ $data_size == *%* ]]; then
    lvcreate -y -l $data_size -n $data_lv_name $VG || return 1
  else
    lvcreate -y -L $data_size -n $data_lv_name $VG || return 1
  fi
  return 0
}

# Create LV $1 of size $3, put an xfs filesystem on it and mount on $2.
setup_extra_volume() {
  local lv_name=$1
  local mount_dir=$2
  local lv_size=$3

  if ! create_lv $lv_size $lv_name; then
    Fatal "Failed to create volume $lv_name of size ${lv_size}."
  fi

  if ! mkfs -t xfs /dev/$VG/$lv_name > /dev/null; then
    Fatal "Failed to create filesystem on /dev/$VG/${lv_name}."
  fi

  if ! mount_extra_volume $VG $lv_name $mount_dir; then
    Fatal "Failed to mount volume ${lv_name} on ${mount_dir}"
  fi

  # setup right selinux label first time fs is created. Mount operation
  # changes the label of directory to reflect the label on root inode
  # of mounted fs.
  if ! restore_selinux_context $mount_dir; then
    return 1
  fi
}
# This is used only in compatibility mode. We are still using systemd
# mount unit only for compatibility mode. Reason being that upon service
# restart, we run into races and device might not yet be up when we try
# to mount it. And we don't know if this is first time start and we need
# to create volume or this is restart and we need to wait for device. So
# continue to use systemd mount unit for compatibility mode.
setup_systemd_mount_unit_compat() {
  local filename
  local vg=$1
  local lv_name=$2
  local mount_dir=$3
  local unit_file_path

  # filename must match the path ${mount_dir}.
  # e.g if ${mount_dir} is /var/lib/containers
  # then filename will be var-lib-containers.mount
  filename=$(systemd_escaped_filename ${mount_dir})
  unit_file_path="/etc/systemd/system/$filename"

  # If unit file already exists, nothing to do.
  [ -f "$unit_file_path" ] && return 0

  # Write the unit to a temp file first, then rename into place.
  # (The heredoc redirection was garbled in the reviewed rendering; writing
  # to ${unit_file_path}.tmp is the only reading consistent with the mv
  # below.)
  cat > "${unit_file_path}.tmp" <<EOF
# WARNING: This file was auto generated by container-storage-setup. Do not
# edit it. In the future, this file might be moved to a different location.
[Unit]
Description=Mount $lv_name on $mount_dir directory.
Before=docker-storage-setup.service

[Mount]
What=/dev/$vg/$lv_name
Where=${mount_dir}
Type=xfs
Options=pquota

[Install]
WantedBy=docker-storage-setup.service
EOF
  mv "${unit_file_path}.tmp" "$unit_file_path"
  systemctl daemon-reload
  systemctl enable $filename >/dev/null 2>&1
  systemctl start $filename
}

# Compatibility-mode variant: create/mount the extra LV and drop a systemd
# mount unit for it.
setup_extra_lv_fs_compat() {
  [ -z "$_RESOLVED_MOUNT_DIR_PATH" ] && return 0
  if ! setup_extra_dir $_RESOLVED_MOUNT_DIR_PATH; then
    return 1
  fi
  # If we are restarting, then extra volume should exist. This unit
  # file is dependent on extra volume mount unit file. That means this
  # code should run after mount unit has activated successfully. That
  # means after extra volume has come up.

  # We had got rid of this logic and reintroducing it back. That means
  # there can be configurations out there which have extra volume but
  # don't have unit file. So in such case, drop a unit file now. This
  # is still racy though. There is no guarantee that volume will be
  # up by the time this code runs when unit file is not present already.
  if extra_volume_exists $CONTAINER_ROOT_LV_NAME $VG; then
    if ! setup_systemd_mount_unit_compat "$VG" "$CONTAINER_ROOT_LV_NAME" "$_RESOLVED_MOUNT_DIR_PATH"; then
      Fatal "Failed to setup systemd mount unit for extra volume $CONTAINER_ROOT_LV_NAME."
    fi
    return 0
  fi

  if [ -z "$CONTAINER_ROOT_LV_SIZE" ]; then
    Fatal "Specify a valid value for CONTAINER_ROOT_LV_SIZE."
  fi
  if ! check_data_size_syntax $CONTAINER_ROOT_LV_SIZE; then
    Fatal "CONTAINER_ROOT_LV_SIZE value $CONTAINER_ROOT_LV_SIZE is invalid."
  fi
  # Container runtime extra volume does not exist. Create one.
  if ! setup_extra_volume $CONTAINER_ROOT_LV_NAME $_RESOLVED_MOUNT_DIR_PATH $CONTAINER_ROOT_LV_SIZE; then
    Fatal "Failed to setup extra volume $CONTAINER_ROOT_LV_NAME."
  fi

  if ! setup_systemd_mount_unit_compat "$VG" "$CONTAINER_ROOT_LV_NAME" "$_RESOLVED_MOUNT_DIR_PATH"; then
    Fatal "Failed to setup systemd mount unit for extra volume $CONTAINER_ROOT_LV_NAME."
  fi
}

# Create (if needed) and mount the extra container-root LV.
setup_extra_lv_fs() {
  [ -z "$_RESOLVED_MOUNT_DIR_PATH" ] && return 0
  if ! setup_extra_dir $_RESOLVED_MOUNT_DIR_PATH; then
    return 1
  fi
  if extra_volume_exists $CONTAINER_ROOT_LV_NAME $VG; then
    if ! mount_extra_volume $VG $CONTAINER_ROOT_LV_NAME $_RESOLVED_MOUNT_DIR_PATH; then
      Fatal "Failed to mount volume $CONTAINER_ROOT_LV_NAME on $_RESOLVED_MOUNT_DIR_PATH"
    fi
    return 0
  fi
  if [ -z "$CONTAINER_ROOT_LV_SIZE" ]; then
    Fatal "Specify a valid value for CONTAINER_ROOT_LV_SIZE."
  fi
  if ! check_data_size_syntax $CONTAINER_ROOT_LV_SIZE; then
    Fatal "CONTAINER_ROOT_LV_SIZE value $CONTAINER_ROOT_LV_SIZE is invalid."
  fi
  # Container runtime extra volume does not exist. Create one.
  if ! setup_extra_volume $CONTAINER_ROOT_LV_NAME $_RESOLVED_MOUNT_DIR_PATH $CONTAINER_ROOT_LV_SIZE; then
    Fatal "Failed to setup extra volume $CONTAINER_ROOT_LV_NAME."
  fi
}

# Create (if needed) and mount the deprecated docker-root LV.
setup_docker_root_lv_fs() {
  [ "$DOCKER_ROOT_VOLUME" != "yes" ] && return 0
  if ! setup_docker_root_dir; then
    return 1
  fi
  if extra_volume_exists $_DOCKER_ROOT_LV_NAME $VG; then
    if ! mount_extra_volume $VG $_DOCKER_ROOT_LV_NAME $_DOCKER_ROOT_DIR; then
      Fatal "Failed to mount volume $_DOCKER_ROOT_LV_NAME on $_DOCKER_ROOT_DIR"
    fi
    return 0
  fi
  if [ -z "$DOCKER_ROOT_VOLUME_SIZE" ]; then
    Fatal "Specify a valid value for DOCKER_ROOT_VOLUME_SIZE."
  fi
  if ! check_data_size_syntax $DOCKER_ROOT_VOLUME_SIZE; then
    Fatal "DOCKER_ROOT_VOLUME_SIZE value $DOCKER_ROOT_VOLUME_SIZE is invalid."
  fi
  # Docker root volume does not exist. Create one.
  if ! setup_extra_volume $_DOCKER_ROOT_LV_NAME $_DOCKER_ROOT_DIR $DOCKER_ROOT_VOLUME_SIZE; then
    Fatal "Failed to setup logical volume $_DOCKER_ROOT_LV_NAME."
  fi
}

# Validate mutually-dependent configuration variables; Fatal on conflicts.
# Also resolves CONTAINER_ROOT_LV_MOUNT_PATH into _RESOLVED_MOUNT_DIR_PATH
# and migrates deprecated EXTRA_DOCKER_STORAGE_OPTIONS.
check_storage_options(){
  if [ "$STORAGE_DRIVER" == "devicemapper" ] && [ -z "$CONTAINER_THINPOOL" ];then
    Fatal "CONTAINER_THINPOOL must be defined for the devicemapper storage driver."
  fi

  # Populate $_RESOLVED_MOUNT_DIR_PATH
  if [ -n "$CONTAINER_ROOT_LV_MOUNT_PATH" ];then
    if ! _RESOLVED_MOUNT_DIR_PATH=$(realpath $CONTAINER_ROOT_LV_MOUNT_PATH);then
      Fatal "Failed to resolve path $CONTAINER_ROOT_LV_MOUNT_PATH"
    fi
  fi

  if [ "$DOCKER_ROOT_VOLUME" == "yes" ] && [ -n "$CONTAINER_ROOT_LV_MOUNT_PATH" ];then
    Fatal "DOCKER_ROOT_VOLUME and CONTAINER_ROOT_LV_MOUNT_PATH are mutually exclusive options."
  fi

  if [ -n "$CONTAINER_ROOT_LV_NAME" ] && [ -z "$CONTAINER_ROOT_LV_MOUNT_PATH" ];then
    Fatal "CONTAINER_ROOT_LV_MOUNT_PATH cannot be empty, when CONTAINER_ROOT_LV_NAME is set"
  fi

  if [ -n "$CONTAINER_ROOT_LV_MOUNT_PATH" ] && [ -z "$CONTAINER_ROOT_LV_NAME" ];then
    Fatal "CONTAINER_ROOT_LV_NAME cannot be empty, when CONTAINER_ROOT_LV_MOUNT_PATH is set"
  fi

  # Allow using DOCKER_ROOT_VOLUME only in compatibility mode.
  if [ "$DOCKER_ROOT_VOLUME" == "yes" ] && [ "$_DOCKER_COMPAT_MODE" != "1" ];then
    Fatal "DOCKER_ROOT_VOLUME is deprecated. Use CONTAINER_ROOT_LV_MOUNT_PATH instead."
  fi

  if [ "$DOCKER_ROOT_VOLUME" == "yes" ];then
    Info "DOCKER_ROOT_VOLUME is deprecated, and will be removed soon. Use CONTAINER_ROOT_LV_MOUNT_PATH instead."
  fi

  if [ -n "${EXTRA_DOCKER_STORAGE_OPTIONS}" ]; then
    Info "EXTRA_DOCKER_STORAGE_OPTIONS is deprecated, please use EXTRA_STORAGE_OPTIONS"
    if [ -n "${EXTRA_STORAGE_OPTIONS}" ]; then
      Fatal "EXTRA_DOCKER_STORAGE_OPTIONS and EXTRA_STORAGE_OPTIONS are mutually exclusive options."
    fi
    EXTRA_STORAGE_OPTIONS=${EXTRA_DOCKER_STORAGE_OPTIONS}
    unset EXTRA_DOCKER_STORAGE_OPTIONS
  fi
}
# This is used in compatibility mode.
setup_storage_compat() {
  local current_driver
  local containerroot

  if [ "$STORAGE_DRIVER" == "" ];then
    Info "STORAGE_DRIVER not set, no storage will be configured. You must specify STORAGE_DRIVER if you want to configure storage."
    exit 0
  fi

  if ! is_valid_storage_driver $STORAGE_DRIVER;then
    Fatal "Invalid storage driver: ${STORAGE_DRIVER}."
  fi

  if ! current_driver=$(get_existing_storage_driver);then
    Fatal "Failed to determine existing storage driver."
  fi

  # If storage is configured and new driver should match old one.
  if [ -n "$current_driver" ] && [ "$current_driver" != "$STORAGE_DRIVER" ];then
    Info "Storage is already configured with ${current_driver} driver. Can't configure it with ${STORAGE_DRIVER} driver. To override, remove ${_STORAGE_OUT_FILE} and retry."
    check_existing_thinpool_compat || true
    return 0
  fi

  if [ "$STORAGE_DRIVER" == "overlay" -o "$STORAGE_DRIVER" == "overlay2" ]; then
    if ! can_mount_overlay; then
      Fatal "Can not setup storage driver $STORAGE_DRIVER as system does not support it. Specify a different driver."
    fi
  fi

  # If a user decides to setup (a) and (b)/(c):
  # a) lvm thin pool for devicemapper.
  # b) a separate volume for container runtime root.
  # c) a separate named ($CONTAINER_ROOT_LV_NAME) volume for $CONTAINER_ROOT_LV_MOUNT_PATH.
  # (a) will be setup first, followed by (b) or (c).

  # Set up lvm thin pool LV.
  if [ "$STORAGE_DRIVER" == "devicemapper" ]; then
    setup_lvm_thin_pool_compat
  else
    write_storage_config_file $STORAGE_DRIVER "$_STORAGE_OUT_FILE"
  fi

  # If container root is on a separate volume, setup that.
  if ! setup_docker_root_lv_fs; then
    Error "Failed to setup docker root volume."
    return 1
  fi

  # Set up a separate named ($CONTAINER_ROOT_LV_NAME) volume
  # for $CONTAINER_ROOT_LV_MOUNT_PATH.
  if ! setup_extra_lv_fs_compat; then
    Error "Failed to setup logical volume for $CONTAINER_ROOT_LV_MOUNT_PATH."
    return 1
  fi

  if [ "$STORAGE_DRIVER" == "overlay" -o "$STORAGE_DRIVER" == "overlay2" ]; then
    # This is little hacky. We are guessing where overlay2 will be setup
    # by container runtime environment. At some point of time this should
    # be passed in by a config variable.
    containerroot=${_RESOLVED_MOUNT_DIR_PATH:-/var}

    if ! is_xfs_ftype_enabled "$containerroot"; then
      Error "XFS filesystem at ${containerroot} has ftype=0, cannot use overlay backend; consider different driver or separate volume or OS reprovision"
      return 1
    fi
  fi
}

# Relabel $1 recursively so the mounted fs gets the right selinux context.
restore_selinux_context() {
  local dir=$1

  if ! restorecon -R $dir; then
    Error "restorecon -R $dir failed."
    return 1
  fi
}

# Parse docker's OPTIONS line for -g/--graph and resolve it into
# _DOCKER_ROOT_DIR.
get_docker_root_dir(){
  local flag=false path
  options=$(grep -e "^OPTIONS" /etc/sysconfig/docker|cut -d"'" -f 2)
  for opt in $options
  do
    if [ "$flag" = true ];then
      path=$opt
      flag=false
      continue
    fi
    case "$opt" in
      "-g"|"--graph")
        # Value is the following word; pick it up on the next iteration.
        flag=true
        ;;
      -g=*|--graph=*)
        path=$(echo $opt|cut -d"=" -f 2)
        ;;
      *)
        ;;
    esac
  done
  if [ -z "$path" ];then
    return
  fi
  if ! _DOCKER_ROOT_DIR=$(realpath -m $path);then
    Fatal "Failed to resolve path $path"
  fi
}

# Ensure directory $1 exists.
setup_extra_dir() {
  local resolved_mount_dir_path=$1
  [ -d "$resolved_mount_dir_path" ] && return 0

  # Directory does not exist. Create one.
  mkdir -p $resolved_mount_dir_path
  return $?
}

# Ensure the docker root directory exists, resolving it first.
setup_docker_root_dir() {
  if ! get_docker_root_dir; then
    return 1
  fi

  # Fix: this test previously read '[ -d "_$DOCKER_ROOT_DIR" ]', which
  # checks for a literal path starting with "_" and names the wrong
  # variable; the intent is clearly the resolved _DOCKER_ROOT_DIR.
  [ -d "$_DOCKER_ROOT_DIR" ] && return 0

  # Directory does not exist. Create one.
  mkdir -p $_DOCKER_ROOT_DIR
  return $?
}


# This deals with determining rootfs, root vg and pvs etc and sets the
# global variables accordingly.
determine_rootfs_pvs_vg() {
  # Read mounts
  _ROOT_DEV=$( awk '$2 ~ /^\/$/ && $1 !~ /rootfs/ { print $1 }' /proc/mounts )
  if ! _ROOT_VG=$(lvs --noheadings -o vg_name $_ROOT_DEV 2>/dev/null);then
    Info "Volume group backing root filesystem could not be determined"
    _ROOT_VG=
  else
    _ROOT_VG=$(echo $_ROOT_VG | sed -e 's/^ *//' -e 's/ *$//')
  fi

  _ROOT_PVS=
  if [ -n "$_ROOT_VG" ];then
    _ROOT_PVS=$( pvs --noheadings -o pv_name,vg_name | awk "\$2 ~ /^$_ROOT_VG\$/ { print \$1 }" )
  fi

  _VG_EXISTS=
  if [ -z "$VG" ]; then
    if [ -n "$_ROOT_VG" ]; then
      VG=$_ROOT_VG
      _VG_EXISTS=1
    fi
  else
    if vg_exists "$VG";then
      _VG_EXISTS=1
    fi
  fi
}

# Resolve DEVS, partition any new disks and create/extend the volume group.
partition_disks_create_vg() {
  local dev_list

  # If there is no volume group specified or no root volume group, there is
  # nothing to do in terms of dealing with disks.
  if [[ -n "$DEVS" && -n "$VG" ]]; then
    _DEVS_RESOLVED=$(canonicalize_block_devs "${DEVS}") || return 1

    # If all the disks have already been correctly partitioned, there is
    # nothing more to do
    dev_list=$(scan_disks "$_DEVS_RESOLVED" "$VG" "$WIPE_SIGNATURES") || return 1
    if [[ -n "$dev_list" ]]; then
      for dev in $dev_list; do
        check_wipe_block_dev_sig $dev
      done
      create_disk_partitions "$dev_list"
      create_extend_volume_group
    fi
  fi
}

# This is used in compatibility mode.
reset_storage_compat() {
  local tpool

  # Check if a thin pool is already configured in /etc/sysconfig/docker-storage
  tpool=$(get_configured_thin_pool)
  if [ -n "$_RESOLVED_MOUNT_DIR_PATH" ] && [ -n "$CONTAINER_ROOT_LV_NAME" ];then
    reset_extra_volume_compat $CONTAINER_ROOT_LV_NAME $_RESOLVED_MOUNT_DIR_PATH $VG
  fi

  if [ "$DOCKER_ROOT_VOLUME" == "yes" ];then
    if ! get_docker_root_dir; then
      return 1
    fi
    reset_extra_volume_compat $_DOCKER_ROOT_LV_NAME $_DOCKER_ROOT_DIR $VG
  fi

  if [ -n "$tpool" ]; then
    local tpool_vg tpool_lv
    Info "Found an already configured thin pool $tpool in ${_STORAGE_OUT_FILE}"
    if ! is_managed_tpool_compat "$tpool"; then
      Fatal "Thin pool ${tpool} does not seem to be managed by container-storage-setup. Exiting."
    fi
    tpool_vg=$(get_dmdev_vg $tpool)
    reset_lvm_thin_pool ${CONTAINER_THINPOOL} "$tpool_vg"
  elif [ "$STORAGE_DRIVER" == "devicemapper" ]; then
    reset_lvm_thin_pool ${CONTAINER_THINPOOL} $VG
  fi

  rm -f ${_STORAGE_OUT_FILE}
}
# Print command-line help.
usage() {
  cat <<FOE
Usage: $1 [OPTIONS]
Usage: $1 [OPTIONS] COMMAND [arg...]

Grows the root filesystem and sets up storage for container runtimes

Options:
  --help     Print help message
  --reset    Reset your docker storage to init state.
  --version  Print version information.

Commands:
  create     Create storage configuration
  activate   Activate storage configuration
  deactivate Deactivate storage configuration
  remove     Remove storage configuration
  list       List storage configuration
  export     Send storage configuration output file to stdout
  add-dev    Add block device to storage configuration
FOE
}

#
# START of Helper functions dealing with commands and storage setup for new
# design
#
# Functions dealing with metadata handling
create_metadata() {
  local metafile=$1

  # Write to a temp file and rename so readers never see a partial file.
  cat > ${metafile}.tmp <<EOF
_M_METADATA_VERSION=$_METADATA_VERSION
_M_STORAGE_DRIVER=$STORAGE_DRIVER
_M_VG=$VG
_M_VG_CREATED=$_VG_CREATED
_M_DEVS_RESOLVED="$_DEVS_RESOLVED"
_M_CONTAINER_THINPOOL=$CONTAINER_THINPOOL
_M_CONTAINER_ROOT_LV_NAME=$CONTAINER_ROOT_LV_NAME
_M_CONTAINER_ROOT_LV_MOUNT_PATH=$CONTAINER_ROOT_LV_MOUNT_PATH
_M_AUTO_EXTEND_POOL=$AUTO_EXTEND_POOL
_M_DEVICE_WAIT_TIMEOUT=$DEVICE_WAIT_TIMEOUT
EOF
  mv ${metafile}.tmp ${metafile}
}

# Append $2 to the _M_DEVS_RESOLVED list stored in metadata file $1.
metadata_update_add_dev() {
  local metafile=$1
  local new_resolved_dev=$2
  local updated_resolved_devs

  cp $metafile ${metafile}.tmp

  if [ -z "$_M_DEVS_RESOLVED" ]; then
    updated_resolved_devs="$new_resolved_dev"
  else
    updated_resolved_devs="$_M_DEVS_RESOLVED $new_resolved_dev"
  fi

  if ! sed -i "s;^_M_DEVS_RESOLVED=.*$;_M_DEVS_RESOLVED=\"${updated_resolved_devs}\";" ${metafile}.tmp;then
    Error "Failed to update _M_DEVS_RESOLVED in metadata."
    return 1
  fi
  mv ${metafile}.tmp ${metafile}
}

# Record status $2 for configuration $1 (atomically, via temp file).
set_config_status() {
  local config_name=$1
  local status=$2
  local status_file="$_CONFIG_DIR/$config_name/$_STATUSFILE_NAME"

  mkdir -p "$_CONFIG_DIR/$config_name"
  echo "$status" > ${status_file}.tmp
  mv ${status_file}.tmp ${status_file}
}

# Echo the recorded status of configuration $1.
get_config_status() {
  local config_name=$1
  local status_file="$_CONFIG_DIR/$config_name/$_STATUSFILE_NAME"
  local curr_status

  curr_status=$(cat $status_file)
  echo $curr_status
}

# Create a storage config directory from input file $2, with metadata and
# the generated output file.
create_storage_config() {
  local config_path=$1
  local infile=$2

  mkdir -p $config_path
  cp $infile $config_path/$_INFILE_NAME
  touch $config_path/$_METAFILE_NAME

  create_metadata "$config_path/$_METAFILE_NAME"
  write_storage_config_file "$STORAGE_DRIVER" "$config_path/$_OUTFILE_NAME"
}

# activate command processing start

# Wait for thin pool for certain time interval. If thinpool is found 0 is
# returned otherwise 1.
wait_for_thinpool() {
  local thinpool_name=$1
  local vg=$2
  local timeout=$3

  if lvm_pool_exists $thinpool_name $vg; then
    return 0
  fi

  if [ -z "$timeout" ] || [ "$timeout" == "0" ];then
    return 1
  fi

  while [ $timeout -gt 0 ]; do
    Info "Waiting for lvm thin pool $vg/${thinpool_name}. Wait time remaining is $timeout seconds"
    if [ $timeout -le 5 ];then
      sleep $timeout
    else
      sleep 5
    fi
    timeout=$((timeout-5))
    if lvm_pool_exists $thinpool_name $vg; then
      return 0
    fi
  done

  Info "Timed out waiting for lvm thin pool $vg/${thinpool_name}"
  return 1
}

# Wait for and activate the devicemapper thin pool.
activate_devicemapper() {
  local thinpool_name=$1
  local vg=$2
  local timeout=$3

  # TODO: Add logic to activate volume group. For now it assumes that
  # volume group will auto activate when devices are ready.

  # Wait for thin pool
  if ! wait_for_thinpool $thinpool_name $vg $timeout;then
    return 1
  fi

  # Activate thin pool
  if ! lvchange -ay -K $vg/$thinpool_name; then
    Error "Thin pool $vg/$thinpool_name activation failed"
    return 1
  fi
  return 0
}

# Activate storage for driver $1 (no-op for empty/overlay drivers).
activate_storage_driver() {
  local driver=$1

  if ! is_valid_storage_driver $driver; then
    Error "Invalid storage driver $driver"
    return 1
  fi

  [ "$driver" == "" ] && return 0
  [ "$driver" == "overlay" -o "$driver" == "overlay2" ] && return 0

  if [ "$driver" == "devicemapper" ];then
    if ! activate_devicemapper $_M_CONTAINER_THINPOOL $_M_VG $_M_DEVICE_WAIT_TIMEOUT; then
      Error "Activation of driver $driver failed"
      return 1
    fi
  fi
}

# Wait for logical volume
wait_for_lv() {
  local lv_name=$1
  local vg=$2
  local timeout=$3

  if extra_volume_exists $lv_name $vg; then
    return 0
  fi

  if [ -z "$timeout" ] || [ "$timeout" == "0" ];then
    return 1
  fi

  while [ $timeout -gt 0 ]; do
    Info "Waiting for logical volume $vg/${lv_name}. Wait time remaining is $timeout seconds"
    if [ $timeout -le 5 ];then
      sleep $timeout
    else
      sleep 5
    fi
    timeout=$((timeout-5))
    if extra_volume_exists $lv_name $vg; then
      return 0
    fi
  done

  Info "Timed out waiting for logical volume $vg/${lv_name}"
  return 1
}

# Wait for, activate and mount the extra logical volume $2/$1.
activate_extra_lv_fs() {
  local lv_name=$1
  local vg=$2
  local timeout=$3
  local mount_path=$4

  if ! wait_for_lv $lv_name $vg $timeout; then
    Error "logical volume $vg/${lv_name} does not exist"
    return 1
  fi

  if ! lvchange -ay $vg/$lv_name; then
    Error "Failed to activate volume $vg/$lv_name"
    return 1
  fi

  if ! mount_extra_volume $vg $lv_name $mount_path; then
    Error "Failed to mount volume $vg/$lv_name on $mount_path"
    return 1
  fi
}

# activate command processing start
run_command_activate() {
  local metafile_path="$_CONFIG_DIR/$_CONFIG_NAME/$_METAFILE_NAME"
  local status_file="$_CONFIG_DIR/$_CONFIG_NAME/$_STATUSFILE_NAME"
  local curr_status

  [ ! -d "$_CONFIG_DIR/$_CONFIG_NAME" ] && Fatal "Storage configuration $_CONFIG_NAME does not exist"

  [ ! -e "$metafile_path" ] && Fatal "Storage configuration $_CONFIG_NAME metadata does not exist"
  source "$metafile_path"

  if ! curr_status=$(cat $status_file); then
    Fatal "Failed to determine current status of storage configuration $_CONFIG_NAME"
  fi

  if [ "$curr_status" == "invalid" ];then
    Fatal "Storage configuration $_CONFIG_NAME is invalid. Can't activate it."
  fi

  if ! activate_storage_driver $_M_STORAGE_DRIVER; then
    Fatal "Activation of storage config $_CONFIG_NAME failed"
  fi

  # Populate $_RESOLVED_MOUNT_DIR_PATH
  if [ -n "$_M_CONTAINER_ROOT_LV_MOUNT_PATH" ];then
    # NOTE(review): this function is truncated at the boundary of the
    # reviewed chunk; the mount-path resolution and the rest of the body
    # continue in the following section of the file and must be restored
    # from there.
    :
  fi
}
_RESOLVED_MOUNT_DIR_PATH=$(realpath $_M_CONTAINER_ROOT_LV_MOUNT_PATH);then
      Fatal "Failed to resolve path $_M_CONTAINER_ROOT_LV_MOUNT_PATH"
    fi

    if ! activate_extra_lv_fs $_M_CONTAINER_ROOT_LV_NAME $_M_VG $_M_DEVICE_WAIT_TIMEOUT $_RESOLVED_MOUNT_DIR_PATH; then
      Fatal "Activation of storage config $_CONFIG_NAME failed"
    fi
  fi

  set_config_status "$_CONFIG_NAME" "active"
  echo "Activated storage config $_CONFIG_NAME"
}

# Print usage for the activate subcommand.
activate_help() {
  cat <<-FOE
Usage: $1 activate [OPTIONS] CONFIG_NAME

Activate storage configuration specified by CONFIG_NAME

Options:
  -h, --help	Print help message
FOE
}

# Parse options for "activate" and set $_CONFIG_NAME.
process_command_activate() {
  local command="$1"
  local command_opts=$(echo "$command" | sed 's/activate //')

  parsed_opts=$(getopt -o h -l help -- $command_opts)
  eval set -- "$parsed_opts"
  while true ; do
    case "$1" in
      -h | --help) activate_help $(basename $0); exit 0;;
      --) shift; break;;
    esac
  done

  case $# in
    1)
      _CONFIG_NAME=$1
      ;;
    *)
      activate_help $(basename $0); exit 0;;
  esac
}
# activate command processing end

#
# deactivate command processing start
#
# Deactivate the thin pool of a devicemapper config.
deactivate_devicemapper() {
  local thinpool_name=$1
  local vg=$2

  if ! lvchange -an $vg/$thinpool_name; then
    Error "Thin pool $vg/$thinpool_name deactivation failed"
    return 1
  fi
  return 0
}

# Driver specific deactivation; overlay drivers and the empty driver are
# no-ops.
deactivate_storage_driver() {
  local driver=$1

  if ! is_valid_storage_driver $driver; then
    Error "Invalid storage driver $driver"
    return 1
  fi

  [ "$driver" == "" ] && return 0
  [ "$driver" == "overlay" -o "$driver" == "overlay2" ] && return 0

  if [ "$driver" == "devicemapper" ];then
    if ! deactivate_devicemapper $_M_CONTAINER_THINPOOL $_M_VG; then
      Error "Deactivation of driver $driver failed"
      return 1
    fi
  fi
}

# Unmount (if mounted) and deactivate the extra container-root volume.
deactivate_extra_lv_fs() {
  local lv_name=$1
  local vg=$2
  local mount_path=$3

  if mountpoint -q $mount_path; then
    if ! umount $mount_path; then
      Error "Failed to unmount $mount_path"
      return 1
    fi
  fi

  #TODO: Most likely we will have to try deactivation in a loop to make
  # sure any udev rules have run and now lv is not busy.

  if ! lvchange -an $vg/$lv_name; then
    Error "Failed to deactivate $vg/$lv_name"
    return 1
  fi
}

# Implementation of the deactivate subcommand.
run_command_deactivate() {
  local resolved_path
  local metafile_path="$_CONFIG_DIR/$_CONFIG_NAME/$_METAFILE_NAME"

  [ ! -d "$_CONFIG_DIR/$_CONFIG_NAME" ] && Fatal "Storage configuration $_CONFIG_NAME does not exist"

  [ ! -e "$metafile_path" ] && Fatal "Storage configuration $_CONFIG_NAME metadata does not exist"
  source "$metafile_path"

  if ! deactivate_storage_driver $_M_STORAGE_DRIVER; then
    Fatal "Deactivation of storage config $_CONFIG_NAME failed"
  fi

  if [ -n "$_M_CONTAINER_ROOT_LV_MOUNT_PATH" ];then
    if ! resolved_path=$(realpath $_M_CONTAINER_ROOT_LV_MOUNT_PATH);then
      Fatal "Failed to resolve path $_M_CONTAINER_ROOT_LV_MOUNT_PATH"
    fi

    if ! deactivate_extra_lv_fs $_M_CONTAINER_ROOT_LV_NAME $_M_VG $resolved_path; then
      Fatal "Deactivation of storage config $_CONFIG_NAME failed"
    fi
  fi

  set_config_status "$_CONFIG_NAME" "inactive"
  echo "Deactivated storage config $_CONFIG_NAME"
}

# Print usage for the deactivate subcommand.
deactivate_help() {
  cat <<-FOE
Usage: $1 deactivate [OPTIONS] CONFIG_NAME

De-activate storage configuration specified by CONFIG_NAME

Options:
  -h, --help	Print help message
FOE
}

# Parse options for "deactivate" and set $_CONFIG_NAME.
process_command_deactivate() {
  local command="$1"
  local command_opts=$(echo "$command" | sed 's/deactivate //')

  parsed_opts=$(getopt -o h -l help -- $command_opts)
  eval set -- "$parsed_opts"
  while true ; do
    case "$1" in
      -h | --help) deactivate_help $(basename $0); exit 0;;
      --) shift; break;;
    esac
  done

  case $# in
    1)
      _CONFIG_NAME=$1
      ;;
    *)
      deactivate_help $(basename $0); exit 0;;
  esac
}

#
# deactivate command processing end
#

#
# Remove command processing start
#
# Unmount (if mounted) and delete the extra container-root volume.
reset_extra_volume() {
  local mp filename
  local lv_name=$1
  local mount_dir=$2
  local vg=$3

  if ! extra_volume_exists $lv_name $vg; then
    return 0
  fi

  mp=$(extra_lv_mountpoint $vg $lv_name $mount_dir)
  if [ -n "$mp" ];then
    if ! umount $mp >/dev/null 2>&1; then
      Fatal "Failed to unmount $mp"
    fi
  fi
  lvchange -an $vg/${lv_name}
  lvremove $vg/${lv_name}
}

# Remove command processing
# Tear down everything a config created: extra volume, thin pool, the
# volume group (only if we created it) and finally the stored config.
reset_storage() {
  local resolved_path dev

  # Resolve the configured mount path before tearing it down.
  if [ -n "$_M_CONTAINER_ROOT_LV_MOUNT_PATH" ];then
    if !
resolved_path=$(realpath $_M_CONTAINER_ROOT_LV_MOUNT_PATH);then
      Error "Failed to resolve path $_M_CONTAINER_ROOT_LV_MOUNT_PATH"
      return 1
    fi

    if ! reset_extra_volume $_M_CONTAINER_ROOT_LV_NAME $resolved_path $_M_VG;then
      Error "Failed to remove volume $_M_CONTAINER_ROOT_LV_NAME"
      return 1
    fi
  fi

  if [ "$_M_STORAGE_DRIVER" == "devicemapper" ]; then
    if ! reset_lvm_thin_pool $_M_CONTAINER_THINPOOL $_M_VG; then
      Error "Failed to remove thinpool $_M_VG/$_M_CONTAINER_THINPOOL"
      return 1
    fi
  fi

  # If we created a volume group, remove volume group.
  if [ "$_M_VG_CREATED" == "1" ];then
    if ! remove_vg_if_exists "$_M_VG"; then
      Error "Failed to remove volume group $_M_VG"
      return 1
    fi

    # Cleanup any disks we added to volume group.
    if ! remove_disk_pvs_parts "$_M_DEVS_RESOLVED";then
      return 1
    fi
  fi

  # Get rid of config data
  rm -rf "$_CONFIG_DIR/$_CONFIG_NAME/"
}

# Implementation of the remove subcommand. Deactivates the config first
# if it is currently active, then tears down the storage it created.
run_command_remove() {
  local metafile_path="$_CONFIG_DIR/$_CONFIG_NAME/$_METAFILE_NAME"
  local curr_status

  [ ! -d "$_CONFIG_DIR/$_CONFIG_NAME" ] && Fatal "Storage configuration $_CONFIG_NAME does not exist"

  # Source stored metadata file.
  [ ! -e "$_CONFIG_DIR/$_CONFIG_NAME/$_METAFILE_NAME" ] && Fatal "Storage configuration $_CONFIG_NAME metadata does not exist"

  source "$metafile_path"

  # If storage is active, deactivate it first
  curr_status=$(get_config_status "$_CONFIG_NAME")
  if [ "$curr_status" == "active" ];then
    if ! run_command_deactivate; then
      Fatal "Failed to remove storage config $_CONFIG_NAME"
    fi
  fi

  # Mark the config invalid before tearing it down so a partially
  # removed config can not be activated later.
  set_config_status "$_CONFIG_NAME" "invalid"
  if ! reset_storage; then
    Fatal "Failed to remove storage config $_CONFIG_NAME"
  fi

  echo "Removed storage configuration $_CONFIG_NAME"
}

# Print usage for the remove subcommand.
remove_help() {
  cat <<-FOE
Usage: $1 remove [OPTIONS] CONFIG_NAME

Remove storage configuration specified by CONFIG_NAME

Options:
  -h, --help	Print help message
FOE
}

# Parse options for "remove" and set $_CONFIG_NAME.
process_command_remove() {
  local command="$1"
  local command_opts=`echo "$command" | sed 's/remove //'`

  parsed_opts=`getopt -o h -l help -- $command_opts`
  eval set -- "$parsed_opts"
  while true ; do
    case "$1" in
      -h | --help) remove_help $(basename $0); exit 0;;
      --) shift; break;;
    esac
  done

  case $# in
    1)
      _CONFIG_NAME=$1
      ;;
    *)
      remove_help $(basename $0); exit 0;;
  esac
}

#
# Remove command processing end
#

#
# list command processing start
#
# Print one NAME/DRIVER/STATUS row per stored configuration.
list_all_configs() {
  local all_configs=$(ls "$_CONFIG_DIR" 2>/dev/null)
  local config_name storage_driver
  local status_file curr_status metadata_file

  [ -z "$all_configs" ] && return 0

  printf "%-24s %-16s %-16s\n" "NAME" "DRIVER" "STATUS"
  for config_name in $all_configs; do
    status_file="$_CONFIG_DIR/$config_name/$_STATUSFILE_NAME"
    metadata_file="$_CONFIG_DIR/$config_name/$_METAFILE_NAME"
    curr_status=`cat $status_file`
    storage_driver=`grep _M_STORAGE_DRIVER $metadata_file | cut -d "=" -f2`

    printf "%-24s %-16s %-16s\n" "$config_name" "$storage_driver" "$curr_status"
  done
}

#TODO: What should be listed in what format
list_overlay_params() {
  echo "VG=$_M_VG"
  # Quote DEVS the same way list_devicemapper_params does; the resolved
  # device list may contain more than one (space separated) device.
  echo "DEVS=\"$_M_DEVS_RESOLVED\""
  echo "CONTAINER_ROOT_LV_NAME=$_M_CONTAINER_ROOT_LV_NAME"
  echo "CONTAINER_ROOT_LV_MOUNT_PATH=$_M_CONTAINER_ROOT_LV_MOUNT_PATH"
}

list_devicemapper_params() {
  echo "VG=$_M_VG"
  echo "DEVS=\"$_M_DEVS_RESOLVED\""
  echo "CONTAINER_THINPOOL=$_M_CONTAINER_THINPOOL"
  echo "CONTAINER_ROOT_LV_NAME=$_M_CONTAINER_ROOT_LV_NAME"
  echo "CONTAINER_ROOT_LV_MOUNT_PATH=$_M_CONTAINER_ROOT_LV_MOUNT_PATH"
  echo "AUTO_EXTEND_POOL=$_M_AUTO_EXTEND_POOL"
  echo "DEVICE_WAIT_TIMEOUT=$_M_DEVICE_WAIT_TIMEOUT"
}

# Print name, status and driver specific parameters of one config.
# Assumes the config metadata has already been sourced by the caller.
list_config() {
  local config_name=$1
  local status_file curr_status

  status_file="$_CONFIG_DIR/$config_name/$_STATUSFILE_NAME"
  curr_status=`cat $status_file`

  echo "Name: $config_name"
  echo "Status: $curr_status"

  echo "STORAGE_DRIVER=$_M_STORAGE_DRIVER"

  if [ "$_M_STORAGE_DRIVER" == "" ]; then
    return 0
  elif [ "$_M_STORAGE_DRIVER" == "overlay" ] || [ "$_M_STORAGE_DRIVER" == "overlay2" ];then
    list_overlay_params
  else
    list_devicemapper_params
  fi
  return 0
}

# Implementation of the list subcommand. Without a CONFIG_NAME all
# stored configs are listed in tabular form.
run_command_list() {
  if [ -z "$_CONFIG_NAME" ]; then
    list_all_configs
    return
  fi

  local metafile_path="$_CONFIG_DIR/$_CONFIG_NAME/$_METAFILE_NAME"

  [ ! -d "$_CONFIG_DIR/$_CONFIG_NAME" ] && Fatal "Storage configuration $_CONFIG_NAME does not exist"

  # Source stored metadata file.
  [ !
-e "$_CONFIG_DIR/$_CONFIG_NAME/$_METAFILE_NAME" ] && Fatal "Storage configuration $_CONFIG_NAME metadata does not exist"

  source "$metafile_path"
  list_config "$_CONFIG_NAME"
}

# Print usage for the list subcommand.
list_help() {
  cat <<-FOE
Usage: $1 list [OPTIONS] [CONFIG_NAME]

List storage configuration

Options:
  -h, --help	Print help message
FOE
}

# Parse options for "list". CONFIG_NAME is optional for this command.
process_command_list() {
  local command="$1"
  local command_opts=${command#"list"}

  parsed_opts=`getopt -o h -l help -- $command_opts`
  eval set -- "$parsed_opts"
  while true ; do
    case "$1" in
      -h | --help) list_help $(basename $0); exit 0;;
      --) shift; break;;
    esac
  done

  case $# in
    0)
      ;;
    1)
      _CONFIG_NAME=$1
      ;;
    *)
      list_help $(basename $0); exit 0;;
  esac
}

#
# list command processing end
#

#
# export command processing start
#
# Implementation of the export subcommand: dump the generated output
# file of a config on stdout.
run_command_export() {
  local metafile_path="$_CONFIG_DIR/$_CONFIG_NAME/$_METAFILE_NAME"
  local outfile_path="$_CONFIG_DIR/$_CONFIG_NAME/$_OUTFILE_NAME"

  [ ! -d "$_CONFIG_DIR/$_CONFIG_NAME" ] && Fatal "Storage configuration $_CONFIG_NAME does not exist"

  [ ! -e "$metafile_path" ] && Fatal "Storage configuration $_CONFIG_NAME metadata does not exist"

  # Check the output file itself; the previous copy-paste checked the
  # metadata file a second time and would miss a missing output file.
  [ ! -e "$outfile_path" ] && Fatal "Storage configuration $_CONFIG_NAME output file does not exist"

  cat $outfile_path
}

# Print usage for the export subcommand.
export_help() {
  cat <<-FOE
Usage: $1 export [OPTIONS] CONFIG_NAME

Export storage configuration output file on stdout

Options:
  -h, --help	Print help message
FOE
}

# Parse options for "export" and set $_CONFIG_NAME.
process_command_export() {
  local command="$1"
  local command_opts=${command#"export "}

  parsed_opts=`getopt -o h -l help -- $command_opts`
  eval set -- "$parsed_opts"
  while true ; do
    case "$1" in
      -h | --help) export_help $(basename $0); exit 0;;
      --) shift; break;;
    esac
  done

  case $# in
    1)
      _CONFIG_NAME=$1
      ;;
    *)
      export_help $(basename $0); exit 0;;
  esac
}

#
# export command processing end
#

#
# add-dev command processing start
#
# Implementation of the add-dev subcommand: extend the volume group of
# an existing configuration with another block device ($DEVS).
run_command_add_dev() {
  local metafile_path="$_CONFIG_DIR/$_CONFIG_NAME/$_METAFILE_NAME"

  [ ! -d "$_CONFIG_DIR/$_CONFIG_NAME" ] && Fatal "Storage configuration $_CONFIG_NAME does not exist"

  [ ! -e "$_CONFIG_DIR/$_CONFIG_NAME/$_METAFILE_NAME" ] && Fatal "Storage configuration $_CONFIG_NAME metadata does not exist"

  source $metafile_path

  [ -z "$_M_VG" ] && Fatal "No volume group is associated with configuration. Can not add disks."
  VG=$_M_VG

  if ! vg_exists "$VG";then
    Error "Volume group $VG does not exist."
    return 1
  fi

  _VG_EXISTS=1
  if ! partition_disks_create_vg; then
    Error "Failed to add device $DEVS to config $_CONFIG_NAME"
    return 1
  fi

  if !
metadata_update_add_dev $metafile_path "$DEVS"; then
    Error "Failed to add device $DEVS to config $_CONFIG_NAME"
    return 1
  fi

  echo "Added device $DEVS to storage configuration $_CONFIG_NAME"
}

# Print usage for the add-dev subcommand.
add_dev_help() {
  cat <<-FOE
Usage: $1 add-dev [OPTIONS] CONFIG_NAME DEVICE

Add block device to configuration CONFIG_NAME

Options:
  -h, --help	Print help message
FOE
}

# Parse options for "add-dev"; sets $_CONFIG_NAME and $DEVS.
process_command_add_dev() {
  local command="$1"
  local command_opts=${command#"add-dev "}

  parsed_opts=$(getopt -o h -l help -- $command_opts)
  eval set -- "$parsed_opts"
  while true ; do
    case "$1" in
      -h | --help) add_dev_help $(basename $0); exit 0;;
      --) shift; break;;
    esac
  done

  case $# in
    2)
      _CONFIG_NAME=$1
      DEVS="$2"
      ;;
    *)
      add_dev_help $(basename $0); exit 0;;
  esac
}

#
# add-dev command processing end
#


#
# Start of create command processing
#
# Create the lvm thin pool in $VG and, if requested, write the driver
# output file.
setup_lvm_thin_pool () {
  local thinpool_name=${CONTAINER_THINPOOL}

  # At this point of time, a volume group should exist for lvm thin pool
  # operations to succeed. Make that check and fail if that's not the case.
  if ! vg_exists "$VG";then
    Fatal "No valid volume group found. Exiting."
  fi
  _VG_EXISTS=1

  if lvm_pool_exists $thinpool_name; then
    Fatal "Thin pool named $thinpool_name already exists. Specify a different thin pool name."
  fi

  create_lvm_thin_pool

  # Mark thin pool for skip auto activation during reboot. start command
  # will activate thin pool.
  lvchange -ky $VG/$thinpool_name

  [ -n "$_STORAGE_OUT_FILE" ] && write_storage_config_file $STORAGE_DRIVER "$_STORAGE_OUT_FILE"

  process_auto_pool_extenion ${VG} ${thinpool_name}
}

# Top level storage setup dispatch for the selected $STORAGE_DRIVER.
setup_storage() {
  local containerroot

  if ! is_valid_storage_driver $STORAGE_DRIVER;then
    Fatal "Invalid storage driver: ${STORAGE_DRIVER}."
  fi

  if [ "$STORAGE_DRIVER" == "overlay" -o "$STORAGE_DRIVER" == "overlay2" ]; then
    if ! can_mount_overlay; then
      Fatal "Can not setup storage driver $STORAGE_DRIVER as system does not support it. Specify a different driver."
    fi
  fi

  # If a user decides to setup (a) and (b)/(c):
  # a) lvm thin pool for devicemapper.
  # b) a separate volume for container runtime root.
  # c) a separate named ($CONTAINER_ROOT_LV_NAME) volume for $CONTAINER_ROOT_LV_MOUNT_PATH.
  # (a) will be setup first, followed by (b) or (c).

  # Set up lvm thin pool LV.
  if [ "$STORAGE_DRIVER" == "devicemapper" ]; then
    setup_lvm_thin_pool
  elif [ "$STORAGE_DRIVER" == "overlay" -o "$STORAGE_DRIVER" == "overlay2" ];then
    [ -n "$_STORAGE_OUT_FILE" ] && write_storage_config_file $STORAGE_DRIVER "$_STORAGE_OUT_FILE"
  fi

  # Set up a separate named ($CONTAINER_ROOT_LV_NAME) volume
  # for $CONTAINER_ROOT_LV_MOUNT_PATH.
  if ! setup_extra_lv_fs; then
    Error "Failed to setup logical volume for $CONTAINER_ROOT_LV_MOUNT_PATH."
    return 1
  fi

  if [ "$STORAGE_DRIVER" == "overlay" -o "$STORAGE_DRIVER" == "overlay2" ]; then
    # This is little hacky. We are guessing where overlay2 will be setup
    # by container runtime environment. At some point of time this should
    # be passed in by a config variable.
    containerroot=${_RESOLVED_MOUNT_DIR_PATH:-/var}

    if ! is_xfs_ftype_enabled "$containerroot"; then
      Error "XFS filesystem at ${containerroot} has ftype=0, cannot use overlay backend; consider different driver or separate volume or OS reprovision"
      return 1
    fi
  fi
}

# Implementation of the create subcommand.
run_command_create() {
  # Verify storage options set correctly in input files
  [ -d "$_CONFIG_DIR/$_CONFIG_NAME" ] && Fatal "Storage configuration $_CONFIG_NAME already exists"
  check_storage_options
  determine_rootfs_pvs_vg
  partition_disks_create_vg
  setup_storage
  create_storage_config "$_CONFIG_DIR/$_CONFIG_NAME" "$_STORAGE_IN_FILE"
  set_config_status "$_CONFIG_NAME" "active"
  echo "Created storage configuration $_CONFIG_NAME"
}

# Print usage for the create subcommand.
create_help() {
  cat <<-FOE
Usage: $1 create [OPTIONS] CONFIG_NAME INPUTFILE

Create storage configuration specified by CONFIG_NAME and INPUTFILE

Options:
  -h, --help	Print help message
  -o, --output	Output file path
FOE
}

# Parse options for "create"; sets $_CONFIG_NAME, $_STORAGE_IN_FILE and
# optionally $_STORAGE_OUT_FILE.
process_command_create() {
  local command="$1"
  local command_opts=$(echo "$command" | sed 's/create //')

  parsed_opts=$(getopt -o ho: -l help,output: -- $command_opts)
  eval set -- "$parsed_opts"
  while true ; do
    case "$1" in
      -h | --help) create_help $(basename $0); exit 0;;
      -o | --output) _STORAGE_OUT_FILE=$2; shift 2;;
      --) shift; break;;
    esac
  done

  case $# in
    2)
      _CONFIG_NAME=$1
      _STORAGE_IN_FILE=$2
      if [ ! -e "$_STORAGE_IN_FILE" ]; then
        Fatal "File $_STORAGE_IN_FILE does not exist."
      fi
      ;;
    *)
      create_help $(basename $0); exit 0;;
  esac
}

#
# End of create command processing
#
# Dispatch the first word of the command string to its option parser and
# remember the selected command in $_COMMAND.
parse_subcommands() {
  local subcommand_str="$1"
  local subcommand=$(echo "$subcommand_str" | cut -d " " -f1)

  case $subcommand in
    create)
      process_command_create "$subcommand_str"
      _COMMAND="create"
      ;;
    activate)
      process_command_activate "$subcommand_str"
      _COMMAND="activate"
      ;;
    deactivate)
      process_command_deactivate "$subcommand_str"
      _COMMAND="deactivate"
      ;;
    remove)
      process_command_remove "$subcommand_str"
      _COMMAND="remove"
      ;;
    list)
      process_command_list "$subcommand_str"
      _COMMAND="list"
      ;;
    export)
      process_command_export "$subcommand_str"
      _COMMAND="export"
      ;;
    add-dev)
      process_command_add_dev "$subcommand_str"
      _COMMAND="add-dev"
      ;;
    *)
      Error "Unknown command $subcommand"
      usage
      exit 1
      ;;
  esac
}

# Insert "--" in front of a recognized command word so getopt does not
# try to parse the command's own options as global css options.
process_input_str() {
  local input="$1"
  local output

  for i in $_COMMAND_LIST; do
    if grep -w $i <<< "$input" > /dev/null 2>&1; then
      echo ${input/$i/-- $i}
      return
    fi
  done
  echo "$input"
}

#
# END of helper functions dealing with commands and storage setup for new design
#

#
# Start helper functions for locking
#
prepare_locking() {
  mkdir -p $_LOCKDIR
  eval "exec $_LOCKFD>"${_LOCKDIR}/$_LOCKFILE""
  # Suppress lvm warnings about the leaked lock file descriptor.
export LVM_SUPPRESS_FD_WARNINGS=1
}

# Try for up to 60 seconds to take the global css flock.
acquire_lock() {
  local timeout=60

  while [ $timeout -gt 0 ];do
    flock -n $_LOCKFD && return 0
    timeout=$((timeout-1))
    Info "Waiting to acquire lock ${_LOCKDIR}/$_LOCKFILE"
    sleep 1
  done

  Error "Timed out while waiting to acquire lock ${_LOCKDIR}/$_LOCKFILE"
  return 1
}


#
# End helper functions for locking
#
# Source library. If there is a library present in same dir as d-s-s, source
# that otherwise fall back to standard library. This is useful when modifying
# libcss.sh in git tree and testing d-s-s.
_SRCDIR=$(dirname $0)

if [ -e $_SRCDIR/libcss.sh ]; then
  source $_SRCDIR/libcss.sh
elif [ -e /usr/share/container-storage-setup/libcss.sh ]; then
  source /usr/share/container-storage-setup/libcss.sh
fi

if [ -e $_SRCDIR/container-storage-setup.conf ]; then
  source $_SRCDIR/container-storage-setup.conf
elif [ -e /usr/share/container-storage-setup/container-storage-setup ]; then
  source /usr/share/container-storage-setup/container-storage-setup
fi

# Main Script
_INPUT_STR="$@"
_INPUT_STR_MODIFIED=$(process_input_str "$_INPUT_STR")
_OPTS=$(getopt -o hv -l reset -l help -l version -- $_INPUT_STR_MODIFIED)
eval set -- "$_OPTS"
_RESET=0
while true ; do
  case "$1" in
    --reset) _RESET=1; shift;;
    -h | --help) usage $(basename $0); exit 0;;
    -v | --version) echo $_CSS_VERSION; exit 0;;
    --) shift; break;;
  esac
done

# Check subcommands
case $# in
  0)
    # Legacy invocation without a subcommand: docker compatibility mode.
    CONTAINER_THINPOOL=docker-pool
    _DOCKER_COMPAT_MODE=1
    _STORAGE_IN_FILE="/etc/sysconfig/docker-storage-setup"
    _STORAGE_OUT_FILE="/etc/sysconfig/docker-storage"
    ;;
  *)
    _SUBCOMMAND_STR="$@"
    parse_subcommands "$_SUBCOMMAND_STR"
    ;;
esac

if [ -n "$_DOCKER_COMPAT_MODE" ]; then
  _STORAGE_OPTIONS="DOCKER_STORAGE_OPTIONS"
fi

# If user has overridden any settings in $_STORAGE_IN_FILE
# take that into account.
if [ -e "${_STORAGE_IN_FILE}" ]; then
  source ${_STORAGE_IN_FILE}
fi

# Take lock only in new mode and not compatibility mode
[ -z "$_DOCKER_COMPAT_MODE" ] && { prepare_locking; acquire_lock; }

case $_COMMAND in
  create)
    run_command_create
    ;;
  activate)
    run_command_activate
    ;;
  deactivate)
    run_command_deactivate
    ;;
  remove)
    run_command_remove
    ;;
  list)
    run_command_list
    ;;
  export)
    run_command_export
    ;;
  add-dev)
    run_command_add_dev
    ;;
  *)
    run_docker_compatibility_code
    ;;
esac
--------------------------------------------------------------------------------
/container-storage-setup.spec:
--------------------------------------------------------------------------------
# NOTE: %global expands its body at definition time, so %{repo} must be
# defined before it is used by git0 (the previous ordering left the
# literal "%{repo}" in Source0).
%global project container-storage-setup
%global repo %{project}
%global git0 https://github.com/projectatomic/%{repo}
%global csslibdir %{_prefix}/share/container-storage-setup
%global commit 79462e9565053fb1e0d87c336e6d980f0a56c41e
%global shortcommit %(c=%{commit}; echo ${c:0:7})

Name: container-storage-setup
Version: 0.1.0
Release: 1%{?dist}
Summary: A simple service to setup container storage devices

License: ASL 2.0
URL: http://github.com/projectatomic/container-storage-setup/
Source0: %{git0}/archive/%{commit}/%{repo}-%{shortcommit}.tar.gz
BuildArch: noarch

Requires: lvm2
Requires: xfsprogs

%description
This is a simple service to configure Container Runtimes to use an
LVM-managed 23 | thin pool. It also supports auto-growing both the pool as well 24 | as the root logical volume and partition table. 25 | 26 | %prep 27 | %setup -q -n %{repo}-%{commit} 28 | 29 | %build 30 | 31 | %install 32 | %{__make} install-core DESTDIR=%{?buildroot} 33 | 34 | %files 35 | %doc README.md 36 | %license LICENSE 37 | %{_bindir}/container-storage-setup 38 | %dir %{csslibdir} 39 | %{_mandir}/man1/container-storage-setup.1* 40 | %{csslibdir}/container-storage-setup 41 | %{csslibdir}/css-child-read-write 42 | %{csslibdir}/libcss.sh 43 | 44 | %changelog 45 | * Mon Feb 27 2017 Dan Walsh - 0.1.0-1 46 | - Initial version of container-storage-setup 47 | - Building to push through the fedora release cycle 48 | 49 | * Thu Oct 16 2014 Andy Grimm - 0.0.1-2 50 | - Fix rpm deps and scripts 51 | 52 | * Thu Oct 16 2014 Andy Grimm - 0.0.1-1 53 | - Initial build 54 | 55 | -------------------------------------------------------------------------------- /css-child-read-write.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # This is a helper script which is called by container-storage-setup.sh (d-s-s). 3 | # This script helps in providing synchronization primitives to d-s-s so that 4 | # d-s-s can determine whether deferred deletion is supported by the underlying 5 | # kernel or not. 6 | 7 | # $1 is named FIFO pipe. 8 | # This helper script will write to $1 to signal d-s-s that unshare has been completed successfully. 9 | 10 | # $2 is another named FIFO pipe. 11 | # This helper script will read from $2. The write for this pipe would come from d-s-s to indicate 12 | # that helper script can terminate now. 13 | 14 | # $3 is absolute path to a temp dir which child will bind mount. Parent will 15 | # later try to remove this dir. 16 | 17 | if ! 
mount -o bind $3 $3; then
  echo "stop" > $1
  exit 1
fi
echo "start" > $1
read -t 10 n <>$2
--------------------------------------------------------------------------------
/docker-storage-setup-override.conf:
--------------------------------------------------------------------------------
# Edit this file to override any configuration options specified in
# /usr/share/container-storage-setup/container-storage-setup.
#
# For more details refer to "man container-storage-setup"
--------------------------------------------------------------------------------
/docker-storage-setup.1:
--------------------------------------------------------------------------------
.so man1/container-storage-setup.1

--------------------------------------------------------------------------------
/docker-storage-setup.service:
--------------------------------------------------------------------------------
[Unit]
Description=Docker Storage Setup
After=cloud-init.service
Before=docker.service

[Service]
Type=oneshot
ExecStart=/usr/bin/container-storage-setup
EnvironmentFile=-/etc/sysconfig/docker-storage-setup

[Install]
WantedBy=multi-user.target
--------------------------------------------------------------------------------
/libcss.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Library for common functions

# echo info messages on stdout
Info() {
  # stdout is used to pass back output from bash functions
  # so we use stderr
  echo "INFO: ${1}" >&2
}

# echo error messages on stderr
Error() {
  echo "ERROR: ${1}" >&2
}

# echo error on stderr and exit with error code 1
Fatal() {
  Error "${1}"
  exit 1
}

# checks the size specifications acceptable to -L
check_numeric_size_syntax() {
  # Keep the helper variable local so it does not leak into the caller's
  # environment (it used to be a global).
  local data_size=$1

  # if it is all numeric, it is valid as by default it will be MB.
  [[ $data_size =~ ^[[:digit:]]+$ ]] && return 0

  # Numeric digits followed by b or B. (byte specification)
  [[ $data_size =~ ^[[:digit:]]+[bB]$ ]] && return 0

  # Numeric digits followed by valid suffix. Will support both G and GB.
  [[ $data_size =~ ^[[:digit:]]+[sSkKmMgGtTpPeE][bB]?$ ]] && return 0

  # Numeric digits followed by valid suffix and ib. Ex. Gib or GiB.
  [[ $data_size =~ ^[[:digit:]]+[sSkKmMgGtTpPeE]i[bB]$ ]] && return 0

  return 1
}

check_data_size_syntax() {
  local data_size=$1

  # For -l style options, we only support %FREE and %VG option. %PVS and
  # %ORIGIN does not seem to make much sense for this use case.
  if [[ $data_size == *%FREE ]] || [[ $data_size == *%VG ]];then
    return 0
  fi

  # -L compatible syntax
  check_numeric_size_syntax "$data_size" && return 0
  return 1
}

# Check if passed in vg exists. Returns 0 if volume group exists.
vg_exists() {
  local vg=$1
  local vg_name

  for vg_name in $(vgs --noheadings -o vg_name); do
    if [ "$vg_name" == "$vg" ]; then
      return 0
    fi
  done
  return 1
}

# Remove volume group if it exists
remove_vg_if_exists() {
  vg_exists "$1" || return 0
  vgremove "$1" > /dev/null
}

pv_exists() {
  pvs "$1" >/dev/null 2>&1
}

remove_pv_if_exists() {
  pv_exists "$1" || return 0
  pvremove "$1" >/dev/null
}
--------------------------------------------------------------------------------
/tests/001-test-use-devs-to-create-thin-pool.sh:
--------------------------------------------------------------------------------
source $SRCDIR/libtest.sh

# Test DEVS= directive. Returns 0 on success and 1 on failure.
4 | test_devs() { 5 | local devs=$TEST_DEVS 6 | local test_status=1 7 | local testname=`basename "$0"` 8 | local vg_name="css-test-foo" 9 | 10 | # Error out if any pre-existing volume group vg named css-test-foo 11 | if vg_exists "$vg_name"; then 12 | echo "ERROR: $testname: Volume group $vg_name already exists." >> $LOGS 13 | return $test_status 14 | fi 15 | 16 | # Create config file 17 | cat << EOF > /etc/sysconfig/docker-storage-setup 18 | DEVS="$devs" 19 | VG=$vg_name 20 | EOF 21 | 22 | # Run container-storage-setup 23 | $CSSBIN >> $LOGS 2>&1 24 | 25 | # Test failed. 26 | if [ $? -ne 0 ]; then 27 | echo "ERROR: $testname: $CSSBIN failed." >> $LOGS 28 | cleanup $vg_name "$devs" 29 | return $test_status 30 | fi 31 | 32 | # Make sure volume group $VG got created 33 | if vg_exists "$vg_name"; then 34 | test_status=0 35 | else 36 | echo "ERROR: $testname: $CSSBIN failed. $vg_name was not created." >> $LOGS 37 | fi 38 | 39 | cleanup $vg_name "$devs" 40 | return $test_status 41 | } 42 | 43 | test_devs 44 | -------------------------------------------------------------------------------- /tests/002-test-reject-disk-with-lvm-signature.sh: -------------------------------------------------------------------------------- 1 | source $SRCDIR/libtest.sh 2 | 3 | # Make sure a disk with lvm signature is rejected and is not overriden 4 | # by css. Returns 0 on success and 1 on failure. 5 | test_lvm_sig() { 6 | local devs=$TEST_DEVS dev 7 | local test_status=1 8 | local testname=`basename "$0"` 9 | local vg_name="css-test-foo" 10 | 11 | # Error out if any pre-existing volume group vg named css-test-foo 12 | if vg_exists "$vg_name"; then 13 | echo "ERROR: $testname: Volume group $vg_name already exists." 
>> $LOGS 14 | return $test_status 15 | fi 16 | 17 | cat << EOF > /etc/sysconfig/docker-storage-setup 18 | DEVS="$devs" 19 | VG=$vg_name 20 | EOF 21 | 22 | # create lvm signatures on disks 23 | for dev in $devs; do 24 | pvcreate -f $dev >> $LOGS 2>&1 25 | done 26 | 27 | # Run container-storage-setup 28 | $CSSBIN >> $LOGS 2>&1 29 | 30 | # Css should fail. If it did not, then test failed. This is very crude 31 | # check though as css can fail for so many reasons. A more precise check 32 | # would be too check for exact error message. 33 | [ $? -ne 0 ] && test_status=0 34 | 35 | cleanup $vg_name "$devs" 36 | return $test_status 37 | } 38 | 39 | # Make sure a disk with lvm signature is rejected and is not overriden 40 | # by css. Returns 0 on success and 1 on failure. 41 | 42 | test_lvm_sig 43 | -------------------------------------------------------------------------------- /tests/003-test-override-signature-wipes-existing-signatures.sh: -------------------------------------------------------------------------------- 1 | source $SRCDIR/libtest.sh 2 | 3 | test_override_signatures() { 4 | local devs=$TEST_DEVS dev 5 | local test_status=1 6 | local testname=`basename "$0"` 7 | local vg_name="css-test-foo" 8 | 9 | # Error out if vg_name VG exists already 10 | if vg_exists "$vg_name"; then 11 | echo "ERROR: $testname: Volume group $vg_name already exists." >> $LOGS 12 | return $test_status 13 | fi 14 | 15 | cat << EOF > /etc/sysconfig/docker-storage-setup 16 | DEVS="$devs" 17 | VG=$vg_name 18 | WIPE_SIGNATURES=true 19 | EOF 20 | 21 | # create lvm signatures on disks 22 | for dev in $devs; do 23 | pvcreate -f $dev >> $LOGS 2>&1 24 | done 25 | 26 | # Run container-storage-setup 27 | $CSSBIN >> $LOGS 2>&1 28 | 29 | # Test failed. 30 | if [ $? -ne 0 ]; then 31 | echo "ERROR: $testname: $CSSBIN failed." >> $LOGS 32 | cleanup $vg_name "$devs" 33 | return $test_status 34 | fi 35 | 36 | # Make sure volume group $VG got created. 
37 | if vg_exists "$vg_name"; then 38 | test_status=0 39 | else 40 | echo "ERROR: $testname: $CSSBIN failed. $vg_name was not created." >> $LOGS 41 | fi 42 | 43 | cleanup $vg_name "$devs" 44 | return $test_status 45 | } 46 | 47 | # Create a disk with some signature, say lvm signature and make sure 48 | # override signature can override that, wipe signature and create thin 49 | # pool. 50 | test_override_signatures 51 | -------------------------------------------------------------------------------- /tests/004-test-non-absolute-disk-name-support.sh: -------------------------------------------------------------------------------- 1 | source $SRCDIR/libtest.sh 2 | 3 | test_non_absolute_disk_name() { 4 | local devs dev 5 | local test_status=1 6 | local testname=`basename "$0"` 7 | local vg_name="css-test-foo" 8 | 9 | # Remove prefix /dev/ from disk names to test if non-absolute disk 10 | # names work. 11 | for dev in $TEST_DEVS; do 12 | dev=${dev##/dev/} 13 | devs="$devs $dev" 14 | done 15 | 16 | # Error out if any pre-existing volume group vg named css-test-foo 17 | if vg_exists "$vg_name"; then 18 | echo "ERROR: $testname: Volume group $vg_name already exists." >> $LOGS 19 | return $test_status 20 | fi 21 | 22 | cat << EOF > /etc/sysconfig/docker-storage-setup 23 | DEVS="$devs" 24 | VG=$vg_name 25 | EOF 26 | 27 | # Run container-storage-setup 28 | $CSSBIN >> $LOGS 2>&1 29 | 30 | # Test failed. 31 | if [ $? -ne 0 ]; then 32 | echo "ERROR: $testname: $CSSBIN failed." >> $LOGS 33 | cleanup $vg_name "$TEST_DEVS" 34 | return $test_status 35 | fi 36 | 37 | # Make sure volume group $VG got created. 38 | if vg_exists "$vg_name"; then 39 | test_status=0 40 | else 41 | echo "ERROR: $testname: $CSSBIN failed. $vg_name was not created." >> $LOGS 42 | fi 43 | 44 | cleanup $vg_name "$TEST_DEVS" 45 | return $test_status 46 | } 47 | 48 | # Make sure non-absolute disk names are supported. Ex. sdb. 
49 | test_non_absolute_disk_name 50 | -------------------------------------------------------------------------------- /tests/005-test-devmapper-cleanup.sh: -------------------------------------------------------------------------------- 1 | source $SRCDIR/libtest.sh 2 | 3 | # Test "container-storage-setup reset". Returns 0 on success and 1 on failure. 4 | test_reset_devmapper() { 5 | local devs=${TEST_DEVS} 6 | local test_status=1 7 | local testname=`basename "$0"` 8 | local vg_name="css-test-foo" 9 | 10 | # Error out if any pre-existing volume group vg named css-test-foo 11 | if vg_exists "$vg_name"; then 12 | echo "ERROR: $testname: Volume group $vg_name already exists." >> $LOGS 13 | return $test_status 14 | fi 15 | 16 | cat << EOF > /etc/sysconfig/docker-storage-setup 17 | DEVS="$devs" 18 | VG=$vg_name 19 | EOF 20 | 21 | # Run container-storage-setup 22 | $CSSBIN >> $LOGS 2>&1 23 | 24 | # Test failed. 25 | if [ $? -ne 0 ]; then 26 | echo "ERROR: $testname: $CSSBIN failed." >> $LOGS 27 | cleanup $vg_name "$devs" 28 | return $test_status 29 | fi 30 | 31 | $CSSBIN --reset >> $LOGS 2>&1 32 | # Test failed. 33 | if [ $? -eq 0 ]; then 34 | if [ -e /etc/sysconfig/docker-storage ]; then 35 | echo "ERROR: $testname: $CSSBIN failed. /etc/sysconfig/docker-storage still exists." >> $LOGS 36 | else 37 | if lv_exists $vg_name "docker-pool"; then 38 | echo "ERROR: $testname: Thin pool docker-pool still exists." >> $LOGS 39 | else 40 | test_status=0 41 | fi 42 | fi 43 | fi 44 | 45 | if [ $test_status -ne 0 ]; then 46 | echo "ERROR: $testname: $CSSBIN --reset failed." >> $LOGS 47 | fi 48 | 49 | cleanup $vg_name "$devs" 50 | 51 | return $test_status 52 | } 53 | 54 | # Create a devicemapper docker backend and then make sure the 55 | # `container-storage-setup --reset` 56 | # cleans it up properly. 
57 | test_reset_devmapper 58 | -------------------------------------------------------------------------------- /tests/006-test-overlay-cleanup.sh: -------------------------------------------------------------------------------- 1 | source $SRCDIR/libtest.sh 2 | 3 | # Test "container-storage-setup --reset". Returns 0 on success and 1 on failure. 4 | test_reset_overlay() { 5 | local test_status=0 6 | local testname=`basename "$0"` 7 | local infile=/etc/sysconfig/docker-storage-setup 8 | local outfile=/etc/sysconfig/docker-storage 9 | 10 | cat << EOF > ${infile} 11 | STORAGE_DRIVER=overlay 12 | EOF 13 | 14 | # Run container-storage-setup 15 | $CSSBIN >> $LOGS 2>&1 16 | 17 | # Test failed. 18 | if [ $? -ne 0 ]; then 19 | echo "ERROR: $testname: $CSSBIN failed." >> $LOGS 20 | rm -f $infile $outfile 21 | return 1 22 | fi 23 | 24 | $CSSBIN --reset >> $LOGS 2>&1 25 | if [ $? -ne 0 ]; then 26 | # Test failed. 27 | echo "ERROR: $testname: $CSSBIN --reset failed." >> $LOGS 28 | test_status=1 29 | elif [ -e /etc/sysconfig/docker-storage ]; then 30 | # Test failed. 31 | echo "ERROR: $testname: $CSSBIN --reset failed to cleanup /etc/sysconfig/docker." >> $LOGS 32 | test_status=1 33 | fi 34 | 35 | rm -f $infile $outfile 36 | return $test_status 37 | } 38 | 39 | # Create a overlay backend and then make sure the 40 | # container-storage-setup --reset 41 | # cleans it up properly. 
42 | test_reset_overlay 43 | -------------------------------------------------------------------------------- /tests/007-test-setting-extra-opts.sh: -------------------------------------------------------------------------------- 1 | source $SRCDIR/libtest.sh 2 | 3 | # Test that the user-specified options stored in 4 | # EXTRA_DOCKER_STORAGE_OPTIONS actually end up in 5 | # the docker storage config file, appended to the variable 6 | # DOCKER_STORAGE_OPTIONS in /etc/sysconfig/docker-storage 7 | test_set_extra_docker_opts() { 8 | local devs=$TEST_DEVS 9 | local test_status=1 10 | local testname=`basename "$0"` 11 | local vg_name="css-test-foo" 12 | local extra_options="--storage-opt dm.fs=ext4" 13 | 14 | # Error out if volume group $vg_name exists already 15 | if vg_exists "$vg_name"; then 16 | echo "ERROR: $testname: Volume group $vg_name already exists" >> $LOGS 17 | return $test_status 18 | fi 19 | 20 | cat << EOF > /etc/sysconfig/docker-storage-setup 21 | DEVS="$devs" 22 | VG=$vg_name 23 | EXTRA_DOCKER_STORAGE_OPTIONS="$extra_options" 24 | EOF 25 | 26 | # Run container-storage-setup 27 | $CSSBIN >> $LOGS 2>&1 28 | 29 | # css failed 30 | if [ $? -ne 0 ]; then 31 | echo "ERROR: $testname: $CSSBIN --reset failed." >> $LOGS 32 | cleanup $vg_name "$devs" 33 | return $test_status 34 | fi 35 | 36 | # Check if docker-storage config file was created by css 37 | if [ ! -f /etc/sysconfig/docker-storage ]; then 38 | echo "ERROR: $testname: /etc/sysconfig/docker-storage file was not created." >> $LOGS 39 | cleanup $vg_name "$devs" 40 | return $test_status 41 | fi 42 | 43 | source /etc/sysconfig/docker-storage 44 | 45 | # Search for $extra_options in $options. 46 | echo $DOCKER_STORAGE_OPTIONS | grep -q -- "$extra_options" 47 | 48 | # Successful appending to DOCKER_STORAGE_OPTIONS 49 | if [ $? -eq 0 ]; then 50 | test_status=0 51 | else 52 | echo "ERROR: $testname: failed. DOCKER_STORAGE_OPTIONS ${DOCKER_STORAGE_OPTIONS} does not include extra_options ${extra_options}." 
>> $LOGS 53 | fi 54 | 55 | cleanup $vg_name "$devs" 56 | return $test_status 57 | } 58 | 59 | # Test that $EXTRA_DOCKER_STORAGE_OPTIONS is successfully written 60 | # into /etc/sysconfig/docker-storage 61 | test_set_extra_docker_opts 62 | -------------------------------------------------------------------------------- /tests/008-test-overlay2-setup-cleanup.sh: -------------------------------------------------------------------------------- 1 | source $SRCDIR/libtest.sh 2 | 3 | # Test "container-storage-setup reset". Returns 0 on success and 1 on failure. 4 | test_reset_overlay2() { 5 | local test_status=0 6 | local testname=`basename "$0"` 7 | local infile=/etc/sysconfig/docker-storage-setup 8 | local outfile=/etc/sysconfig/docker-storage 9 | 10 | cat << EOF > /etc/sysconfig/docker-storage-setup 11 | STORAGE_DRIVER=overlay2 12 | EOF 13 | 14 | # Run container-storage-setup 15 | $CSSBIN >> $LOGS 2>&1 16 | 17 | # Test failed. 18 | if [ $? -ne 0 ]; then 19 | echo "ERROR: $testname: $CSSBIN failed." >> $LOGS 20 | rm -f $infile $outfile 21 | return 1 22 | fi 23 | 24 | if ! grep -q "overlay2" /etc/sysconfig/docker-storage; then 25 | echo "ERROR: $testname: /etc/sysconfig/docker-storage does not have string overlay2." >> $LOGS 26 | rm -f $infile $outfile 27 | return 1 28 | fi 29 | 30 | $CSSBIN --reset >> $LOGS 2>&1 31 | if [ $? -ne 0 ]; then 32 | # Test failed. 33 | test_status=1 34 | echo "ERROR: $testname: $CSSBIN --reset failed." >> $LOGS 35 | elif [ -e /etc/sysconfig/docker-storage ]; then 36 | # Test failed. 37 | test_status=1 38 | echo "ERROR: $testname: $CSSBIN /etc/sysconfig/docker-storage still exists." >> $LOGS 39 | fi 40 | 41 | rm -f $infile $outfile 42 | return $test_status 43 | } 44 | 45 | # Create a overlay2 backend and then make sure the 46 | # container-storage-setup --reset 47 | # cleans it up properly. 
48 | test_reset_overlay2 49 | -------------------------------------------------------------------------------- /tests/009-test-follow-symlinked-devices.sh: -------------------------------------------------------------------------------- 1 | source $SRCDIR/libtest.sh 2 | 3 | test_follow_symlinked_devices() { 4 | local devs dev 5 | local devlinks devlink 6 | local test_status=1 7 | local testname=`basename "$0"` 8 | local vg_name="css-test-foo" 9 | 10 | # Create a symlink for a device and try to follow it 11 | for dev in $TEST_DEVS; do 12 | if [ ! -h $dev ]; then 13 | devlink="/tmp/$(basename $dev)-test.$$" 14 | ln -s $dev $devlink 15 | 16 | dev=$devlink 17 | devlinks="$devlinks $dev" 18 | fi 19 | devs="$devs $dev" 20 | echo "Using symlinke devices: $dev -> $(readlink -e $dev)" >> $LOGS 21 | done 22 | 23 | cat << EOF > /etc/sysconfig/docker-storage-setup 24 | DEVS="$devs" 25 | VG=$vg_name 26 | EOF 27 | # Run container-storage-setup 28 | $CSSBIN >> $LOGS 2>&1 29 | 30 | # Test failed. 31 | if [ $? -ne 0 ]; then 32 | echo "ERROR: $testname: $CSSBIN failed." >> $LOGS 33 | cleanup_soft_links "$devlinks" 34 | cleanup $vg_name "$TEST_DEVS" 35 | return $test_status 36 | fi 37 | 38 | # Make sure volume group $VG got created. 39 | if vg_exists "$vg_name"; then 40 | test_status=0 41 | else 42 | echo "ERROR: $testname: $CSSBIN failed. $vg_name was not created." >> $LOGS 43 | fi 44 | 45 | cleanup_soft_links "$devlinks" 46 | cleanup $vg_name "$TEST_DEVS" 47 | return $test_status 48 | } 49 | 50 | # Make sure symlinked disk names are supported. 51 | test_follow_symlinked_devices 52 | -------------------------------------------------------------------------------- /tests/010-test-use-devs-to-create-docker-root-volume.sh: -------------------------------------------------------------------------------- 1 | source $SRCDIR/libtest.sh 2 | 3 | # Test DOCKER_ROOT_VOLUME= directive. Returns 0 on success and 1 on failure. 
4 | test_docker_root_volume() { 5 | local devs=$TEST_DEVS 6 | local test_status=1 7 | local testname=`basename "$0"` 8 | local vg_name="css-test-foo" 9 | local docker_root_lv_name="docker-root-lv" 10 | 11 | # Error out if any pre-existing volume group vg named css-test-foo 12 | if vg_exists "$vg_name"; then 13 | echo "ERROR: $testname: Volume group $vg_name already exists." >> $LOGS 14 | return $test_status 15 | fi 16 | 17 | # Create config file 18 | cat << EOF > /etc/sysconfig/docker-storage-setup 19 | DEVS="$devs" 20 | VG=$vg_name 21 | DOCKER_ROOT_VOLUME=yes 22 | EOF 23 | 24 | # Run container-storage-setup 25 | $CSSBIN >> $LOGS 2>&1 26 | 27 | # Test failed. 28 | if [ $? -ne 0 ]; then 29 | echo "ERROR: $testname: $CSSBIN failed." >> $LOGS 30 | cleanup $vg_name "$devs" 31 | return $test_status 32 | fi 33 | 34 | # Make sure $DOCKER_ROOT_VOLUME {docker-root-lv} got created 35 | # successfully. 36 | if ! lv_exists "$vg_name" "$docker_root_lv_name"; then 37 | echo "ERROR: $testname: Logical Volume $docker_root_lv_name does not exist." >> $LOGS 38 | cleanup $vg_name "$devs" 39 | return $test_status 40 | fi 41 | 42 | # Make sure $DOCKER_ROOT_VOLUME {docker-root-lv} is 43 | # mounted on /var/lib/docker 44 | local mnt 45 | mnt=$(findmnt -n -o TARGET --first-only --source /dev/${vg_name}/${docker_root_lv_name}) 46 | if [ "$mnt" != "/var/lib/docker" ];then 47 | echo "ERROR: $testname: Logical Volume $docker_root_lv_name is not mounted on /var/lib/docker." 
>> $LOGS 48 | cleanup $vg_name "$devs" 49 | return $test_status 50 | fi 51 | 52 | cleanup_container_root_volume $vg_name $docker_root_lv_name $mnt 53 | cleanup $vg_name "$devs" 54 | return 0 55 | } 56 | 57 | cleanup_container_root_volume(){ 58 | local vg_name=$1 59 | local lv_name=$2 60 | local mount_path=$3 61 | 62 | umount $mount_path >> $LOGS 2>&1 63 | lvchange -an $vg_name/${lv_name} >> $LOGS 2>&1 64 | lvremove $vg_name/${lv_name} >> $LOGS 2>&1 65 | 66 | cleanup_mount_file $mount_path 67 | } 68 | 69 | test_docker_root_volume 70 | -------------------------------------------------------------------------------- /tests/011-test-docker-root-volume-cleanup.sh: -------------------------------------------------------------------------------- 1 | source $SRCDIR/libtest.sh 2 | 3 | # Test "container-storage-setup --reset" for DOCKER_ROOT_VOLUME=yes. 4 | # Returns 0 on success and 1 on failure. 5 | test_reset_docker_root_volume() { 6 | local devs=${TEST_DEVS} 7 | local test_status=1 8 | local testname=`basename "$0"` 9 | local vg_name="css-test-foo" 10 | local mount_path="/var/lib/docker" 11 | local mount_filename="var-lib-docker.mount" 12 | local docker_root_lv_name="docker-root-lv" 13 | 14 | # Error out if any pre-existing volume group vg named css-test-foo 15 | if vg_exists "$vg_name"; then 16 | echo "ERROR: $testname: Volume group $vg_name already exists." >> $LOGS 17 | return $test_status 18 | fi 19 | 20 | cat << EOF > /etc/sysconfig/docker-storage-setup 21 | DEVS="$devs" 22 | VG=$vg_name 23 | DOCKER_ROOT_VOLUME=yes 24 | EOF 25 | 26 | # Run container-storage-setup 27 | $CSSBIN >> $LOGS 2>&1 28 | 29 | # Test failed. 30 | if [ $? -ne 0 ]; then 31 | echo "ERROR: $testname: $CSSBIN failed." >> $LOGS 32 | cleanup_all $vg_name $docker_root_lv_name $mount_path "$devs" 33 | return $test_status 34 | fi 35 | 36 | $CSSBIN --reset >> $LOGS 2>&1 37 | # Test failed. 38 | if [ $? -ne 0 ]; then 39 | echo "ERROR: $testname: $CSSBIN --reset failed." 
>> $LOGS 40 | cleanup_all $vg_name $docker_root_lv_name $mount_path "$devs" 41 | return $test_status 42 | fi 43 | 44 | if ! everything_clean $vg_name $docker_root_lv_name $mount_filename;then 45 | echo "ERROR: $testname: $CSSBIN --reset did not cleanup everything as needed." >> $LOGS 46 | cleanup_all $vg_name $docker_root_lv_name $mount_path "$devs" 47 | return $test_status 48 | fi 49 | cleanup $vg_name "$devs" 50 | return 0 51 | } 52 | 53 | everything_clean(){ 54 | local vg_name=$1 55 | local docker_root_lv_name=$2 56 | local mount_filename=$3 57 | if [ -e "/etc/sysconfig/docker-storage" ] || [ -e "/etc/systemd/system/${mount_filename}" ]; then 58 | return 1 59 | fi 60 | if lv_exists "$vg_name" "$docker_root_lv_name";then 61 | return 1 62 | fi 63 | return 0 64 | } 65 | 66 | cleanup_all(){ 67 | local vg_name=$1 68 | local lv_name=$2 69 | local mount_path=$3 70 | local devs=$4 71 | 72 | umount $mount_path >> $LOGS 2>&1 73 | lvchange -an $vg_name/${lv_name} >> $LOGS 2>&1 74 | lvremove $vg_name/${lv_name} >> $LOGS 2>&1 75 | 76 | cleanup_mount_file $mount_path 77 | cleanup $vg_name "$devs" 78 | } 79 | 80 | #If a user has specified DOCKER_ROOT_VOLUME=yes 81 | #container-storage-setup sets up a logical volume 82 | #named "docker-root-lv" and mounts it on docker 83 | #root directory. This function tests if 84 | #`container-storage-setup --reset` cleans it up properly. 85 | test_reset_docker_root_volume 86 | -------------------------------------------------------------------------------- /tests/012-test-use-devs-to-create-container-root-volume.sh: -------------------------------------------------------------------------------- 1 | source $SRCDIR/libtest.sh 2 | 3 | # Test CONTAINER_ROOT_LV_NAME and CONTAINER_ROOT_LV_MOUNT_PATH directives. 4 | # Returns 0 on success and 1 on failure. 
5 | test_container_root_volume() { 6 | local devs=$TEST_DEVS 7 | local test_status=1 8 | local testname=`basename "$0"` 9 | local vg_name="css-test-foo" 10 | local root_lv_name="container-root-lv" 11 | local root_lv_mount_path="/var/lib/containers" 12 | 13 | # Error out if any pre-existing volume group vg named css-test-foo 14 | if vg_exists "$vg_name"; then 15 | echo "ERROR: $testname: Volume group $vg_name already exists." >> $LOGS 16 | return $test_status 17 | fi 18 | 19 | # Create config file 20 | cat << EOF > /etc/sysconfig/docker-storage-setup 21 | DEVS="$devs" 22 | VG=$vg_name 23 | CONTAINER_ROOT_LV_NAME=$root_lv_name 24 | CONTAINER_ROOT_LV_MOUNT_PATH=$root_lv_mount_path 25 | EOF 26 | 27 | # Run container-storage-setup 28 | $CSSBIN >> $LOGS 2>&1 29 | 30 | # Test failed. 31 | if [ $? -ne 0 ]; then 32 | echo "ERROR: $testname: $CSSBIN failed." >> $LOGS 33 | cleanup_all $vg_name $root_lv_name $root_lv_mount_path "$devs" 34 | return $test_status 35 | fi 36 | 37 | # Make sure $CONTAINER_ROOT_LV_NAME {container-root-lv} got created 38 | # successfully. 39 | if ! lv_exists "$vg_name" "$root_lv_name"; then 40 | echo "ERROR: $testname: Logical Volume $root_lv_name does not exist." >> $LOGS 41 | cleanup_all $vg_name $root_lv_name $root_lv_mount_path "$devs" 42 | return $test_status 43 | fi 44 | 45 | # Make sure $CONTAINER_ROOT_LV_NAME {container-root-lv} is 46 | # mounted on $CONTAINER_ROOT_LV_MOUNT_PATH {/var/lib/containers} 47 | local mnt 48 | mnt=$(findmnt -n -o TARGET --first-only --source /dev/${vg_name}/${root_lv_name}) 49 | if [ "$mnt" != "$root_lv_mount_path" ];then 50 | echo "ERROR: $testname: Logical Volume $root_lv_name is not mounted on $root_lv_mount_path." 
>> $LOGS 51 | cleanup_all $vg_name $root_lv_name $root_lv_mount_path "$devs" 52 | return $test_status 53 | fi 54 | 55 | cleanup_all $vg_name $root_lv_name $root_lv_mount_path "$devs" 56 | return 0 57 | } 58 | 59 | cleanup_all(){ 60 | local vg_name=$1 61 | local lv_name=$2 62 | local mount_path=$3 63 | local devs=$4 64 | 65 | umount $mount_path >> $LOGS 2>&1 66 | lvchange -an $vg_name/${lv_name} >> $LOGS 2>&1 67 | lvremove $vg_name/${lv_name} >> $LOGS 2>&1 68 | 69 | cleanup_mount_file $mount_path 70 | cleanup $vg_name "$devs" 71 | } 72 | 73 | # This test will check if a user set 74 | # CONTAINER_ROOT_LV_NAME="container-root-lv" and 75 | # CONTAINER_ROOT_LV_MOUNT_PATH="/var/lib/containers", then 76 | # container-storage-setup would create a logical volume named 77 | # "container-root-lv" and mount it on "/var/lib/containers". 78 | test_container_root_volume 79 | 80 | -------------------------------------------------------------------------------- /tests/013-test-container-root-volume-cleanup.sh: -------------------------------------------------------------------------------- 1 | source $SRCDIR/libtest.sh 2 | 3 | # Test "container-storage-setup --reset" for CONTAINER_ROOT_LV_NAME= 4 | # and CONTAINER_ROOT_LV_MOUNT_PATH= directives. 5 | # Returns 0 on success and 1 on failure. 6 | test_reset_container_root_volume() { 7 | local devs=${TEST_DEVS} 8 | local test_status=1 9 | local testname=`basename "$0"` 10 | local vg_name="css-test-foo" 11 | local root_lv_name="container-root-lv" 12 | local root_lv_mount_path="/var/lib/containers" 13 | local mount_filename="var-lib-containers.mount" 14 | 15 | # Error out if any pre-existing volume group vg named css-test-foo 16 | if vg_exists "$vg_name"; then 17 | echo "ERROR: $testname: Volume group $vg_name already exists." 
>> $LOGS 18 | return $test_status 19 | fi 20 | 21 | cat << EOF > /etc/sysconfig/docker-storage-setup 22 | DEVS="$devs" 23 | VG=$vg_name 24 | CONTAINER_ROOT_LV_NAME=$root_lv_name 25 | CONTAINER_ROOT_LV_MOUNT_PATH=$root_lv_mount_path 26 | EOF 27 | 28 | # Run container-storage-setup 29 | $CSSBIN >> $LOGS 2>&1 30 | 31 | # Test failed. 32 | if [ $? -ne 0 ]; then 33 | echo "ERROR: $testname: $CSSBIN failed." >> $LOGS 34 | cleanup_all $vg_name $root_lv_name $root_lv_mount_path "$devs" 35 | return $test_status 36 | fi 37 | 38 | $CSSBIN --reset >> $LOGS 2>&1 39 | # Test failed. 40 | if [ $? -ne 0 ]; then 41 | echo "ERROR: $testname: $CSSBIN --reset failed." >> $LOGS 42 | cleanup_all $vg_name $root_lv_name $root_lv_mount_path "$devs" 43 | return $test_status 44 | fi 45 | 46 | if ! everything_clean $vg_name $root_lv_name $mount_filename;then 47 | echo "ERROR: $testname: $CSSBIN --reset did not cleanup everything as needed." >> $LOGS 48 | cleanup_all $vg_name $root_lv_name $root_lv_mount_path "$devs" 49 | return $test_status 50 | fi 51 | cleanup $vg_name "$devs" 52 | return 0 53 | } 54 | 55 | everything_clean(){ 56 | local vg_name=$1 57 | local lv_name=$2 58 | local mount_filename=$3 59 | if [ -e "/etc/sysconfig/docker-storage" ] || [ -e "/etc/systemd/system/${mount_filename}" ]; then 60 | return 1 61 | fi 62 | if lv_exists "$vg_name" "$lv_name";then 63 | return 1 64 | fi 65 | return 0 66 | } 67 | 68 | cleanup_all(){ 69 | local vg_name=$1 70 | local lv_name=$2 71 | local mount_path=$3 72 | local devs=$4 73 | 74 | umount $mount_path >> $LOGS 2>&1 75 | lvchange -an $vg_name/${lv_name} >> $LOGS 2>&1 76 | lvremove $vg_name/${lv_name} >> $LOGS 2>&1 77 | 78 | cleanup_mount_file $mount_path 79 | cleanup $vg_name "$devs" 80 | } 81 | 82 | 83 | # If a user has specified CONTAINER_ROOT_LV_NAME="container-root-lv" 84 | # and CONTAINER_ROOT_LV_MOUNT_PATH="/var/lib/containers", then 85 | # container-storage-setup would create a logical volume named 86 | # "container-root-lv" and mount it on 
"/var/lib/containers". 87 | # This function tests if `container-storage-setup --reset` 88 | # cleans it up properly. 89 | test_reset_container_root_volume 90 | -------------------------------------------------------------------------------- /tests/014-test-use-loop-to-create-thin-pool.sh: -------------------------------------------------------------------------------- 1 | source $SRCDIR/libtest.sh 2 | 3 | # Test DEVS= directive. Returns 0 on success and 1 on failure. 4 | test_devs() { 5 | local lbdevice tmpfile 6 | local test_status=1 7 | local testname=`basename "$0"` 8 | local vg_name="css-test-foo" 9 | 10 | # Error out if any pre-existing volume group vg named css-test-foo 11 | if vg_exists "$vg_name"; then 12 | echo "ERROR: $testname: Volume group $vg_name already exists." >> $LOGS 13 | return $test_status 14 | fi 15 | 16 | # Create loopback device. 17 | tmpfile=$(mktemp /tmp/c-s-s.XXXXXX) 18 | truncate --size=6G $tmpfile 19 | lbdevice=$(losetup -f) 20 | losetup --partscan $lbdevice $tmpfile 21 | 22 | # Create config file 23 | cat << EOF > /etc/sysconfig/docker-storage-setup 24 | DEVS="$lbdevice" 25 | VG=$vg_name 26 | EOF 27 | 28 | # Run container-storage-setup 29 | $CSSBIN >> $LOGS 2>&1 30 | 31 | # Test failed. 32 | if [ $? -ne 0 ]; then 33 | echo "ERROR: $testname: $CSSBIN failed." >> $LOGS 34 | cleanup $vg_name "$lbdevice" 35 | cleanup_loop_device "$tmpfile" "$lbdevice" 36 | return $test_status 37 | fi 38 | 39 | # Make sure volume group $VG got created 40 | if vg_exists "$vg_name"; then 41 | test_status=0 42 | else 43 | echo "ERROR: $testname: $CSSBIN failed. $vg_name was not created." 
>> $LOGS 44 | fi 45 | 46 | cleanup $vg_name "$lbdevice" 47 | cleanup_loop_device "$tmpfile" "$lbdevice" 48 | return $test_status 49 | } 50 | 51 | cleanup_loop_device() { 52 | local tmpfile=$1 53 | local lbdevice=$2 54 | losetup -d $lbdevice 55 | rm $tmpfile > /dev/null 2>&1 56 | } 57 | 58 | test_devs 59 | -------------------------------------------------------------------------------- /tests/015-test-fail-loop-to-create-thin-pool.sh: -------------------------------------------------------------------------------- 1 | source $SRCDIR/libtest.sh 2 | 3 | test_fail_if_loop_partition_passed() { 4 | local lbdevice tmpfile 5 | local test_status=1 6 | local testname=`basename "$0"` 7 | local vg_name="css-test-foo" 8 | local tmplog=${WORKDIR}/tmplog 9 | local errmsg="Partition specification unsupported at this time." 10 | 11 | # Error out if any pre-existing volume group vg named css-test-foo 12 | if vg_exists "$vg_name"; then 13 | echo "ERROR: $testname: Volume group $vg_name already exists." >> $LOGS 14 | return $test_status 15 | fi 16 | 17 | # Create loopback device. 18 | tmpfile=$(mktemp /tmp/c-s-s.XXXXXX) 19 | truncate --size=6G $tmpfile 20 | lbdevice=$(losetup -f) 21 | losetup --partscan $lbdevice $tmpfile 22 | if ! create_partition $lbdevice;then 23 | echo "ERROR: Failed partitioning $lbdevice" 24 | cleanup_loop_device "$tmpfile" "$lbdevice" 25 | return $test_status 26 | fi 27 | 28 | cat << EOF > /etc/sysconfig/docker-storage-setup 29 | DEVS="${lbdevice}p1" 30 | VG=$vg_name 31 | EOF 32 | 33 | # Run container-storage-setup 34 | $CSSBIN > $tmplog 2>&1 35 | rc=$? 36 | cat $tmplog >> $LOGS 2>&1 37 | 38 | # Test failed. 39 | if [ $rc -ne 0 ]; then 40 | if grep --no-messages -q "$errmsg" $tmplog; then 41 | test_status=0 42 | else 43 | echo "ERROR: $testname: $CSSBIN Failed for a reason other then \"$errmsg\"" >> $LOGS 44 | fi 45 | else 46 | echo "ERROR: $testname: $CSSBIN Succeeded. Should have failed since ${lbdevice}p1 is a loop device partition." 
>> $LOGS 47 | fi 48 | cleanup $vg_name "$lbdevice" 49 | cleanup_loop_device "$tmpfile" "$lbdevice" 50 | return $test_status 51 | } 52 | 53 | cleanup_loop_device() { 54 | local tmpfile=$1 55 | local lbdevice=$2 56 | losetup -d $lbdevice 57 | rm $tmpfile > /dev/null 2>&1 58 | } 59 | 60 | # Make sure command fails if loop device partition /dev/loop0p1 is passed. 61 | test_fail_if_loop_partition_passed 62 | 63 | -------------------------------------------------------------------------------- /tests/016-test-gpt-partition-table-creation-2tb.sh: -------------------------------------------------------------------------------- 1 | source $SRCDIR/libtest.sh 2 | 3 | # Test GPT partition table gets created for disks bigger than 2TB. 4 | test_gpt() { 5 | local lbdevice tmpfile part_type 6 | local test_status=1 7 | local testname=`basename "$0"` 8 | local vg_name="css-test-foo" 9 | 10 | # Error out if any pre-existing volume group vg named css-test-foo 11 | if vg_exists "$vg_name"; then 12 | echo "ERROR: $testname: Volume group $vg_name already exists." >> $LOGS 13 | return $test_status 14 | fi 15 | 16 | # Create loopback device. 17 | tmpfile=$(mktemp /tmp/c-s-s.XXXXXX) 18 | truncate --size=3T $tmpfile 19 | lbdevice=$(losetup -f) 20 | losetup --partscan $lbdevice $tmpfile 21 | 22 | # Create config file 23 | cat << EOF > /etc/sysconfig/docker-storage-setup 24 | DEVS="$lbdevice" 25 | VG=$vg_name 26 | EOF 27 | 28 | # Run container-storage-setup 29 | $CSSBIN >> $LOGS 2>&1 30 | 31 | # Test failed. 32 | if [ $? -ne 0 ]; then 33 | echo "ERROR: $testname: $CSSBIN failed." >> $LOGS 34 | cleanup $vg_name "$lbdevice" 35 | cleanup_loop_device "$tmpfile" "$lbdevice" 36 | return $test_status 37 | fi 38 | 39 | # Make sure volume group $VG got created 40 | if ! vg_exists "$vg_name"; then 41 | echo "ERROR: $testname: $CSSBIN failed. $vg_name was not created." >> $LOGS 42 | fi 43 | 44 | # Make sure partition table type is gpt. 
45 | part_type=`fdisk -l "${lbdevice}" | grep "label type" | cut -d ":" -f2` 46 | # Get rid of blanks 47 | part_type=${part_type/ /} 48 | 49 | if [ "$part_type" == "gpt" ]; then 50 | test_status=0 51 | else 52 | echo "ERROR: $testname: $CSSBIN failed. Partition table type created is "$part_type", expected gpt." >> $LOGS 53 | fi 54 | 55 | cleanup $vg_name "$lbdevice" 56 | cleanup_loop_device "$tmpfile" "$lbdevice" 57 | return $test_status 58 | } 59 | 60 | cleanup_loop_device() { 61 | local tmpfile=$1 62 | local lbdevice=$2 63 | losetup -d $lbdevice 64 | rm $tmpfile > /dev/null 2>&1 65 | } 66 | 67 | test_gpt 68 | -------------------------------------------------------------------------------- /tests/017-test-storage-driver-change-to-overlay2-in-atomic.sh: -------------------------------------------------------------------------------- 1 | source $SRCDIR/libtest.sh 2 | 3 | # We recently changed default storage driver to overlay2. That change 4 | # takes affect only over fresh installation and not over upgrade. But 5 | # in atomic, even over upgrade it overwrites existing 6 | # /etc/sysconfig/docker-storage-setup file and after upgrade and reboot 7 | # we don't wait for thin pool as we exit early thinking storage driver 8 | # changed. 9 | 10 | # Make sure, even if storage driver changed, we try to bring up existing 11 | # thin pool. 12 | test_storage_driver_change() { 13 | local devs=$TEST_DEVS 14 | local test_status=1 15 | local testname=`basename "$0"` 16 | local vg_name="css-test-foo" 17 | 18 | # Error out if any pre-existing volume group vg named css-test-foo 19 | if vg_exists "$vg_name"; then 20 | echo "ERROR: $testname: Volume group $vg_name already exists." >> $LOGS 21 | return $test_status 22 | fi 23 | 24 | # Create config file 25 | cat << EOF > /etc/sysconfig/docker-storage-setup 26 | DEVS="$devs" 27 | VG=$vg_name 28 | EOF 29 | 30 | # Run container-storage-setup 31 | $CSSBIN >> $LOGS 2>&1 32 | 33 | # Test failed. 34 | if [ $? 
-ne 0 ]; then 35 | echo "ERROR: $testname: $CSSBIN failed." >> $LOGS 36 | cleanup $vg_name "$devs" 37 | return $test_status 38 | fi 39 | 40 | # Make sure volume group $VG got created 41 | if ! vg_exists "$vg_name"; then 42 | echo "ERROR: $testname: $CSSBIN failed. $vg_name was not created." >> $LOGS 43 | cleanup $vg_name "$devs" 44 | return $test_status 45 | fi 46 | 47 | # Overwrite config file 48 | cat << EOF > /etc/sysconfig/docker-storage-setup 49 | STORAGE_DRIVER=overlay2 50 | EOF 51 | # Run container-storage-setup 52 | $CSSBIN >> $LOGS 2>&1 53 | 54 | # Test failed. 55 | if [ $? -ne 0 ]; then 56 | echo "ERROR: $testname: $CSSBIN failed." >> $LOGS 57 | else 58 | test_status=0 59 | fi 60 | 61 | cleanup $vg_name "$devs" 62 | return $test_status 63 | } 64 | 65 | test_storage_driver_change 66 | 67 | -------------------------------------------------------------------------------- /tests/018-test-thinpool-reset-after-driver-change-to-overlay2-in-atomic.sh: -------------------------------------------------------------------------------- 1 | source $SRCDIR/libtest.sh 2 | 3 | # We recently changed default storage driver to overlay2. That change 4 | # takes affect only over fresh installation and not over upgrade. But 5 | # in atomic, even over upgrade it overwrites existing 6 | # /etc/sysconfig/docker-storage-setup file and after upgrade storage reset 7 | # does not reset thin pool thinking storage driver is not devicemapper. 8 | 9 | # Make sure thinpool can be reset even after atomic upgrade. 10 | test_storage_driver_reset() { 11 | local devs=$TEST_DEVS 12 | local test_status=1 13 | local testname=`basename "$0"` 14 | local vg_name="css-test-foo" 15 | 16 | # Error out if any pre-existing volume group vg named css-test-foo 17 | if vg_exists "$vg_name"; then 18 | echo "ERROR: $testname: Volume group $vg_name already exists." 
>> $LOGS 19 | return $test_status 20 | fi 21 | 22 | # Create config file 23 | cat << EOF > /etc/sysconfig/docker-storage-setup 24 | DEVS="$devs" 25 | VG=$vg_name 26 | EOF 27 | 28 | # Run container-storage-setup 29 | $CSSBIN >> $LOGS 2>&1 30 | 31 | # Test failed. 32 | if [ $? -ne 0 ]; then 33 | echo "ERROR: $testname: $CSSBIN failed." >> $LOGS 34 | cleanup $vg_name "$devs" 35 | return $test_status 36 | fi 37 | 38 | # Make sure volume group $VG got created 39 | if ! vg_exists "$vg_name"; then 40 | echo "ERROR: $testname: $CSSBIN failed. $vg_name was not created." >> $LOGS 41 | cleanup $vg_name "$devs" 42 | return $test_status 43 | fi 44 | 45 | # Overwrite config file 46 | cat << EOF > /etc/sysconfig/docker-storage-setup 47 | STORAGE_DRIVER=overlay2 48 | EOF 49 | # Reset storage 50 | $CSSBIN --reset >> $LOGS 2>&1 51 | 52 | # Test failed. 53 | if [ $? -eq 0 ]; then 54 | if [ -e /etc/sysconfig/docker-storage ]; then 55 | echo "ERROR: $testname: $CSSBIN failed. /etc/sysconfig/docker-storage still exists." >> $LOGS 56 | else 57 | if lv_exists $vg_name "docker-pool"; then 58 | echo "ERROR: $testname: Thin pool docker-pool still exists." >> $LOGS 59 | else 60 | test_status=0 61 | fi 62 | fi 63 | fi 64 | 65 | if [ $test_status -ne 0 ]; then 66 | echo "ERROR: $testname: $CSSBIN --reset failed." >> $LOGS 67 | fi 68 | 69 | cleanup $vg_name "$devs" 70 | return $test_status 71 | } 72 | 73 | test_storage_driver_reset 74 | 75 | -------------------------------------------------------------------------------- /tests/101-test-use-devs-to-create-thin-pool.sh: -------------------------------------------------------------------------------- 1 | source $SRCDIR/libtest.sh 2 | 3 | # Test DEVS= directive. Returns 0 on success and 1 on failure. 
# Test DEVS= directive: create a thin pool on the configured devices.
test_devs() {
  local devs=$TEST_DEVS
  local test_status=1
  local testname=`basename "$0"`
  local vg_name="css-test-foo"
  local infile=${WORKDIR}/container-storage-setup
  local outfile=${WORKDIR}/container-storage

  # Error out if any pre-existing volume group vg named css-test-foo
  if vg_exists "$vg_name"; then
    echo "ERROR: $testname: Volume group $vg_name already exists." >> $LOGS
    return $test_status
  fi

  # Create config file
  cat << EOF > $infile
DEVS="$devs"
VG=$vg_name
CONTAINER_THINPOOL=container-thinpool
EOF

  local cmd="$CSSBIN create -o $outfile $CSS_TEST_CONFIG $infile"

  # Run container-storage-setup
  $cmd >> $LOGS 2>&1

  # Test failed.
  if [ $? -ne 0 ]; then
    echo "ERROR: $testname: $cmd failed." >> $LOGS
    cleanup $vg_name "$devs" "$infile" "$outfile"
    return $test_status
  fi

  # Make sure volume group $VG got created
  if vg_exists "$vg_name"; then
    test_status=0
  else
    echo "ERROR: $testname: $cmd failed. $vg_name was not created." >> $LOGS
  fi

  cleanup $vg_name "$devs" "$infile" "$outfile"
  return $test_status
}

test_devs

-------------------------------------------------------------------------------- /tests/102-test-reject-disk-with-lvm-signature.sh: --------------------------------------------------------------------------------
source $SRCDIR/libtest.sh

# Make sure a disk with lvm signature is rejected and is not overridden
# by css. Returns 0 on success and 1 on failure.
test_lvm_sig() {
  local devs=$TEST_DEVS dev
  local test_status=1
  local testname=`basename "$0"`
  local vg_name="css-test-foo"
  local infile=${WORKDIR}/container-storage-setup
  local outfile=${WORKDIR}/container-storage

  # Error out if any pre-existing volume group vg named css-test-foo
  if vg_exists "$vg_name"; then
    echo "ERROR: $testname: Volume group $vg_name already exists." >> $LOGS
    return $test_status
  fi

  cat << EOF > ${infile}
DEVS="$devs"
VG=$vg_name
CONTAINER_THINPOOL=container-thinpool
EOF

  # create lvm signatures on disks
  for dev in $devs; do
    pvcreate -f $dev >> $LOGS 2>&1
  done

  # Run $CSSBIN
  local cmd="$CSSBIN create -o $outfile $CSS_TEST_CONFIG $infile"
  $cmd >> $LOGS 2>&1

  # Css should fail. If it did not, then test failed. This is very crude
  # check though as css can fail for so many reasons. A more precise check
  # would be to check for exact error message.
  [ $? -ne 0 ] && test_status=0

  cleanup $vg_name "$devs" "$infile" "$outfile"
  return $test_status
}

# Make sure a disk with lvm signature is rejected and is not overridden
# by css. Returns 0 on success and 1 on failure.

test_lvm_sig

-------------------------------------------------------------------------------- /tests/103-test-override-signature-wipes-existing-signatures.sh: --------------------------------------------------------------------------------
source $SRCDIR/libtest.sh

test_override_signatures() {
  local devs=$TEST_DEVS dev
  local test_status=1
  local testname=`basename "$0"`
  local vg_name="css-test-foo"
  local infile=${WORKDIR}/container-storage-setup
  local outfile=${WORKDIR}/container-storage

  # Error out if vg_name VG exists already
  if vg_exists "$vg_name"; then
    echo "ERROR: $testname: Volume group $vg_name already exists."
>> $LOGS 14 | return $test_status 15 | fi 16 | 17 | cat << EOF > $infile 18 | DEVS="$devs" 19 | VG=$vg_name 20 | WIPE_SIGNATURES=true 21 | CONTAINER_THINPOOL=container-thinpool 22 | EOF 23 | 24 | # create lvm signatures on disks 25 | for dev in $devs; do 26 | pvcreate -f $dev >> $LOGS 2>&1 27 | done 28 | 29 | # Run container-storage-setup 30 | local cmd="$CSSBIN create -o $outfile $CSS_TEST_CONFIG $infile" 31 | $cmd >> $LOGS 2>&1 32 | 33 | # Test failed. 34 | if [ $? -ne 0 ]; then 35 | echo "ERROR: $testname: $cmd failed." >> $LOGS 36 | cleanup $vg_name "$devs" "$infile" "$outfile" 37 | return $test_status 38 | fi 39 | 40 | # Make sure volume group $VG got created. 41 | if vg_exists "$vg_name"; then 42 | test_status=0 43 | else 44 | echo "ERROR: $testname: $cmd failed. $vg_name was not created." >> $LOGS 45 | fi 46 | 47 | cleanup $vg_name "$devs" "$infile" "$outfile" 48 | return $test_status 49 | } 50 | 51 | # Create a disk with some signature, say lvm signature and make sure 52 | # override signature can override that, wipe signature and create thin 53 | # pool. 54 | test_override_signatures 55 | -------------------------------------------------------------------------------- /tests/105-test-devmapper-cleanup.sh: -------------------------------------------------------------------------------- 1 | source $SRCDIR/libtest.sh 2 | # Test "container-storage-setup --reset". Returns 0 on success and 1 on failure. 3 | test_reset_devmapper() { 4 | local devs=${TEST_DEVS} 5 | local test_status=1 6 | local testname=`basename "$0"` 7 | local vg_name="css-test-foo" 8 | local infile=${WORKDIR}/container-storage-setup 9 | local outfile=${WORKDIR}/container-storage 10 | 11 | # Error out if any pre-existing volume group vg named css-test-foo 12 | if vg_exists "$vg_name"; then 13 | echo "ERROR: $testname: Volume group $vg_name already exists." 
>> $LOGS 14 | return $test_status 15 | fi 16 | 17 | cat << EOF > $infile 18 | DEVS="$devs" 19 | VG=$vg_name 20 | CONTAINER_THINPOOL=container-thinpool 21 | EOF 22 | 23 | # Run container-storage-setup 24 | local create_cmd="$CSSBIN create -o $outfile $CSS_TEST_CONFIG $infile" 25 | local remove_cmd="$CSSBIN remove $CSS_TEST_CONFIG" 26 | $create_cmd >> $LOGS 2>&1 27 | 28 | # Test failed. 29 | if [ $? -ne 0 ]; then 30 | echo "ERROR: $testname: $CSSBIN failed." >> $LOGS 31 | cleanup $vg_name "$devs" "$infile" "$outfile" 32 | return $test_status 33 | fi 34 | 35 | # Make sure thinpool got created with the specified name CONTAINER_THINPOOL 36 | if lv_exists $vg_name "container-thinpool"; then 37 | $remove_cmd >> $LOGS 2>&1 38 | # Test failed. 39 | if [ $? -eq 0 ]; then 40 | if lv_exists $vg_name "container-thinpool"; then 41 | echo "ERROR: $testname: Thin pool container-thinpool still exists." >> $LOGS 42 | else 43 | test_status=0 44 | fi 45 | fi 46 | 47 | if [ $test_status -ne 0 ]; then 48 | echo "ERROR: $testname: $remove_cmd failed." >> $LOGS 49 | fi 50 | else 51 | echo "ERROR: $testname: Thin pool container-thinpool did not get created." >> $LOGS 52 | fi 53 | 54 | cleanup $vg_name "$devs" "$infile" "$outfile" 55 | 56 | return $test_status 57 | } 58 | 59 | # Create a devicemapper docker backend and then make sure the 60 | # `container-storage-setup --reset` 61 | # cleans it up properly. 62 | test_reset_devmapper 63 | -------------------------------------------------------------------------------- /tests/106-test-overlay-cleanup.sh: -------------------------------------------------------------------------------- 1 | source $SRCDIR/libtest.sh 2 | 3 | # Test "container-storage-setup --reset". Returns 0 on success and 1 on failure. 
# Test "container-storage-setup --reset" for the overlay driver.
# Returns 0 on success and 1 on failure.
test_reset_overlay() {
  local test_status=0
  local testname=`basename "$0"`
  local infile=${WORKDIR}/container-storage-setup
  local outfile=${WORKDIR}/container-storage

  cat << EOF > $infile
STORAGE_DRIVER=overlay
EOF

  # Run container-storage-setup
  local create_cmd="$CSSBIN create -o $outfile $CSS_TEST_CONFIG $infile"
  local remove_cmd="$CSSBIN remove $CSS_TEST_CONFIG"

  $create_cmd >> $LOGS 2>&1

  # Test failed.
  if [ $? -ne 0 ]; then
    echo "ERROR: $testname: $create_cmd failed." >> $LOGS
    rm -f $infile $outfile
    return 1
  fi

  $remove_cmd >> $LOGS 2>&1
  if [ $? -ne 0 ]; then
    # Test failed.
    echo "ERROR: $testname: $remove_cmd failed." >> $LOGS
    test_status=1
  fi

  rm -f $infile $outfile
  return $test_status
}

# Create a overlay backend and then make sure the
# container-storage-setup --reset
# cleans it up properly.
test_reset_overlay

-------------------------------------------------------------------------------- /tests/107-test-setting-extra-opts.sh: --------------------------------------------------------------------------------
source $SRCDIR/libtest.sh

# Test that the user-specified options stored in
# EXTRA_STORAGE_OPTIONS actually end up in
# the storage config file, appended to the variable
# STORAGE_OPTIONS in $outfile
test_set_extra_opts() {
  local devs=$TEST_DEVS
  local test_status=1
  local testname=`basename "$0"`
  local vg_name="css-test-foo"
  local extra_options="--storage-opt dm.fs=ext4"
  local infile=${WORKDIR}/container-storage-setup
  local outfile=${WORKDIR}/container-storage

  # Error out if volume group $vg_name exists already
  if vg_exists "$vg_name"; then
    echo "ERROR: $testname: Volume group $vg_name already exists" >> $LOGS
    return $test_status
  fi

  cat << EOF > $infile
DEVS="$devs"
VG=$vg_name
EXTRA_STORAGE_OPTIONS="$extra_options"
CONTAINER_THINPOOL=container-thinpool
EOF

  local create_cmd="$CSSBIN create -o $outfile $CSS_TEST_CONFIG $infile"

  # Run container-storage-setup
  $create_cmd >> $LOGS 2>&1

  # css failed
  if [ $? -ne 0 ]; then
    echo "ERROR: $testname: $create_cmd failed." >> $LOGS
    cleanup $vg_name "$devs" "$infile" "$outfile"
    return $test_status
  fi

  # Check if storage config file was created by css
  if [ ! -f $outfile ]; then
    echo "ERROR: $testname: $outfile file was not created." >> $LOGS
    cleanup $vg_name "$devs" "$infile" "$outfile"
    return $test_status
  fi

  source $outfile

  # Search for $extra_options in $STORAGE_OPTIONS. Quote the expansion
  # so the options string is passed to grep verbatim (unquoted it would
  # be word-split and glob-expanded by the shell).
  echo "$STORAGE_OPTIONS" | grep -q -- "$extra_options"

  # Successful appending to STORAGE_OPTIONS
  if [ $? -eq 0 ]; then
    test_status=0
  else
    echo "ERROR: $testname: failed. STORAGE_OPTIONS ${STORAGE_OPTIONS} does not include extra_options ${extra_options}." >> $LOGS
  fi

  cleanup $vg_name "$devs" "$infile" "$outfile"
  return $test_status
}

# Test that $EXTRA_STORAGE_OPTIONS is successfully written
# into $outfile
test_set_extra_opts

-------------------------------------------------------------------------------- /tests/108-test-overlay2-setup-cleanup.sh: --------------------------------------------------------------------------------
source $SRCDIR/libtest.sh

# Test "container-storage-setup --reset". Returns 0 on success and 1 on failure.
4 | test_reset_overlay2() { 5 | local test_status=0 6 | local testname=`basename "$0"` 7 | local infile=${WORKDIR}/container-storage-setup 8 | local outfile=${WORKDIR}/container-storage 9 | 10 | cat << EOF > $infile 11 | STORAGE_DRIVER=overlay2 12 | EOF 13 | 14 | local create_cmd="$CSSBIN create -o $outfile $CSS_TEST_CONFIG $infile" 15 | local remove_cmd="$CSSBIN remove $CSS_TEST_CONFIG" 16 | 17 | # Run container-storage-setup 18 | $create_cmd >> $LOGS 2>&1 19 | 20 | # Test failed. 21 | if [ $? -ne 0 ]; then 22 | echo "ERROR: $testname: $create_cmd failed." >> $LOGS 23 | rm -f $infile $outfile 24 | return 1 25 | fi 26 | 27 | if ! grep -q "overlay2" $outfile; then 28 | echo "ERROR: $testname: $outfile does not have string overlay2." >> $LOGS 29 | rm -f $infile $outfile 30 | return 1 31 | fi 32 | 33 | $remove_cmd >> $LOGS 2>&1 34 | if [ $? -ne 0 ]; then 35 | # Test failed. 36 | test_status=1 37 | echo "ERROR: $testname: $remove_cmd failed." >> $LOGS 38 | fi 39 | 40 | rm -f $infile $outfile 41 | return $test_status 42 | } 43 | 44 | # Create a overlay2 backend and then make sure the 45 | # container-storage-setup --reset 46 | # cleans it up properly. 47 | test_reset_overlay2 48 | -------------------------------------------------------------------------------- /tests/109-test-follow-symlinked-devices.sh: -------------------------------------------------------------------------------- 1 | source $SRCDIR/libtest.sh 2 | 3 | test_follow_symlinked_devices() { 4 | local devs dev 5 | local devlinks devlink 6 | local test_status=1 7 | local testname=`basename "$0"` 8 | local vg_name="css-test-foo" 9 | local infile=${WORKDIR}/container-storage-setup 10 | local outfile=${WORKDIR}/container-storage 11 | 12 | # Create a symlink for a device and try to follow it 13 | for dev in $TEST_DEVS; do 14 | if [ ! 
-h $dev ]; then
      devlink="/tmp/$(basename $dev)-test.$$"
      ln -s $dev $devlink

      dev=$devlink
      devlinks="$devlinks $dev"
    fi
    devs="$devs $dev"
    echo "Using symlinked devices: $dev -> $(readlink -e $dev)" >> $LOGS
  done

  cat << EOF > $infile
DEVS="$devs"
VG=$vg_name
CONTAINER_THINPOOL=container-thinpool
EOF
  # Run container-storage-setup
  local create_cmd="$CSSBIN create -o $outfile $CSS_TEST_CONFIG $infile"

  $create_cmd >> $LOGS 2>&1

  # Test failed.
  if [ $? -ne 0 ]; then
    echo "ERROR: $testname: $create_cmd failed." >> $LOGS
    cleanup_soft_links "$devlinks"
    cleanup $vg_name "$TEST_DEVS" "$infile" "$outfile"
    return $test_status
  fi

  # Make sure volume group $VG got created.
  if vg_exists "$vg_name"; then
    test_status=0
  else
    echo "ERROR: $testname: $create_cmd failed. $vg_name was not created." >> $LOGS
  fi

  cleanup_soft_links "$devlinks"
  cleanup $vg_name "$TEST_DEVS" "$infile" "$outfile"
  return $test_status
}

# Make sure symlinked disk names are supported.
test_follow_symlinked_devices

-------------------------------------------------------------------------------- /tests/110-test-fail-if-no-container-thinpool.sh: --------------------------------------------------------------------------------
source $SRCDIR/libtest.sh

# Make sure css errors out when the devicemapper driver is configured
# without CONTAINER_THINPOOL. Returns 0 on success and 1 on failure.
test_fail_if_no_container_thinpool() {
  local devs=$TEST_DEVS
  local test_status=1
  local rc
  local testname=`basename "$0"`
  local vg_name="css-test-foo"
  local infile=${WORKDIR}/container-storage-setup
  local outfile=${WORKDIR}/container-storage
  local tmplog=${WORKDIR}/tmplog
  local errmsg="CONTAINER_THINPOOL must be defined for the devicemapper storage driver."
  # Error out if any pre-existing volume group vg named css-test-foo
  if vg_exists "$vg_name"; then
    echo "ERROR: $testname: Volume group $vg_name already exists." >> $LOGS
    return $test_status
  fi

  # Deliberately omit CONTAINER_THINPOOL from the config.
  cat << EOF > $infile
DEVS="$devs"
VG=$vg_name
EOF
  # Run container-storage-setup
  local create_cmd="$CSSBIN create -o $outfile $CSS_TEST_CONFIG $infile"

  $create_cmd > $tmplog 2>&1
  rc=$?
  cat $tmplog >> $LOGS 2>&1

  # Command must fail, and for the right reason.
  if [ $rc -ne 0 ]; then
    if grep --no-messages -q "$errmsg" $tmplog; then
      test_status=0
    else
      echo "ERROR: $testname: $CSSBIN Failed for a reason other than \"$errmsg\"" >> $LOGS
    fi
  else
    echo "ERROR: $testname: $CSSBIN Succeeded. Should have failed with no CONTAINER_THINPOOL specified" >> $LOGS
  fi
  cleanup $vg_name "$devs" "$infile" "$outfile"
  return $test_status
}

# Make sure command fails if no CONTAINER_THINPOOL is specified
test_fail_if_no_container_thinpool

-------------------------------------------------------------------------------- /tests/112-test-use-devs-to-create-container-root-volume.sh: --------------------------------------------------------------------------------
source $SRCDIR/libtest.sh

# Test CONTAINER_ROOT_LV_NAME and CONTAINER_ROOT_LV_MOUNT_PATH directives.
# Returns 0 on success and 1 on failure.
test_container_root_volume() {
  local devs=$TEST_DEVS
  local test_status=1
  local testname=`basename "$0"`
  local vg_name="css-test-foo"
  local root_lv_name="container-root-lv"
  local root_lv_mount_path="/var/lib/containers"
  local infile=${WORKDIR}/container-storage-setup
  local outfile=${WORKDIR}/container-storage

  # Error out if any pre-existing volume group vg named css-test-foo
  if vg_exists "$vg_name"; then
    echo "ERROR: $testname: Volume group $vg_name already exists."
>> $LOGS 18 | return $test_status 19 | fi 20 | 21 | # Create config file 22 | cat << EOF > $infile 23 | DEVS="$devs" 24 | VG=$vg_name 25 | CONTAINER_ROOT_LV_NAME=$root_lv_name 26 | CONTAINER_ROOT_LV_MOUNT_PATH=$root_lv_mount_path 27 | CONTAINER_THINPOOL=container-thinpool 28 | EOF 29 | 30 | # Run container-storage-setup 31 | local create_cmd="$CSSBIN create -o $outfile $CSS_TEST_CONFIG $infile" 32 | 33 | $create_cmd >> $LOGS 2>&1 34 | 35 | # Test failed. 36 | if [ $? -ne 0 ]; then 37 | echo "ERROR: $testname: $create_cmd failed." >> $LOGS 38 | cleanup_all $vg_name $root_lv_name $root_lv_mount_path "$devs" $infile $outfile 39 | 40 | return $test_status 41 | fi 42 | 43 | # Make sure $CONTAINER_ROOT_LV_NAME {container-root-lv} got created 44 | # successfully. 45 | if ! lv_exists "$vg_name" "$root_lv_name"; then 46 | echo "ERROR: $testname: Logical Volume $root_lv_name does not exist." >> $LOGS 47 | cleanup_all $vg_name $root_lv_name $root_lv_mount_path "$devs" $infile $outfile 48 | return $test_status 49 | fi 50 | 51 | # Make sure $CONTAINER_ROOT_LV_NAME {container-root-lv} is 52 | # mounted on $CONTAINER_ROOT_LV_MOUNT_PATH {/var/lib/containers} 53 | local mnt 54 | mnt=$(findmnt -n -o TARGET --first-only --source /dev/${vg_name}/${root_lv_name}) 55 | if [ "$mnt" != "$root_lv_mount_path" ];then 56 | echo "ERROR: $testname: Logical Volume $root_lv_name is not mounted on $root_lv_mount_path." 
>> $LOGS 57 | cleanup_all $vg_name $root_lv_name $root_lv_mount_path "$devs" $infile $outfile 58 | return $test_status 59 | fi 60 | 61 | cleanup_all $vg_name $root_lv_name $root_lv_mount_path "$devs" $infile $outfile 62 | return 0 63 | } 64 | 65 | cleanup_all(){ 66 | local vg_name=$1 67 | local lv_name=$2 68 | local mount_path=$3 69 | local devs=$4 70 | local infile=$5 71 | local outfile=$6 72 | 73 | umount $mount_path >> $LOGS 2>&1 74 | lvchange -an $vg_name/${lv_name} >> $LOGS 2>&1 75 | lvremove $vg_name/${lv_name} >> $LOGS 2>&1 76 | 77 | cleanup_mount_file $mount_path 78 | cleanup $vg_name "$devs" "$infile" "$outfile" 79 | } 80 | 81 | # This test will check if a user set 82 | # CONTAINER_ROOT_LV_NAME="container-root-lv" and 83 | # CONTAINER_ROOT_LV_MOUNT_PATH="/var/lib/containers", then 84 | # container-storage-setup would create a logical volume named 85 | # "container-root-lv" and mount it on "/var/lib/containers". 86 | test_container_root_volume 87 | -------------------------------------------------------------------------------- /tests/113-test-container-root-volume-cleanup.sh: -------------------------------------------------------------------------------- 1 | source $SRCDIR/libtest.sh 2 | 3 | # Test "container-storage-setup --reset" for CONTAINER_ROOT_LV_NAME= 4 | # and CONTAINER_ROOT_LV_MOUNT_PATH= directives. 5 | # Returns 0 on success and 1 on failure. 6 | test_reset_container_root_volume() { 7 | local devs=${TEST_DEVS} 8 | local test_status=1 9 | local testname=`basename "$0"` 10 | local vg_name="css-test-foo" 11 | local root_lv_name="container-root-lv" 12 | local root_lv_mount_path="/var/lib/containers" 13 | local mount_filename="var-lib-containers.mount" 14 | local infile=${WORKDIR}/container-storage-setup 15 | local outfile=${WORKDIR}/container-storage 16 | 17 | # Error out if any pre-existing volume group vg named css-test-foo 18 | if vg_exists "$vg_name"; then 19 | echo "ERROR: $testname: Volume group $vg_name already exists." 
>> $LOGS 20 | return $test_status 21 | fi 22 | 23 | cat << EOF > $infile 24 | DEVS="$devs" 25 | VG=$vg_name 26 | CONTAINER_ROOT_LV_NAME=$root_lv_name 27 | CONTAINER_ROOT_LV_MOUNT_PATH=$root_lv_mount_path 28 | CONTAINER_THINPOOL=container-thinpool 29 | EOF 30 | 31 | # Run container-storage-setup 32 | local create_cmd="$CSSBIN create -o $outfile $CSS_TEST_CONFIG $infile" 33 | local remove_cmd="$CSSBIN remove $CSS_TEST_CONFIG" 34 | 35 | $create_cmd >> $LOGS 2>&1 36 | 37 | # Test failed. 38 | if [ $? -ne 0 ]; then 39 | echo "ERROR: $testname: $CSSBIN failed." >> $LOGS 40 | cleanup_all $vg_name $root_lv_name $root_lv_mount_path "$devs" $infile $outfile 41 | return $test_status 42 | fi 43 | 44 | $remove_cmd >> $LOGS 2>&1 45 | # Test failed. 46 | if [ $? -ne 0 ]; then 47 | echo "ERROR: $testname: $remove_cmd failed." >> $LOGS 48 | cleanup_all $vg_name $root_lv_name $root_lv_mount_path "$devs" $infile $outfile 49 | return $test_status 50 | fi 51 | 52 | if ! everything_clean $vg_name $root_lv_name $mount_filename;then 53 | echo "ERROR: $testname: $CSSBIN --reset did not cleanup everything as needed." 
>> $LOGS 54 | cleanup_all $vg_name $root_lv_name $root_lv_mount_path "$devs" $infile $outfile 55 | return $test_status 56 | fi 57 | cleanup $vg_name "$devs" "$infile" "$outfile" 58 | return 0 59 | } 60 | 61 | everything_clean(){ 62 | local vg_name=$1 63 | local lv_name=$2 64 | local mount_filename=$3 65 | if [ -e "/etc/systemd/system/${mount_filename}" ]; then 66 | return 1 67 | fi 68 | if lv_exists "$vg_name" "$lv_name";then 69 | return 1 70 | fi 71 | return 0 72 | } 73 | 74 | cleanup_all(){ 75 | local vg_name=$1 76 | local lv_name=$2 77 | local mount_path=$3 78 | local devs=$4 79 | local infile=$5 80 | local outfile=$6 81 | 82 | umount $mount_path >> $LOGS 2>&1 83 | lvchange -an $vg_name/${lv_name} >> $LOGS 2>&1 84 | lvremove $vg_name/${lv_name} >> $LOGS 2>&1 85 | 86 | cleanup_mount_file $mount_path 87 | cleanup $vg_name "$devs" "$infile" "$outfile" 88 | } 89 | 90 | 91 | # If a user has specified CONTAINER_ROOT_LV_NAME="container-root-lv" 92 | # and CONTAINER_ROOT_LV_MOUNT_PATH="/var/lib/containers", then 93 | # container-storage-setup would create a logical volume named 94 | # "container-root-lv" and mount it on "/var/lib/containers". 95 | # This function tests if `container-storage-setup --reset` 96 | # cleans it up properly. 97 | test_reset_container_root_volume 98 | -------------------------------------------------------------------------------- /tests/114-test-use-loop-to-create-thin-pool.sh: -------------------------------------------------------------------------------- 1 | source $SRCDIR/libtest.sh 2 | 3 | # Test DEVS= directive. Returns 0 on success and 1 on failure. 
4 | test_devs() { 5 | local lbdevice tmpfile 6 | local test_status=1 7 | local testname=`basename "$0"` 8 | local vg_name="css-test-foo" 9 | local infile=${WORKDIR}/container-storage-setup 10 | local outfile=${WORKDIR}/container-storage 11 | 12 | # Error out if any pre-existing volume group vg named css-test-foo 13 | if vg_exists "$vg_name"; then 14 | echo "ERROR: $testname: Volume group $vg_name already exists." >> $LOGS 15 | return $test_status 16 | fi 17 | 18 | # Create loopback device. 19 | tmpfile=$(mktemp /tmp/c-s-s.XXXXXX) 20 | truncate --size=6G $tmpfile 21 | lbdevice=$(losetup -f) 22 | losetup --partscan $lbdevice $tmpfile 23 | 24 | # Create config file 25 | cat << EOF > $infile 26 | DEVS="$lbdevice" 27 | VG=$vg_name 28 | CONTAINER_THINPOOL=container-thinpool 29 | EOF 30 | 31 | local create_cmd="$CSSBIN create -o $outfile $CSS_TEST_CONFIG $infile" 32 | 33 | # Run container-storage-setup 34 | $create_cmd >> $LOGS 2>&1 35 | 36 | # Test failed. 37 | if [ $? -ne 0 ]; then 38 | echo "ERROR: $testname: $create_cmd failed." >> $LOGS 39 | cleanup $vg_name "$lbdevice" "$infile" "$outfile" 40 | cleanup_loop_device "$tmpfile" "$lbdevice" 41 | return $test_status 42 | fi 43 | 44 | # Make sure volume group $VG got created 45 | if vg_exists "$vg_name"; then 46 | test_status=0 47 | else 48 | echo "ERROR: $testname: $CSSBIN failed. $vg_name was not created." 
>> $LOGS 49 | fi 50 | 51 | cleanup $vg_name "$lbdevice" "$infile" "$outfile" 52 | cleanup_loop_device "$tmpfile" "$lbdevice" 53 | return $test_status 54 | } 55 | 56 | cleanup_loop_device() { 57 | local tmpfile=$1 58 | local lbdevice=$2 59 | losetup -d $lbdevice 60 | rm $tmpfile > /dev/null 2>&1 61 | } 62 | 63 | test_devs 64 | 65 | -------------------------------------------------------------------------------- /tests/115-test-fail-loop-to-create-thin-pool.sh: -------------------------------------------------------------------------------- 1 | source $SRCDIR/libtest.sh 2 | 3 | test_fail_if_loop_partition_passed() { 4 | local lbdevice tmpfile 5 | local test_status=1 6 | local testname=`basename "$0"` 7 | local vg_name="css-test-foo" 8 | local infile=${WORKDIR}/container-storage-setup 9 | local outfile=${WORKDIR}/container-storage 10 | local tmplog=${WORKDIR}/tmplog 11 | local errmsg="Partition specification unsupported at this time." 12 | 13 | # Error out if any pre-existing volume group vg named css-test-foo 14 | if vg_exists "$vg_name"; then 15 | echo "ERROR: $testname: Volume group $vg_name already exists." >> $LOGS 16 | return $test_status 17 | fi 18 | 19 | # Create loopback device. 20 | tmpfile=$(mktemp /tmp/c-s-s.XXXXXX) 21 | truncate --size=6G $tmpfile 22 | lbdevice=$(losetup -f) 23 | losetup --partscan $lbdevice $tmpfile 24 | if ! create_partition $lbdevice;then 25 | echo "ERROR: Failed partitioning $lbdevice" 26 | cleanup_loop_device "$tmpfile" "$lbdevice" 27 | return $test_status 28 | fi 29 | 30 | cat << EOF > $infile 31 | DEVS="${lbdevice}p1" 32 | VG=$vg_name 33 | CONTAINER_THINPOOL=container-thinpool 34 | EOF 35 | 36 | local create_cmd="$CSSBIN create -o $outfile $CSS_TEST_CONFIG $infile" 37 | # Run container-storage-setup 38 | $create_cmd >> $tmplog 2>&1 39 | rc=$? 40 | cat $tmplog >> $LOGS 2>&1 41 | 42 | # Test failed. 
43 | if [ $rc -ne 0 ]; then 44 | if grep --no-messages -q "$errmsg" $tmplog; then 45 | test_status=0 46 | else 47 | echo "ERROR: $testname: $CSSBIN Failed for a reason other then \"$errmsg\"" >> $LOGS 48 | fi 49 | else 50 | echo "ERROR: $testname: $CSSBIN Succeeded. Should have failed since ${lbdevice}p1 is a loop device partition." >> $LOGS 51 | fi 52 | cleanup $vg_name "$lbdevice" "$infile" "$outfile" 53 | cleanup_loop_device "$tmpfile" "$lbdevice" 54 | return $test_status 55 | } 56 | 57 | cleanup_loop_device() { 58 | local tmpfile=$1 59 | local lbdevice=$2 60 | losetup -d $lbdevice 61 | rm $tmpfile > /dev/null 2>&1 62 | } 63 | 64 | # Make sure command fails if loop device partition /dev/loop0p1 is passed. 65 | test_fail_if_loop_partition_passed 66 | 67 | -------------------------------------------------------------------------------- /tests/116-test-storage-driver-nil-create-remove.sh: -------------------------------------------------------------------------------- 1 | source $SRCDIR/libtest.sh 2 | 3 | test_storage_driver_none() { 4 | local test_status=1 5 | local testname=`basename "$0"` 6 | local infile=${WORKDIR}/container-storage-setup 7 | local outfile=${WORKDIR}/container-storage 8 | 9 | cat << EOF > $infile 10 | STORAGE_DRIVER="" 11 | EOF 12 | 13 | # Run container-storage-setup 14 | local create_cmd="$CSSBIN create $CSS_TEST_CONFIG $infile" 15 | local activate_cmd="$CSSBIN activate $CSS_TEST_CONFIG" 16 | local deactivate_cmd="$CSSBIN deactivate $CSS_TEST_CONFIG" 17 | local remove_cmd="$CSSBIN remove $CSS_TEST_CONFIG" 18 | local list_cmd="$CSSBIN list $CSS_TEST_CONFIG" 19 | 20 | $create_cmd >> $LOGS 2>&1 21 | 22 | # Test failed. 23 | if [ $? -ne 0 ]; then 24 | echo "ERROR: $testname: $create_cmd failed." >> $LOGS 25 | cleanup_all $infile $outfile 26 | return $test_status 27 | fi 28 | 29 | # Deactivate and Activate 30 | $deactivate_cmd >> $LOGS 2>&1 31 | if [ $? -ne 0 ]; then 32 | echo "ERROR: $testname: $deactivate_cmd failed." 
>> $LOGS 33 | cleanup_all $infile $outfile 34 | return $test_status 35 | fi 36 | 37 | $activate_cmd >> $LOGS 2>&1 38 | if [ $? -ne 0 ]; then 39 | echo "ERROR: $testname: $activate_cmd failed." >> $LOGS 40 | cleanup_all $infile $outfile 41 | return $test_status 42 | fi 43 | 44 | $remove_cmd >> $LOGS 2>&1 45 | # Test failed. 46 | if [ $? -ne 0 ]; then 47 | echo "ERROR: $testname: $remove_cmd failed." >> $LOGS 48 | cleanup_all $infile $outfile 49 | return $test_status 50 | fi 51 | 52 | # Make sure config is gone. List config command should fail. 53 | $list_cmd >> $LOGS 2>&1 54 | if [ $? -eq 0 ]; then 55 | echo "ERROR: $testname: Storage configuration $CSS_TEST_CONFIG is present even after removal." >> $LOGS 56 | cleanup_all $infile $outfile 57 | return $test_status 58 | fi 59 | 60 | cleanup_all "$infile" "$outfile" 61 | return 0 62 | } 63 | 64 | cleanup_all(){ 65 | local infile=$1 66 | local outfile=$2 67 | 68 | rm -f "$infile" "$outfile" 69 | rm -rf "$CSS_METADATA_DIR" 70 | } 71 | 72 | 73 | # Make sure STORAGE_DRIVER="" works with all commands. 74 | test_storage_driver_none 75 | -------------------------------------------------------------------------------- /tests/117-test-devmapper-activation-deactivation.sh: -------------------------------------------------------------------------------- 1 | source $SRCDIR/libtest.sh 2 | 3 | test_devmapper_deactivation_activation() { 4 | local devs=$TEST_DEVS 5 | local test_status=1 6 | local testname=`basename "$0"` 7 | local vg_name="css-test-foo" 8 | local infile=${WORKDIR}/container-storage-setup 9 | local outfile=${WORKDIR}/container-storage 10 | 11 | # Error out if any pre-existing volume group vg named css-test-foo 12 | if vg_exists "$vg_name"; then 13 | echo "ERROR: $testname: Volume group $vg_name already exists." 
>> $LOGS 14 | return $test_status 15 | fi 16 | 17 | # Create config file 18 | cat << EOF > $infile 19 | DEVS="$devs" 20 | VG=$vg_name 21 | CONTAINER_THINPOOL=container-thinpool 22 | EOF 23 | 24 | local create_cmd="$CSSBIN create -o $outfile $CSS_TEST_CONFIG $infile" 25 | local deactivate_cmd="$CSSBIN deactivate $CSS_TEST_CONFIG" 26 | local activate_cmd="$CSSBIN activate $CSS_TEST_CONFIG" 27 | local list_cmd="$CSSBIN list $CSS_TEST_CONFIG" 28 | local config_status 29 | 30 | # Run container-storage-setup 31 | $create_cmd >> $LOGS 2>&1 32 | 33 | # Test failed. 34 | if [ $? -ne 0 ]; then 35 | echo "ERROR: $testname: $create_cmd failed." >> $LOGS 36 | cleanup $vg_name "$devs" "$infile" "$outfile" 37 | return $test_status 38 | fi 39 | 40 | # Make sure volume group $VG got created 41 | if ! vg_exists "$vg_name"; then 42 | echo "ERROR: $testname: create operation failed. Volume group $vg_name was not created." >> $LOGS 43 | cleanup $vg_name "$devs" "$infile" "$outfile" 44 | return $test_status 45 | fi 46 | 47 | # Make sure thinpool got created 48 | if ! lv_exists "$vg_name" "container-thinpool";then 49 | echo "ERROR: $testname: create operation failed. Thinpool container-thinpool was not created." >> $LOGS 50 | cleanup $vg_name "$devs" "$infile" "$outfile" 51 | return $test_status 52 | fi 53 | 54 | # Deactivate storage config 55 | if ! $deactivate_cmd >> $LOGS 2>&1; then 56 | echo "ERROR: $testname: $deactivate_cmd failed." >> $LOGS 57 | cleanup $vg_name "$devs" "$infile" "$outfile" 58 | return $test_status 59 | fi 60 | 61 | # Make sure config state changed to inactive. 62 | config_status=`$list_cmd | grep "Status:" | cut -d " " -f2` 63 | if [ "$config_status" != "inactive" ]; then 64 | echo "error: $testname: configuration status is $config_status. It should be inactive after deactivation." >> $LOGS 65 | cleanup $vg_name "$devs" "$infile" "$outfile" 66 | return $test_status 67 | fi 68 | 69 | 70 | # Make sure thinpool lv got deactivated. 
71 | if lv_is_active "$vg_name" "container-thinpool";then 72 | echo "ERROR: $testname: deactivate operation failed. Volume $vg_name/container-thinpool is still active" >> $LOGS 73 | cleanup $vg_name "$devs" "$infile" "$outfile" 74 | return $test_status 75 | fi 76 | 77 | # Activate storage config 78 | if ! $activate_cmd >> $LOGS 2>&1; then 79 | echo "ERROR: $testname: $activate_cmd failed." >> $LOGS 80 | cleanup $vg_name "$devs" "$infile" "$outfile" 81 | return $test_status 82 | fi 83 | 84 | # Make sure config state changed to active. 85 | config_status=`$list_cmd | grep "Status:" | cut -d " " -f2` 86 | if [ "$config_status" != "active" ]; then 87 | echo "error: $testname: configuration status is $config_status. It should be active after activation." >> $LOGS 88 | cleanup $vg_name "$devs" "$infile" "$outfile" 89 | return $test_status 90 | fi 91 | 92 | # Make sure thinpool lv got activated. 93 | if ! lv_is_active "$vg_name" "container-thinpool";then 94 | echo "ERROR: $testname: activate operation failed. 
Volume $vg_name/container-thinpool is not active" >> $LOGS 95 | cleanup $vg_name "$devs" "$infile" "$outfile" 96 | return $test_status 97 | fi 98 | 99 | test_status=0 100 | cleanup $vg_name "$devs" "$infile" "$outfile" 101 | return $test_status 102 | } 103 | 104 | # Test if deactivation/activation works with storage driver devmapper 105 | test_devmapper_deactivation_activation 106 | -------------------------------------------------------------------------------- /tests/118-test-container-root-lv-activation-deactivation.sh: -------------------------------------------------------------------------------- 1 | source $SRCDIR/libtest.sh 2 | 3 | test_container_root_volume_activation_deactivation() { 4 | local devs=$TEST_DEVS 5 | local test_status=1 6 | local testname=`basename "$0"` 7 | local vg_name="css-test-foo" 8 | local root_lv_name="container-root-lv" 9 | local root_lv_mount_path="/var/lib/containers" 10 | local infile=${WORKDIR}/container-storage-setup 11 | local outfile=${WORKDIR}/container-storage 12 | 13 | # Error out if any pre-existing volume group vg named css-test-foo 14 | if vg_exists "$vg_name"; then 15 | echo "ERROR: $testname: Volume group $vg_name already exists." >> $LOGS 16 | return $test_status 17 | fi 18 | 19 | # Create config file 20 | cat << EOF > $infile 21 | DEVS="$devs" 22 | VG=$vg_name 23 | CONTAINER_ROOT_LV_NAME=$root_lv_name 24 | CONTAINER_ROOT_LV_MOUNT_PATH=$root_lv_mount_path 25 | CONTAINER_THINPOOL=container-thinpool 26 | EOF 27 | 28 | # Run container-storage-setup 29 | local create_cmd="$CSSBIN create -o $outfile $CSS_TEST_CONFIG $infile" 30 | local deactivate_cmd="$CSSBIN deactivate $CSS_TEST_CONFIG" 31 | local activate_cmd="$CSSBIN activate $CSS_TEST_CONFIG" 32 | local list_cmd="$CSSBIN list $CSS_TEST_CONFIG" 33 | local config_status 34 | 35 | $create_cmd >> $LOGS 2>&1 36 | 37 | # Test failed. 38 | if [ $? -ne 0 ]; then 39 | echo "ERROR: $testname: $create_cmd failed." 
>> $LOGS 40 | cleanup_all $vg_name $root_lv_name $root_lv_mount_path "$devs" $infile $outfile 41 | return $test_status 42 | fi 43 | 44 | # Make sure $CONTAINER_ROOT_LV_NAME {container-root-lv} got created 45 | # successfully. 46 | if ! lv_exists "$vg_name" "$root_lv_name"; then 47 | echo "ERROR: $testname: Logical Volume $root_lv_name does not exist." >> $LOGS 48 | cleanup_all $vg_name $root_lv_name $root_lv_mount_path "$devs" $infile $outfile 49 | return $test_status 50 | fi 51 | 52 | # Make sure $CONTAINER_ROOT_LV_NAME {container-root-lv} is 53 | # mounted on $CONTAINER_ROOT_LV_MOUNT_PATH {/var/lib/containers} 54 | local mnt 55 | mnt=$(findmnt -n -o TARGET --first-only --source /dev/${vg_name}/${root_lv_name}) 56 | if [ "$mnt" != "$root_lv_mount_path" ];then 57 | echo "ERROR: $testname: Logical Volume $root_lv_name is not mounted on $root_lv_mount_path." >> $LOGS 58 | cleanup_all $vg_name $root_lv_name $root_lv_mount_path "$devs" $infile $outfile 59 | return $test_status 60 | fi 61 | 62 | # Deactivate configuration 63 | $deactivate_cmd >> $LOGS 2>&1 64 | 65 | # Test failed. 66 | if [ $? -ne 0 ]; then 67 | echo "ERROR: $testname: $deactivate_cmd failed." >> $LOGS 68 | cleanup_all $vg_name $root_lv_name $root_lv_mount_path "$devs" $infile $outfile 69 | return $test_status 70 | fi 71 | 72 | # Make sure configuration status changed to "invactive" 73 | config_status=`$list_cmd | grep "Status:" | cut -d " " -f2` 74 | if [ "$config_status" != "inactive" ];then 75 | echo "error: $testname: configuration status is $config_status. It should be inactive after deactivation." >> $LOGS 76 | cleanup_all $vg_name $root_lv_name $root_lv_mount_path "$devs" $infile $outfile 77 | return $test_status 78 | fi 79 | 80 | # Make sure mount path got unmounted and volume got deactivated. 
81 | mnt=$(findmnt -n -o TARGET --first-only --source /dev/${vg_name}/${root_lv_name}) 82 | if [ -n "$mnt" ];then 83 | echo "error: $testname: logical volume $root_lv_name is still mounted at $mnt after deactivation." >> $LOGS 84 | cleanup_all $vg_name $root_lv_name $root_lv_mount_path "$devs" $infile $outfile 85 | return $test_status 86 | fi 87 | 88 | if lv_is_active $vg_name $root_lv_name; then 89 | echo "ERROR: $testname: Logical Volume $root_lv_name is still active after deactivation." >> $LOGS 90 | cleanup_all $vg_name $root_lv_name $root_lv_mount_path "$devs" $infile $outfile 91 | return $test_status 92 | fi 93 | 94 | # Activate configuration 95 | $activate_cmd >> $LOGS 2>&1 96 | 97 | # Test failed. 98 | if [ $? -ne 0 ]; then 99 | echo "ERROR: $testname: $activate_cmd failed." >> $LOGS 100 | cleanup_all $vg_name $root_lv_name $root_lv_mount_path "$devs" $infile $outfile 101 | return $test_status 102 | fi 103 | 104 | # Make sure configuration status changed to "active" 105 | config_status=`$list_cmd | grep "Status:" | cut -d " " -f2` 106 | if [ "$config_status" != "active" ];then 107 | echo "error: $testname: configuration status is $config_status. It should be active after activation." >> $LOGS 108 | cleanup_all $vg_name $root_lv_name $root_lv_mount_path "$devs" $infile $outfile 109 | return $test_status 110 | fi 111 | 112 | if ! lv_is_active $vg_name $root_lv_name; then 113 | echo "ERROR: $testname: Logical Volume $root_lv_name is not active after activation." >> $LOGS 114 | cleanup_all $vg_name $root_lv_name $root_lv_mount_path "$devs" $infile $outfile 115 | return $test_status 116 | fi 117 | 118 | mnt=$(findmnt -n -o TARGET --first-only --source /dev/${vg_name}/${root_lv_name}) 119 | if [ "$mnt" != "$root_lv_mount_path" ];then 120 | echo "ERROR: $testname: Logical Volume $root_lv_name is not mounted on $root_lv_mount_path." 
>> $LOGS 121 | cleanup_all $vg_name $root_lv_name $root_lv_mount_path "$devs" $infile $outfile 122 | return $test_status 123 | fi 124 | 125 | cleanup_all $vg_name $root_lv_name $root_lv_mount_path "$devs" $infile $outfile 126 | return 0 127 | } 128 | 129 | cleanup_all(){ 130 | local vg_name=$1 131 | local lv_name=$2 132 | local mount_path=$3 133 | local devs=$4 134 | local infile=$5 135 | local outfile=$6 136 | 137 | umount $mount_path >> $LOGS 2>&1 138 | lvchange -an $vg_name/${lv_name} >> $LOGS 2>&1 139 | lvremove $vg_name/${lv_name} >> $LOGS 2>&1 140 | 141 | cleanup_mount_file $mount_path 142 | cleanup $vg_name "$devs" "$infile" "$outfile" 143 | } 144 | 145 | # Test if activation/deactivation works with container root lv configuration 146 | test_container_root_volume_activation_deactivation 147 | -------------------------------------------------------------------------------- /tests/119-test-gpt-partition-table-creation-2tb.sh: -------------------------------------------------------------------------------- 1 | source $SRCDIR/libtest.sh 2 | 3 | # Test GPT partition table gets created for disks bigger than 2TB. 4 | test_gpt() { 5 | local lbdevice tmpfile 6 | local test_status=1 7 | local testname=`basename "$0"` 8 | local vg_name="css-test-foo" 9 | local infile=${WORKDIR}/container-storage-setup 10 | local outfile=${WORKDIR}/container-storage 11 | 12 | # Error out if any pre-existing volume group vg named css-test-foo 13 | if vg_exists "$vg_name"; then 14 | echo "ERROR: $testname: Volume group $vg_name already exists." >> $LOGS 15 | return $test_status 16 | fi 17 | 18 | # Create loopback device. 
19 | tmpfile=$(mktemp /tmp/c-s-s.XXXXXX) 20 | truncate --size=3T $tmpfile 21 | lbdevice=$(losetup -f) 22 | losetup --partscan $lbdevice $tmpfile 23 | 24 | # Create config file 25 | cat << EOF > $infile 26 | DEVS="$lbdevice" 27 | VG=$vg_name 28 | CONTAINER_THINPOOL=container-thinpool 29 | EOF 30 | 31 | local create_cmd="$CSSBIN create -o $outfile $CSS_TEST_CONFIG $infile" 32 | 33 | # Run container-storage-setup 34 | $create_cmd >> $LOGS 2>&1 35 | 36 | # Test failed. 37 | if [ $? -ne 0 ]; then 38 | echo "ERROR: $testname: $create_cmd failed." >> $LOGS 39 | cleanup $vg_name "$lbdevice" "$infile" "$outfile" 40 | cleanup_loop_device "$tmpfile" "$lbdevice" 41 | return $test_status 42 | fi 43 | 44 | # Make sure volume group $VG got created 45 | if ! vg_exists "$vg_name"; then 46 | echo "ERROR: $testname: $CSSBIN failed. $vg_name was not created." >> $LOGS 47 | fi 48 | 49 | # Make sure partition table type is gpt. 50 | part_type=`fdisk -l "${lbdevice}" | grep "label type" | cut -d ":" -f2` 51 | # Get rid of blanks 52 | part_type=${part_type/ /} 53 | 54 | if [ "$part_type" == "gpt" ]; then 55 | test_status=0 56 | else 57 | echo "ERROR: $testname: $CSSBIN failed. Partition table type created is "$part_type", expected gpt." >> $LOGS 58 | fi 59 | 60 | cleanup $vg_name "$lbdevice" "$infile" "$outfile" 61 | cleanup_loop_device "$tmpfile" "$lbdevice" 62 | return $test_status 63 | } 64 | 65 | cleanup_loop_device() { 66 | local tmpfile=$1 67 | local lbdevice=$2 68 | losetup -d $lbdevice 69 | rm $tmpfile > /dev/null 2>&1 70 | } 71 | 72 | test_gpt 73 | 74 | -------------------------------------------------------------------------------- /tests/README: -------------------------------------------------------------------------------- 1 | Configure a disk for test 2 | ========================= 3 | - Add following line to file css-test-config 4 | 5 | DEVS=/dev/sdb 6 | 7 | This tells test infrastructure to use disk /dev/sdb for tests. This disk 8 | should be unused. 
9 | 10 | Run Tests 11 | ========= 12 | 13 | Running individual tests 14 | # ./run-tests.sh 001-test-use-devs-to-create-thin-pool.sh 004-test-non-absolute-disk-name-support.sh 15 | 16 | Run all tests 17 | # ./run-tests.sh 18 | 19 | By default /usr/bin/container-storage-setup is used for testing. For using a 20 | different binary, define environment variable CONTAINER_STORAGE_SETUP. 21 | 22 | CONTAINER_STORAGE_SETUP=../container-storage-setup.sh ./run-tests.sh 23 | 24 | NOTES: 25 | All tests between 0 and 100 are there for compatibility with docker. These 26 | tests cover backwards compatibility. Going forward we would prefer docker as 27 | well as other container runtimes specify the input and output files. Anything 28 | beyond 100 are generic tests for container runtime storage. 29 | -------------------------------------------------------------------------------- /tests/css-test-config: -------------------------------------------------------------------------------- 1 | # This is config file used by css tests. 2 | # DEVS specifies the device path which should be used to run tests. Ex. /dev/sdb 3 | #DEVS=/dev/sdb 4 | -------------------------------------------------------------------------------- /tests/libtest.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | create_partition_sfdisk(){ 4 | local dev="$1" size 5 | # Use a single partition of a whole device 6 | # TODO: 7 | # * Consider gpt, or unpartitioned volumes 8 | # * Error handling when partition(s) already exist 9 | # * Deal with loop/nbd device names. 
See growpart code 10 | size=$(( $( awk "\$4 ~ /"$( basename $dev )"/ { print \$3 }" /proc/partitions ) * 2 - 2048 )) 11 | cat < /dev/null 2>&1 && return 0 58 | return 1 59 | } 60 | 61 | # Tests if the logical volume lv_name is active 62 | lv_is_active() { 63 | local vg_name=$1 64 | local lv_name=$2 65 | local lv_attr is_active 66 | 67 | lv_attr=$(lvs -o lv_attr --no-headings $vg_name/$lv_name) 68 | lv_attr=${lv_attr# } 69 | is_active=${lv_attr:4:1} 70 | 71 | [ "$is_active" == "a" ] && return 0 72 | return 1 73 | } 74 | 75 | remove_pvs() { 76 | local dev devs=$1 pv 77 | for dev in $devs; do 78 | pv=$(lsblk -npl -o NAME "$dev" | tail -n +2 | head -1) 79 | pvremove -y ${pv} >> $LOGS 2>&1 80 | done 81 | } 82 | 83 | parted_del_partition() { 84 | local dev=$1 85 | parted ${dev} rm 1 >> $LOGS 2>&1 86 | } 87 | 88 | sfdisk_del_partition() { 89 | local dev=$1 90 | sfdisk --delete ${dev} 1 >> $LOGS 2>&1 91 | } 92 | 93 | remove_partitions() { 94 | local dev devs=$1 95 | local use_parted=false 96 | 97 | if [ -x "/usr/sbin/parted" ]; then 98 | use_parted=true 99 | fi 100 | 101 | for dev in $devs; do 102 | if [ "$use_parted" == "true" ]; then 103 | parted_del_partition "$dev" 104 | else 105 | sfdisk_del_partition "$dev" 106 | fi 107 | done 108 | } 109 | 110 | # Wipe all signatures on devices 111 | wipe_signatures() { 112 | local dev devs=$1 113 | for dev in $devs; do 114 | wipefs -f -a $dev >> $LOGS 2>&1 115 | done 116 | } 117 | 118 | cleanup() { 119 | local vg_name=$1 120 | local devs=$2 121 | local infile=/etc/sysconfig/docker-storage-setup 122 | local outfile=/etc/sysconfig/docker-storage 123 | if [ $# -eq 4 ]; then 124 | infile=$3 125 | outfile=$4 126 | fi 127 | 128 | 129 | vgremove -y $vg_name >> $LOGS 2>&1 130 | remove_pvs "$devs" 131 | remove_partitions "$devs" 132 | # After removing partitions let udev settle down. In some 133 | # cases it has been observed that udev rule kept the device 134 | # busy. 
135 | udevadm settle 136 | rm -f $infile $outfile 137 | wipe_signatures "$devs" 138 | [ -d "$CSS_METADATA_DIR" ] && rm -rf "$CSS_METADATA_DIR" 139 | } 140 | 141 | cleanup_mount_file() { 142 | local mount_path=$1 143 | local mount_filename=$(echo $mount_path|sed 's/\//-/g'|cut -c 2-) 144 | 145 | if [ -f "/etc/systemd/system/$mount_filename.mount" ];then 146 | systemctl disable $mount_filename.mount >/dev/null 2>&1 147 | rm /etc/systemd/system/$mount_filename.mount >/dev/null 2>&1 148 | systemctl daemon-reload 149 | fi 150 | } 151 | 152 | cleanup_soft_links() { 153 | local dev devs=$1 154 | 155 | for dev in $devs; do 156 | rm $dev 157 | done 158 | } 159 | -------------------------------------------------------------------------------- /tests/run-tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export WORKDIR=$(pwd)/temp/ 4 | METADATA_DIR=/var/lib/docker 5 | export CSSBIN="/usr/bin/container-storage-setup" 6 | export LOGS=$WORKDIR/logs.txt 7 | export CSS_TEST_CONFIG="css-test-storage-config" 8 | export CSS_METADATA_DIR="/var/lib/container-storage-setup" 9 | 10 | # Keeps track of overall pass/failure status of tests. Even if single test 11 | # fails, PASS_STATUS will be set to 1 and returned to caller when all 12 | # tests have run. 13 | PASS_STATUS=0 14 | 15 | #Helper functions 16 | 17 | # Take care of active docker and old docker metadata 18 | check_docker_active() { 19 | if systemctl -q is-active "docker.service"; then 20 | echo "ERROR: docker.service is currently active. Please stop docker.service before running tests." >&2 21 | exit 1 22 | fi 23 | } 24 | 25 | # Check metadata if using devmapper 26 | check_metadata() { 27 | local devmapper_meta_dir="$METADATA_DIR/devicemapper/metadata/" 28 | 29 | [ ! -d "$devmapper_meta_dir" ] && return 0 30 | 31 | echo "ERROR: ${METADATA_DIR} directory exists and contains old metadata. Remove it." 
>&2 32 | exit 1 33 | } 34 | 35 | setup_workdir() { 36 | mkdir -p $WORKDIR 37 | rm -f $LOGS 38 | } 39 | 40 | # If config file is present, error out 41 | check_config_files() { 42 | if [ -f /etc/sysconfig/docker-storage-setup ];then 43 | echo "ERROR: /etc/sysconfig/docker-storage-setup already exists. Remove it." >&2 44 | exit 1 45 | fi 46 | 47 | if [ -f /etc/sysconfig/docker-storage ];then 48 | echo "ERROR: /etc/sysconfig/docker-storage already exists. Remove it." >&2 49 | exit 1 50 | fi 51 | } 52 | 53 | setup_css_binary() { 54 | # One can setup environment variable CONTAINER_STORAGE_SETUP to override 55 | # which binary is used for tests. 56 | if [ -z "$CONTAINER_STORAGE_SETUP" -a -n "$DOCKER_STORAGE_SETUP" ];then 57 | CONTAINER_STORAGE_SETUP=$DOCKER_STORAGE_SETUP 58 | fi 59 | if [ -n "$CONTAINER_STORAGE_SETUP" ];then 60 | if [ ! -f "$CONTAINER_STORAGE_SETUP" ];then 61 | echo "Error: Executable $CONTAINER_STORAGE_SETUP does not exist" 62 | exit 1 63 | fi 64 | 65 | if [ ! -x "$CONTAINER_STORAGE_SETUP" ];then 66 | echo "Error: Executable $CONTAINER_STORAGE_SETUP does not have execute permissions." 67 | exit 1 68 | fi 69 | CSSBIN=$CONTAINER_STORAGE_SETUP 70 | fi 71 | echo "INFO: Using $CSSBIN for running tests." 72 | } 73 | 74 | # If disk already has signatures, error out. It should be a clean disk. 75 | check_disk_signatures() { 76 | local bdev=$1 77 | local sig 78 | 79 | if ! sig=$(wipefs -p $bdev); then 80 | echo "ERROR: Failed to check signatures on device $bdev" >&2 81 | exit 1 82 | fi 83 | 84 | [ "$sig" == "" ] && return 0 85 | 86 | while IFS=, read offset uuid label type; do 87 | [ "$offset" == "# offset" ] && continue 88 | 89 | echo "ERROR: Found $type signature on device ${bdev} at offset ${offset}. Wipe signatures using wipefs and retry." 90 | exit 1 91 | done <<< "$sig" 92 | } 93 | 94 | check_css_metadata() { 95 | [ ! -d "$CSS_METADATA_DIR" ] && return 0 96 | [ ! 
"$(ls -A $CSS_METADATA_DIR)" ] && return 0 97 | echo "ERROR: Dir $CSS_METADATA_DIR is not empty" 98 | exit 1 99 | } 100 | 101 | #Tests 102 | 103 | check_block_devs() { 104 | local devs=$1 105 | 106 | if [ -z "$devs" ];then 107 | echo "ERROR: A block device need to be specified for testing in css-test-config file." 108 | exit 1 109 | fi 110 | 111 | for dev in $devs; do 112 | if [ ! -b $dev ];then 113 | echo "ERROR: $dev is not a valid block device." 114 | exit 1 115 | fi 116 | 117 | # Make sure device is not a partition. 118 | if [[ $dev =~ .*[0-9]$ ]]; then 119 | echo "ERROR: Partition specification unsupported at this time." 120 | exit 1 121 | fi 122 | 123 | check_disk_signatures $dev 124 | done 125 | } 126 | 127 | run_test () { 128 | testfile=$1 129 | 130 | echo "Running test $testfile" >> $LOGS 2>&1 131 | bash -c $testfile 132 | 133 | if [ $? -eq 0 ];then 134 | echo "PASS: $(basename $testfile)" 135 | else 136 | echo "FAIL: $(basename $testfile)" 137 | PASS_STATUS=1 138 | fi 139 | } 140 | 141 | run_tests() { 142 | if [ $# -gt 0 ]; then 143 | local files=$@ 144 | else 145 | local files="$SRCDIR/[0-9][0-9][0-9]-test-*" 146 | fi 147 | for t in $files;do 148 | run_test ./$t 149 | done 150 | } 151 | 152 | #Main Script 153 | 154 | # Source config file 155 | export SRCDIR=`dirname $0` 156 | if [ -e $SRCDIR/css-test-config ]; then 157 | source $SRCDIR/css-test-config 158 | # DEVS is used by css as well. So exporting this can fail any tests which 159 | # don't want to use DEVS. So export TEST_DEVS instead. 160 | TEST_DEVS=$DEVS 161 | export TEST_DEVS 162 | fi 163 | 164 | source $SRCDIR/libtest.sh 165 | 166 | usage() { 167 | cat <<-FOE 168 | Usage: $1 [OPTIONS] [ test1, test2, ... ] 169 | 170 | Run Container Storage tests 171 | 172 | If you specify no tests to run, all tests will run. 
173 | 174 | Options: 175 | --help Print help message 176 | FOE 177 | } 178 | 179 | if [ $# -gt 0 ]; then 180 | if [ "$1" == "--help" ]; then 181 | usage $(basename $0) 182 | exit 0 183 | fi 184 | fi 185 | 186 | check_docker_active 187 | check_metadata 188 | check_config_files 189 | check_css_metadata 190 | setup_workdir 191 | setup_css_binary 192 | check_block_devs "$DEVS" 193 | run_tests $@ 194 | exit $PASS_STATUS 195 | --------------------------------------------------------------------------------