├── .gitignore
├── .gitreview
├── .stestr.conf
├── .zuul.yaml
├── CONTRIBUTING.rst
├── HACKING.rst
├── LICENSE
├── README.rst
├── babel.cfg
├── cinderlib
│   ├── __init__.py
│   ├── cinderlib.py
│   ├── exception.py
│   ├── nos_brick.py
│   ├── objects.py
│   ├── persistence
│   │   ├── __init__.py
│   │   ├── base.py
│   │   ├── dbms.py
│   │   └── memory.py
│   ├── serialization.py
│   ├── tests
│   │   ├── __init__.py
│   │   ├── functional
│   │   │   ├── __init__.py
│   │   │   ├── base_tests.py
│   │   │   ├── ceph.yaml
│   │   │   ├── cinder_to_yaml.py
│   │   │   ├── lvm.yaml
│   │   │   └── test_basic.py
│   │   └── unit
│   │       ├── __init__.py
│   │       ├── base.py
│   │       ├── nos_brick.py
│   │       ├── objects
│   │       │   ├── __init__.py
│   │       │   ├── test_connection.py
│   │       │   ├── test_snapshot.py
│   │       │   └── test_volume.py
│   │       ├── persistence
│   │       │   ├── __init__.py
│   │       │   ├── base.py
│   │       │   ├── test_dbms.py
│   │       │   └── test_memory.py
│   │       ├── test_cinderlib.py
│   │       └── utils.py
│   ├── utils.py
│   └── workarounds.py
├── devstack
│   ├── README.md
│   ├── override-defaults
│   ├── plugin.sh
│   └── settings
├── doc
│   ├── .gitignore
│   ├── requirements.txt
│   └── source
│       ├── Makefile
│       ├── _extra
│       │   └── .placeholder
│       ├── _static
│       │   └── .placeholder
│       ├── conf.py
│       ├── contributing.rst
│       ├── index.rst
│       ├── installation.rst
│       ├── limitations.rst
│       ├── make.bat
│       ├── topics
│       │   ├── backends.rst
│       │   ├── connections.rst
│       │   ├── initialization.rst
│       │   ├── metadata.rst
│       │   ├── serialization.rst
│       │   ├── snapshots.rst
│       │   ├── tracking.rst
│       │   └── volumes.rst
│       └── usage.rst
├── lower-constraints.txt
├── playbooks
│   ├── cinder-gate-run.yaml
│   ├── setup-ceph.yaml
│   └── setup-lvm.yaml
├── releasenotes
│   ├── notes
│   │   └── cinderlib-a458b8e23b6d35f4.yaml
│   └── source
│       ├── conf.py
│       ├── index.rst
│       └── unreleased.rst
├── requirements.txt
├── setup.cfg
├── setup.py
├── test-requirements.txt
├── tools
│   ├── cinder-cfg-to-python.py
│   ├── coding-checks.sh
│   ├── fast8.sh
│   ├── lvm-prepare.sh
│   └── virtualenv-sudo.sh
└── tox.ini
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | .*
3 | !.gitignore
4 | !.testr.conf
5 | !.stestr.conf
6 | !.zuul.yaml
7 | !.travis.yml
8 | .*.sw?
9 | __pycache__/
10 | *.py[cod]
11 | *$py.class
12 |
13 | # C extensions
14 | *.so
15 |
16 | # Distribution / packaging
17 | .Python
18 | env/
19 | build/
20 | develop-eggs/
21 | dist/
22 | downloads/
23 | eggs/
24 | .eggs/
25 | lib/
26 | lib64/
27 | parts/
28 | sdist/
29 | var/
30 | *.egg-info/
31 | .installed.cfg
32 | *.egg
33 |
34 | # PyInstaller
35 | # Usually these files are written by a python script from a template
36 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
37 | *.manifest
38 | *.spec
39 |
40 | # Installer logs
41 | pip-log.txt
42 | pip-delete-this-directory.txt
43 |
44 | # Unit test / coverage reports
45 | htmlcov/
46 | .tox/
47 | .coverage
48 | .coverage.*
49 | .cache
50 | nosetests.xml
51 | coverage.xml
52 | *,cover
53 | cover/
54 | .hypothesis/
55 |
56 | # Translations
57 | *.mo
58 | *.pot
59 |
60 | # Django stuff:
61 | *.log
62 |
63 | # PyBuilder
64 | target/
65 |
66 | # pyenv python configuration file
67 | .python-version
68 |
69 | # Temp directory, for example for the LVM file, our custom config, etc.
70 | temp/
71 |
72 | cinder-lioadm
73 |
--------------------------------------------------------------------------------
/.gitreview:
--------------------------------------------------------------------------------
1 | [gerrit]
2 | host=review.openstack.org
3 | port=29418
4 | project=openstack/cinderlib.git
5 |
--------------------------------------------------------------------------------
/.stestr.conf:
--------------------------------------------------------------------------------
1 | [DEFAULT]
2 | test_path=${OS_TEST_PATH:-./cinderlib/tests/unit}
3 | top_dir=./
4 |
--------------------------------------------------------------------------------
/.zuul.yaml:
--------------------------------------------------------------------------------
1 | - project:
2 | templates:
3 | - publish-to-pypi
4 | # PEP8 + PY27 + Post branch-tarball
5 | - openstack-python-jobs
6 | - openstack-python36-jobs
7 | - publish-openstack-docs-pti
8 | - release-notes-jobs-python3
9 | check:
10 | queue: integrated
11 | jobs:
12 | - cinderlib-lvm-functional
13 | - cinderlib-ceph-functional
14 | gate:
15 | queue: integrated
16 | jobs:
17 | - cinderlib-lvm-functional
18 | - cinderlib-ceph-functional
19 |
20 | - job:
21 | name: cinderlib-lvm-functional
22 | parent: openstack-tox-functional-with-sudo
23 | pre-run: playbooks/setup-lvm.yaml
24 | nodeset: centos-7
25 |
26 | - job:
27 | name: cinderlib-ceph-functional
28 | parent: openstack-tox-functional-with-sudo
29 | pre-run: playbooks/setup-ceph.yaml
30 | nodeset: centos-7
31 | vars:
32 | tox_environment:
33 | CL_FTEST_CFG: "cinderlib/tests/functional/ceph.yaml"
34 | CL_FTEST_ROOT_HELPER: sudo
35 | # These come from great-great-grandparent tox job
36 | NOSE_WITH_HTML_OUTPUT: 1
37 | NOSE_HTML_OUT_FILE: nose_results.html
38 | NOSE_WITH_XUNIT: 1
39 |
--------------------------------------------------------------------------------
/CONTRIBUTING.rst:
--------------------------------------------------------------------------------
1 | If you would like to contribute to the development of OpenStack,
2 | you must follow the steps in this page:
3 |
4 | https://docs.openstack.org/infra/manual/developers.html
5 |
6 | Once those steps have been completed, changes to OpenStack
7 | should be submitted for review via the Gerrit tool, following
8 | the workflow documented at:
9 |
10 | https://docs.openstack.org/infra/manual/developers.html#development-workflow
11 |
12 | Pull requests submitted through GitHub will be ignored.
13 |
14 | Bugs should be filed as stories on StoryBoard, not in GitHub's issue tracker:
15 |
16 | https://storyboard.openstack.org/#!/project/openstack/cinderlib
17 |
--------------------------------------------------------------------------------
/HACKING.rst:
--------------------------------------------------------------------------------
1 | Cinderlib Style Commandments
2 | ============================
3 |
4 | - Step 1: Read the OpenStack Style Commandments
5 | https://docs.openstack.org/hacking/latest/
6 | - Step 2: Read on
7 |
8 | Cinder Specific Commandments
9 | ----------------------------
10 | - [N314] Check for vi editor configuration in source files.
11 | - [N322] Ensure default arguments are not mutable.
12 | - [N323] Add check for explicit import of _() to ensure proper translation.
13 | - [N325] str() and unicode() cannot be used on an exception. Remove or use six.text_type().
14 | - [N336] Must use a dict comprehension instead of a dict constructor with a sequence of key-value pairs.
15 | - [C301] timeutils.utcnow() from oslo_utils should be used instead of datetime.now().
16 | - [C302] six.text_type should be used instead of unicode.
17 | - [C303] Ensure that there are no 'print()' statements in code that is being committed.
18 | - [C304] Enforce no use of LOG.audit messages. LOG.info should be used instead.
19 | - [C305] Prevent use of deprecated contextlib.nested.
20 | - [C306] timeutils.strtime() must not be used (deprecated).
21 | - [C307] LOG.warn is deprecated. Enforce use of LOG.warning.
22 | - [C308] timeutils.isotime() must not be used (deprecated).
23 | - [C309] Unit tests should not perform logging.
24 | - [C310] Check for improper use of logging format arguments.
25 | - [C311] Check for proper naming and usage in option registration.
26 | - [C312] Validate that logs are not translated.
27 | - [C313] Check that assertTrue(value) is used and not assertEqual(True, value).
28 |
29 | General
30 | -------
31 | - Use 'raise' instead of 'raise e' to preserve original traceback or exception being reraised::
32 |
33 | except Exception as e:
34 | ...
35 | raise e # BAD
36 |
37 | except Exception:
38 | ...
39 | raise # OKAY
40 |
41 |
42 |
43 | Creating Unit Tests
44 | -------------------
45 | For every new feature, unit tests should be created that both test and
46 | (implicitly) document the usage of said feature. If submitting a patch for a
47 | bug that had no unit test, a new passing unit test should be added. If a
48 | submitted bug fix does have a unit test, be sure to add a new one that fails
49 | without the patch and passes with the patch.
50 |
51 | For more information on creating unit tests and utilizing the testing
52 | infrastructure in OpenStack Cinder, please see
53 | https://docs.openstack.org/cinder/latest/contributor/testing.html
54 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 |
2 | Apache License
3 | Version 2.0, January 2004
4 | http://www.apache.org/licenses/
5 |
6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7 |
8 | 1. Definitions.
9 |
10 | "License" shall mean the terms and conditions for use, reproduction,
11 | and distribution as defined by Sections 1 through 9 of this document.
12 |
13 | "Licensor" shall mean the copyright owner or entity authorized by
14 | the copyright owner that is granting the License.
15 |
16 | "Legal Entity" shall mean the union of the acting entity and all
17 | other entities that control, are controlled by, or are under common
18 | control with that entity. For the purposes of this definition,
19 | "control" means (i) the power, direct or indirect, to cause the
20 | direction or management of such entity, whether by contract or
21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
22 | outstanding shares, or (iii) beneficial ownership of such entity.
23 |
24 | "You" (or "Your") shall mean an individual or Legal Entity
25 | exercising permissions granted by this License.
26 |
27 | "Source" form shall mean the preferred form for making modifications,
28 | including but not limited to software source code, documentation
29 | source, and configuration files.
30 |
31 | "Object" form shall mean any form resulting from mechanical
32 | transformation or translation of a Source form, including but
33 | not limited to compiled object code, generated documentation,
34 | and conversions to other media types.
35 |
36 | "Work" shall mean the work of authorship, whether in Source or
37 | Object form, made available under the License, as indicated by a
38 | copyright notice that is included in or attached to the work
39 | (an example is provided in the Appendix below).
40 |
41 | "Derivative Works" shall mean any work, whether in Source or Object
42 | form, that is based on (or derived from) the Work and for which the
43 | editorial revisions, annotations, elaborations, or other modifications
44 | represent, as a whole, an original work of authorship. For the purposes
45 | of this License, Derivative Works shall not include works that remain
46 | separable from, or merely link (or bind by name) to the interfaces of,
47 | the Work and Derivative Works thereof.
48 |
49 | "Contribution" shall mean any work of authorship, including
50 | the original version of the Work and any modifications or additions
51 | to that Work or Derivative Works thereof, that is intentionally
52 | submitted to Licensor for inclusion in the Work by the copyright owner
53 | or by an individual or Legal Entity authorized to submit on behalf of
54 | the copyright owner. For the purposes of this definition, "submitted"
55 | means any form of electronic, verbal, or written communication sent
56 | to the Licensor or its representatives, including but not limited to
57 | communication on electronic mailing lists, source code control systems,
58 | and issue tracking systems that are managed by, or on behalf of, the
59 | Licensor for the purpose of discussing and improving the Work, but
60 | excluding communication that is conspicuously marked or otherwise
61 | designated in writing by the copyright owner as "Not a Contribution."
62 |
63 | "Contributor" shall mean Licensor and any individual or Legal Entity
64 | on behalf of whom a Contribution has been received by Licensor and
65 | subsequently incorporated within the Work.
66 |
67 | 2. Grant of Copyright License. Subject to the terms and conditions of
68 | this License, each Contributor hereby grants to You a perpetual,
69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70 | copyright license to reproduce, prepare Derivative Works of,
71 | publicly display, publicly perform, sublicense, and distribute the
72 | Work and such Derivative Works in Source or Object form.
73 |
74 | 3. Grant of Patent License. Subject to the terms and conditions of
75 | this License, each Contributor hereby grants to You a perpetual,
76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77 | (except as stated in this section) patent license to make, have made,
78 | use, offer to sell, sell, import, and otherwise transfer the Work,
79 | where such license applies only to those patent claims licensable
80 | by such Contributor that are necessarily infringed by their
81 | Contribution(s) alone or by combination of their Contribution(s)
82 | with the Work to which such Contribution(s) was submitted. If You
83 | institute patent litigation against any entity (including a
84 | cross-claim or counterclaim in a lawsuit) alleging that the Work
85 | or a Contribution incorporated within the Work constitutes direct
86 | or contributory patent infringement, then any patent licenses
87 | granted to You under this License for that Work shall terminate
88 | as of the date such litigation is filed.
89 |
90 | 4. Redistribution. You may reproduce and distribute copies of the
91 | Work or Derivative Works thereof in any medium, with or without
92 | modifications, and in Source or Object form, provided that You
93 | meet the following conditions:
94 |
95 | (a) You must give any other recipients of the Work or
96 | Derivative Works a copy of this License; and
97 |
98 | (b) You must cause any modified files to carry prominent notices
99 | stating that You changed the files; and
100 |
101 | (c) You must retain, in the Source form of any Derivative Works
102 | that You distribute, all copyright, patent, trademark, and
103 | attribution notices from the Source form of the Work,
104 | excluding those notices that do not pertain to any part of
105 | the Derivative Works; and
106 |
107 | (d) If the Work includes a "NOTICE" text file as part of its
108 | distribution, then any Derivative Works that You distribute must
109 | include a readable copy of the attribution notices contained
110 | within such NOTICE file, excluding those notices that do not
111 | pertain to any part of the Derivative Works, in at least one
112 | of the following places: within a NOTICE text file distributed
113 | as part of the Derivative Works; within the Source form or
114 | documentation, if provided along with the Derivative Works; or,
115 | within a display generated by the Derivative Works, if and
116 | wherever such third-party notices normally appear. The contents
117 | of the NOTICE file are for informational purposes only and
118 | do not modify the License. You may add Your own attribution
119 | notices within Derivative Works that You distribute, alongside
120 | or as an addendum to the NOTICE text from the Work, provided
121 | that such additional attribution notices cannot be construed
122 | as modifying the License.
123 |
124 | You may add Your own copyright statement to Your modifications and
125 | may provide additional or different license terms and conditions
126 | for use, reproduction, or distribution of Your modifications, or
127 | for any such Derivative Works as a whole, provided Your use,
128 | reproduction, and distribution of the Work otherwise complies with
129 | the conditions stated in this License.
130 |
131 | 5. Submission of Contributions. Unless You explicitly state otherwise,
132 | any Contribution intentionally submitted for inclusion in the Work
133 | by You to the Licensor shall be under the terms and conditions of
134 | this License, without any additional terms or conditions.
135 | Notwithstanding the above, nothing herein shall supersede or modify
136 | the terms of any separate license agreement you may have executed
137 | with Licensor regarding such Contributions.
138 |
139 | 6. Trademarks. This License does not grant permission to use the trade
140 | names, trademarks, service marks, or product names of the Licensor,
141 | except as required for reasonable and customary use in describing the
142 | origin of the Work and reproducing the content of the NOTICE file.
143 |
144 | 7. Disclaimer of Warranty. Unless required by applicable law or
145 | agreed to in writing, Licensor provides the Work (and each
146 | Contributor provides its Contributions) on an "AS IS" BASIS,
147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148 | implied, including, without limitation, any warranties or conditions
149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150 | PARTICULAR PURPOSE. You are solely responsible for determining the
151 | appropriateness of using or redistributing the Work and assume any
152 | risks associated with Your exercise of permissions under this License.
153 |
154 | 8. Limitation of Liability. In no event and under no legal theory,
155 | whether in tort (including negligence), contract, or otherwise,
156 | unless required by applicable law (such as deliberate and grossly
157 | negligent acts) or agreed to in writing, shall any Contributor be
158 | liable to You for damages, including any direct, indirect, special,
159 | incidental, or consequential damages of any character arising as a
160 | result of this License or out of the use or inability to use the
161 | Work (including but not limited to damages for loss of goodwill,
162 | work stoppage, computer failure or malfunction, or any and all
163 | other commercial damages or losses), even if such Contributor
164 | has been advised of the possibility of such damages.
165 |
166 | 9. Accepting Warranty or Additional Liability. While redistributing
167 | the Work or Derivative Works thereof, You may choose to offer,
168 | and charge a fee for, acceptance of support, warranty, indemnity,
169 | or other liability obligations and/or rights consistent with this
170 | License. However, in accepting such obligations, You may act only
171 | on Your own behalf and on Your sole responsibility, not on behalf
172 | of any other Contributor, and only if You agree to indemnify,
173 | defend, and hold each Contributor harmless for any liability
174 | incurred by, or claims asserted against, such Contributor by reason
175 | of your accepting any such warranty or additional liability.
176 |
177 |
--------------------------------------------------------------------------------
/README.rst:
--------------------------------------------------------------------------------
1 | Cinder Library
2 | ===============================
3 |
4 | .. image:: https://img.shields.io/pypi/v/cinderlib.svg
5 | :target: https://pypi.python.org/pypi/cinderlib
6 |
7 | .. image:: https://img.shields.io/pypi/pyversions/cinderlib.svg
8 | :target: https://pypi.python.org/pypi/cinderlib
9 |
10 | .. image:: https://img.shields.io/:license-apache-blue.svg
11 | :target: http://www.apache.org/licenses/LICENSE-2.0
12 |
13 |
14 | Introduction
15 | ------------
16 |
17 | The Cinder Library, also known as cinderlib, is a Python library that leverages
18 | the Cinder project to provide an object oriented abstraction around Cinder's
19 | storage drivers to allow their usage directly without running any of the Cinder
20 | services or surrounding services, such as KeyStone, MySQL or RabbitMQ.
21 |
22 | * Free software: Apache Software License 2.0
23 | * Documentation: https://docs.openstack.org/cinderlib/latest/
24 |
25 | The library is intended for developers who only need the basic CRUD
26 | functionality of the drivers and don't care for all the additional features
27 | Cinder provides such as quotas, replication, multi-tenancy, migrations,
28 | retyping, scheduling, backups, authorization, authentication, REST API, etc.
29 |
30 | The library was originally created as an external project, so it didn't have
31 | the broad range of backend testing Cinder does, and only a limited number of
32 | drivers were validated at the time. Drivers should work out of the box, and
33 | we'll keep a list of drivers that have added the cinderlib functional tests to
34 | the driver gates confirming they work and ensuring they will keep working.
35 |
36 | Features
37 | --------
38 |
39 | * Use a Cinder driver without running a DBMS, Message broker, or Cinder
40 | service.
41 | * Using multiple simultaneous drivers on the same application.
42 | * Basic operations support:
43 |
44 | - Create volume
45 | - Delete volume
46 | - Extend volume
47 | - Clone volume
48 | - Create snapshot
49 | - Delete snapshot
50 | - Create volume from snapshot
51 | - Connect volume
52 | - Disconnect volume
53 | - Local attach
54 | - Local detach
55 | - Validate connector
56 | - Extra Specs for specific backend functionality.
57 | - Backend QoS
58 | - Multi-pool support
59 |
60 | * Metadata persistence plugins:
61 |
62 | - Stateless: Caller stores JSON serialization.
63 | - Database: Metadata is stored in a database: MySQL, PostgreSQL, SQLite...
64 | - Custom plugin: Caller provides module to store Metadata and cinderlib calls
65 | it when necessary.
66 |
67 | Demo
68 | ----
69 |
70 | .. raw:: html
71 |
72 |
75 |
76 | .. _GIGO: https://en.wikipedia.org/wiki/Garbage_in,_garbage_out
77 | .. _official project documentation: https://readthedocs.org/projects/cinderlib/badge/?version=latest
78 | .. _OpenStack's Cinder volume driver configuration documentation: https://docs.openstack.org/cinder/latest/configuration/block-storage/volume-drivers.html
79 |
--------------------------------------------------------------------------------
/babel.cfg:
--------------------------------------------------------------------------------
1 | [python: **.py]
2 |
3 |
--------------------------------------------------------------------------------
/cinderlib/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018, Red Hat, Inc.
2 | # All Rights Reserved.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
5 | # not use this file except in compliance with the License. You may obtain
6 | # a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 | # License for the specific language governing permissions and limitations
14 | # under the License.
15 | from __future__ import absolute_import
16 | import pkg_resources
17 |
18 | from cinderlib import cinderlib
19 | from cinderlib import objects
20 | from cinderlib import serialization
21 | from cinderlib import workarounds # noqa
22 |
23 | try:
24 | __version__ = pkg_resources.get_distribution('cinderlib').version
25 | except pkg_resources.DistributionNotFound:
26 | __version__ = '0.0.0'
27 |
28 | DEFAULT_PROJECT_ID = objects.DEFAULT_PROJECT_ID
29 | DEFAULT_USER_ID = objects.DEFAULT_USER_ID
30 | Volume = objects.Volume
31 | Snapshot = objects.Snapshot
32 | Connection = objects.Connection
33 | KeyValue = objects.KeyValue
34 |
35 | load = serialization.load
36 | json = serialization.json
37 | jsons = serialization.jsons
38 | dump = serialization.dump
39 | dumps = serialization.dumps
40 |
41 | setup = cinderlib.setup
42 | Backend = cinderlib.Backend
43 |
44 | get_connector_properties = objects.brick_connector.get_connector_properties
45 | list_supported_drivers = cinderlib.Backend.list_supported_drivers
46 |
--------------------------------------------------------------------------------
/cinderlib/exception.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018, Red Hat, Inc.
2 | # All Rights Reserved.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
5 | # not use this file except in compliance with the License. You may obtain
6 | # a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 | # License for the specific language governing permissions and limitations
14 | # under the License.
15 |
16 | from cinder import exception
17 |
18 |
19 | NotFound = exception.NotFound
20 | VolumeNotFound = exception.VolumeNotFound
21 | SnapshotNotFound = exception.SnapshotNotFound
22 | ConnectionNotFound = exception.VolumeAttachmentNotFound
23 | InvalidVolume = exception.InvalidVolume
24 |
25 |
26 | class InvalidPersistence(Exception):
27 | __msg = 'Invalid persistence storage: %s.'
28 |
29 | def __init__(self, name):
30 | super(InvalidPersistence, self).__init__(self.__msg % name)
31 |
32 |
33 | class NotLocal(Exception):
34 | __msg = "Volume %s doesn't seem to be attached locally."
35 |
36 | def __init__(self, name):
37 | super(NotLocal, self).__init__(self.__msg % name)
38 |
--------------------------------------------------------------------------------
/cinderlib/nos_brick.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018, Red Hat, Inc.
2 | # All Rights Reserved.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
5 | # not use this file except in compliance with the License. You may obtain
6 | # a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 | # License for the specific language governing permissions and limitations
14 | # under the License.
15 | """Helper code to attach/detach out of OpenStack
16 |
17 | OS-Brick is meant to be used within OpenStack, which means that there are some
18 | issues when using it on non OpenStack systems.
19 |
20 | Here we take care of:
21 |
22 | - Making sure we can work without privsep and using sudo directly
23 | - Replacing an unlink privsep method that would run python code privileged
24 | - Local attachment of RBD volumes using librados
25 |
26 | Some of these changes may be later moved to OS-Brick. For now we just copied it
27 | from the nos-brick repository.
28 | """
29 | import errno
30 | import functools
31 | import os
32 |
33 | from os_brick import exception
34 | from os_brick.initiator import connector
35 | from os_brick.initiator import connectors
36 | from os_brick.privileged import rootwrap
37 | from oslo_concurrency import processutils as putils
38 | from oslo_privsep import priv_context
39 | from oslo_utils import fileutils
40 | from oslo_utils import strutils
41 | import six
42 |
43 |
44 | class RBDConnector(connectors.rbd.RBDConnector):
45 | """"Connector class to attach/detach RBD volumes locally.
46 |
47 | OS-Brick's implementation covers only 2 cases:
48 |
49 | - Local attachment on controller node.
50 | - Returning a file object on non controller nodes.
51 |
52 | We need a third one, local attachment on non controller node.
53 | """
54 | def connect_volume(self, connection_properties):
55 | # NOTE(e0ne): sanity check if ceph-common is installed.
56 | self._setup_rbd_class()
57 |
58 | # Extract connection parameters and generate config file
59 | try:
60 | user = connection_properties['auth_username']
61 | pool, volume = connection_properties['name'].split('/')
62 | cluster_name = connection_properties.get('cluster_name')
63 | monitor_ips = connection_properties.get('hosts')
64 | monitor_ports = connection_properties.get('ports')
65 | keyring = connection_properties.get('keyring')
66 | except IndexError:
67 | msg = 'Malformed connection properties'
68 | raise exception.BrickException(msg)
69 |
70 | conf = self._create_ceph_conf(monitor_ips, monitor_ports,
71 | str(cluster_name), user,
72 | keyring)
73 |
74 | link_name = self.get_rbd_device_name(pool, volume)
75 | real_path = os.path.realpath(link_name)
76 |
77 | try:
78 | # Map RBD volume if it's not already mapped
79 | if not os.path.islink(link_name) or not os.path.exists(real_path):
80 | cmd = ['rbd', 'map', volume, '--pool', pool, '--conf', conf]
81 | cmd += self._get_rbd_args(connection_properties)
82 | stdout, stderr = self._execute(*cmd,
83 | root_helper=self._root_helper,
84 | run_as_root=True)
85 | real_path = stdout.strip()
86 | # The host may not have RBD installed, and therefore won't
87 | # create the symlinks, ensure they exist
88 | if self.containerized:
89 | self._ensure_link(real_path, link_name)
90 | except Exception:
91 | fileutils.delete_if_exists(conf)
92 | raise
93 |
94 | return {'path': real_path,
95 | 'conf': conf,
96 | 'type': 'block'}
97 |
98 | def _ensure_link(self, source, link_name):
99 | self._ensure_dir(os.path.dirname(link_name))
100 | if self.im_root:
101 | try:
102 | os.symlink(source, link_name)
103 | except OSError as exc:
104 | if exc.errno != errno.EEXIST:
105 | raise
106 | # If we have a leftover link, clean it up
107 | if source != os.path.realpath(link_name):
108 | os.remove(link_name)
109 | os.symlink(source, link_name)
110 | else:
111 | self._execute('ln', '-s', '-f', source, link_name,
112 | run_as_root=True)
113 |
114 | def check_valid_device(self, path, run_as_root=True):
115 | """Verify an existing RBD handle is connected and valid."""
116 | if self.im_root:
117 | try:
118 | with open(path, 'r') as f:
119 | f.read(4096)
120 | except Exception:
121 | return False
122 | return True
123 |
124 | try:
125 | self._execute('dd', 'if=' + path, 'of=/dev/null', 'bs=4096',
126 | 'count=1', root_helper=self._root_helper,
127 | run_as_root=True)
128 | except putils.ProcessExecutionError:
129 | return False
130 | return True
131 |
132 | def disconnect_volume(self, connection_properties, device_info,
133 | force=False, ignore_errors=False):
134 | self._setup_rbd_class()
135 | pool, volume = connection_properties['name'].split('/')
136 | conf_file = device_info['conf']
137 | link_name = self.get_rbd_device_name(pool, volume)
138 | real_dev_path = os.path.realpath(link_name)
139 |
140 | if os.path.exists(real_dev_path):
141 | cmd = ['rbd', 'unmap', real_dev_path, '--conf', conf_file]
142 | cmd += self._get_rbd_args(connection_properties)
143 | self._execute(*cmd, root_helper=self._root_helper,
144 | run_as_root=True)
145 |
146 | if self.containerized:
147 | unlink_root(link_name)
148 | fileutils.delete_if_exists(conf_file)
149 |
150 | def _ensure_dir(self, path):
151 | if self.im_root:
152 | try:
153 | os.makedirs(path, 0o755)
154 | except OSError as exc:
155 | if exc.errno != errno.EEXIST:
156 | raise
157 | else:
158 | self._execute('mkdir', '-p', '-m0755', path, run_as_root=True)
159 |
160 | def _setup_class(self):
161 | try:
162 | self._execute('which', 'rbd')
163 | except putils.ProcessExecutionError:
164 | msg = 'ceph-common package not installed'
165 | raise exception.BrickException(msg)
166 |
167 | RBDConnector.im_root = os.getuid() == 0
168 | # Check if we are running containerized
169 | RBDConnector.containerized = os.stat('/proc').st_dev > 4
170 |
171 | # Don't check again to speed things on following connections
172 | RBDConnector._setup_rbd_class = lambda *args: None
173 |
174 | _setup_rbd_class = _setup_class
175 |
176 |
177 | ROOT_HELPER = 'sudo'
178 |
179 |
def unlink_root(*links, **kwargs):
    """Remove files/symlinks that may require root privileges.

    Keyword arguments:
        no_errors: swallow every failure silently.
        raise_at_end: try all links first, then raise a single chained
            exception if anything failed.
    """
    swallow = kwargs.get('no_errors', False)
    defer_raise = kwargs.get('raise_at_end', False)
    chain = exception.ExceptionChainer()
    catch = swallow or defer_raise
    message = 'Some unlinks failed for %s'

    if os.getuid() == 0:
        # Already root: unlink each path directly
        for link in links:
            with chain.context(catch, message, links):
                os.unlink(link)
    else:
        # Not root: remove the whole batch with a single privileged rm
        with chain.context(catch, message, links):
            putils.execute('rm', *links, run_as_root=True,
                           root_helper=ROOT_HELPER)

    if defer_raise and not swallow and chain:
        raise chain
198 |
199 |
def _execute(*cmd, **kwargs):
    """Run ``cmd`` through os-brick's privileged executor.

    An OSError (e.g. executable not found) is translated into the
    ProcessExecutionError callers already expect, with any passwords in
    the command line masked before it is attached to the exception.
    """
    try:
        return rootwrap.custom_execute(*cmd, **kwargs)
    except OSError as e:
        sanitized = strutils.mask_password(' '.join(cmd))
        raise putils.ProcessExecutionError(cmd=sanitized,
                                           description=six.text_type(e))
207 |
208 |
def init(root_helper='sudo'):
    """Initialize os-brick for use without a running Cinder service.

    Stores the root helper, initializes oslo privsep with it, and
    monkey-patches os-brick so connector properties and connector
    factories always receive our root helper and our ``_execute``,
    substituting our RBDConnector for the 'rbd' protocol.
    """
    global ROOT_HELPER
    ROOT_HELPER = root_helper
    priv_context.init(root_helper=[root_helper])

    # Keep references to the originals so the wrappers can delegate
    existing_bgcp = connector.get_connector_properties
    existing_bcp = connector.InitiatorConnector.factory

    def my_bgcp(*args, **kwargs):
        # The first positional argument is the root helper
        if len(args):
            args = list(args)
            args[0] = ROOT_HELPER
        else:
            kwargs['root_helper'] = ROOT_HELPER
        kwargs['execute'] = _execute
        return existing_bgcp(*args, **kwargs)

    def my_bgc(protocol, *args, **kwargs):
        if len(args):
            # args is a tuple and we cannot do assignments
            args = list(args)
            args[0] = ROOT_HELPER
        else:
            kwargs['root_helper'] = ROOT_HELPER
        kwargs['execute'] = _execute

        # OS-Brick's implementation for RBD is not good enough for us
        if protocol == 'rbd':
            factory = RBDConnector
        else:
            factory = functools.partial(existing_bcp, protocol)

        return factory(*args, **kwargs)

    connector.get_connector_properties = my_bgcp
    connector.InitiatorConnector.factory = staticmethod(my_bgc)
    # Newer os-brick versions expose unlink_root; replace it with ours
    if hasattr(rootwrap, 'unlink_root'):
        rootwrap.unlink_root = unlink_root
247 |
--------------------------------------------------------------------------------
/cinderlib/persistence/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018, Red Hat, Inc.
2 | # All Rights Reserved.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
5 | # not use this file except in compliance with the License. You may obtain
6 | # a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 | # License for the specific language governing permissions and limitations
14 | # under the License.
15 | from __future__ import absolute_import
16 | import inspect
17 |
18 | from cinder.cmd import volume as volume_cmd
19 | import six
20 | from stevedore import driver
21 |
22 | from cinderlib import exception
23 | from cinderlib.persistence import base
24 |
25 |
26 | DEFAULT_STORAGE = 'memory'
27 |
28 |
class MyDict(dict):
    """Dictionary that silently ignores ``clear()`` calls.

    oslo.config expects configuration to come from files and the command
    line, and clears its cache whenever a dynamically loaded driver
    registers new options.  cinderlib feeds everything from memory, so
    clearing would lose our configuration; this subclass keeps it.
    """
    def clear(self):
        # Intentionally a no-op: preserve the in-memory configuration.
        return None
41 |
42 |
def setup(config):
    """Setup persistence to be used in cinderlib.

    By default memory persistence will be used, but there are other
    mechanisms available and other ways to use custom mechanisms:

    - Persistence plugins: Plugin mechanism uses Python entrypoints under
      namespace cinderlib.persistence.storage, and cinderlib comes with 3
      different mechanisms, "memory", "dbms", and "memory_dbms". To use any
      of these one must pass the string name in the storage parameter and
      any other configuration as keyword arguments.
    - Passing a class that inherits from PersistenceDriverBase as storage
      parameter and initialization parameters as keyword arguments.
    - Passing an instance that inherits from PersistenceDriverBase as
      storage parameter.
    """
    config = {} if config is None else config.copy()

    # Prevent driver dynamic loading clearing configuration options
    volume_cmd.CONF._ConfigOpts__cache = MyDict()

    # Default configuration is using memory storage
    storage = config.pop('storage', None) or DEFAULT_STORAGE

    # Already-built driver instance: use it as is
    if isinstance(storage, base.PersistenceDriverBase):
        return storage

    # Driver class: instantiate it with the remaining configuration
    if inspect.isclass(storage) and issubclass(storage,
                                               base.PersistenceDriverBase):
        return storage(**config)

    # Anything else must be a plugin name string
    if not isinstance(storage, six.string_types):
        raise exception.InvalidPersistence(storage)

    manager = driver.DriverManager(
        namespace='cinderlib.persistence.storage',
        name=storage,
        invoke_on_load=True,
        invoke_kwds=config,
    )
    return manager.driver
86 |
--------------------------------------------------------------------------------
/cinderlib/persistence/base.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018, Red Hat, Inc.
2 | # All Rights Reserved.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
5 | # not use this file except in compliance with the License. You may obtain
6 | # a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 | # License for the specific language governing permissions and limitations
14 | # under the License.
15 |
16 | from __future__ import absolute_import
17 |
18 | # NOTE(geguileo): Probably a good idea not to depend on cinder.cmd.volume
19 | # having all the other imports as they could change.
20 | from cinder import objects
21 | from cinder.objects import base as cinder_base_ovo
22 | from oslo_utils import timeutils
23 | from oslo_versionedobjects import fields
24 | import six
25 |
26 | import cinderlib
27 | from cinderlib import serialization
28 |
29 |
class PersistenceDriverBase(object):
    """Provide Metadata Persistency for our resources.

    This class will be used to store new resources as they are created,
    updated, and removed, as well as provide a mechanism for users to retrieve
    volumes, snapshots, and connections.

    Concrete drivers implement the ``get_*`` methods and the ``db``
    property; the ``set_*``/``delete_*`` implementations here only manage
    OVO change tracking and are meant to be called via ``super()``.
    """
    def __init__(self, **kwargs):
        pass

    @property
    def db(self):
        """Return the DB-replacement object handed to Cinder drivers."""
        raise NotImplementedError()

    def get_volumes(self, volume_id=None, volume_name=None, backend_name=None):
        """Return volumes matching all the provided (optional) filters."""
        raise NotImplementedError()

    def get_snapshots(self, snapshot_id=None, snapshot_name=None,
                      volume_id=None):
        """Return snapshots matching all the provided (optional) filters."""
        raise NotImplementedError()

    def get_connections(self, connection_id=None, volume_id=None):
        """Return connections matching all the provided (optional) filters."""
        raise NotImplementedError()

    def get_key_values(self, key):
        """Return stored key-value entries for ``key``."""
        raise NotImplementedError()

    def set_volume(self, volume):
        """Persist a volume; here we only reset its change tracking."""
        self.reset_change_tracker(volume)
        # The volume type (and its qos specs) are saved along with the
        # volume, so their change trackers must be reset too.
        if volume.volume_type:
            volume.volume_type.obj_reset_changes()
            if volume.volume_type.qos_specs_id:
                volume.volume_type.qos_specs.obj_reset_changes()

    def set_snapshot(self, snapshot):
        """Persist a snapshot; here we only reset its change tracking."""
        self.reset_change_tracker(snapshot)

    def set_connection(self, connection):
        """Persist a connection; here we only reset its change tracking."""
        self.reset_change_tracker(connection)

    def set_key_value(self, key_value):
        pass

    def delete_volume(self, volume):
        """Mark a volume deleted and reset its change tracking."""
        self._set_deleted(volume)
        self.reset_change_tracker(volume)

    def delete_snapshot(self, snapshot):
        """Mark a snapshot deleted and reset its change tracking."""
        self._set_deleted(snapshot)
        self.reset_change_tracker(snapshot)

    def delete_connection(self, connection):
        """Mark a connection deleted and reset its change tracking."""
        self._set_deleted(connection)
        self.reset_change_tracker(connection)

    def delete_key_value(self, key):
        pass

    def _set_deleted(self, resource):
        # Soft-delete: flag the underlying OVO rather than dropping data
        resource._ovo.deleted = True
        resource._ovo.deleted_at = timeutils.utcnow()
        if hasattr(resource._ovo, 'status'):
            resource._ovo.status = 'deleted'

    def reset_change_tracker(self, resource, fields=None):
        """Clear changed-field tracking; ``fields`` may be a single name."""
        if isinstance(fields, six.string_types):
            fields = (fields,)
        resource._ovo.obj_reset_changes(fields)

    def get_changed_fields(self, resource):
        """Return a dict of the resource's changed, non-object fields."""
        # NOTE(geguileo): We don't use cinder_obj_get_changes to prevent
        # recursion to children OVO which we are not interested and may result
        # in circular references.
        result = {key: getattr(resource._ovo, key)
                  for key in resource._changed_fields
                  if not isinstance(resource.fields[key], fields.ObjectField)}
        # qos/extra specs live on the child volume_type OVO, which the
        # comprehension above deliberately excluded; pull them explicitly.
        if getattr(resource._ovo, 'volume_type_id', None):
            if ('qos_specs' in resource.volume_type._changed_fields and
                    resource.volume_type.qos_specs):
                result['qos_specs'] = resource._ovo.volume_type.qos_specs.specs
            if ('extra_specs' in resource.volume_type._changed_fields and
                    resource.volume_type.extra_specs):
                result['extra_specs'] = resource._ovo.volume_type.extra_specs
        return result

    def get_fields(self, resource):
        """Return a dict of all set, non-object, non-extra fields."""
        result = {
            key: getattr(resource._ovo, key)
            for key in resource.fields
            if (resource._ovo.obj_attr_is_set(key) and
                key not in getattr(resource, 'obj_extra_fields', []) and not
                isinstance(resource.fields[key], fields.ObjectField))
        }
        # As above, specs come from the child volume_type OVO
        if getattr(resource._ovo, 'volume_type_id', None):
            result['extra_specs'] = resource._ovo.volume_type.extra_specs
            if resource._ovo.volume_type.qos_specs_id:
                result['qos_specs'] = resource._ovo.volume_type.qos_specs.specs
        return result
128 |
129 |
class DB(object):
    """Replacement for DB access methods.

    This will serve as replacement for methods used by:

    - Drivers
    - OVOs' get_by_id and save methods
    - DB implementation

    Data will be retrieved using the persistence driver we setup.
    """
    # Maps each SQLAlchemy model to the name of the method that fetches it,
    # used by the generic get_by_id dispatcher below.
    GET_METHODS_PER_DB_MODEL = {
        objects.Volume.model: 'volume_get',
        objects.VolumeType.model: 'volume_type_get',
        objects.Snapshot.model: 'snapshot_get',
        objects.QualityOfServiceSpecs.model: 'qos_specs_get',
    }

    def __init__(self, persistence_driver):
        self.persistence = persistence_driver

        # Replace get_by_id OVO methods with something that will return
        # expected data
        objects.Volume.get_by_id = self.volume_get
        objects.Snapshot.get_by_id = self.snapshot_get

        # Disable saving in OVOs
        # (persistence is handled explicitly by cinderlib instead)
        for ovo_name in cinder_base_ovo.CinderObjectRegistry.obj_classes():
            ovo_cls = getattr(objects, ovo_name)
            ovo_cls.save = lambda *args, **kwargs: None

    def volume_get(self, context, volume_id, *args, **kwargs):
        """Return the Volume OVO; IndexError if the id is unknown."""
        return self.persistence.get_volumes(volume_id)[0]._ovo

    def snapshot_get(self, context, snapshot_id, *args, **kwargs):
        """Return the Snapshot OVO; IndexError if the id is unknown."""
        return self.persistence.get_snapshots(snapshot_id)[0]._ovo

    def volume_type_get(self, context, id, inactive=False,
                        expected_fields=None):
        """Return the volume type as a dict.

        NOTE(review): looks up a *volume* by ``id`` — cinderlib appears to
        give the volume type the same id as its owning volume; confirm.
        """
        # Prefer in-flight volumes not yet persisted
        if id in cinderlib.Backend._volumes_inflight:
            vol = cinderlib.Backend._volumes_inflight[id]
        else:
            vol = self.persistence.get_volumes(id)[0]

        if not vol._ovo.volume_type_id:
            return None
        return vol_type_to_dict(vol._ovo.volume_type)

    def qos_specs_get(self, context, qos_specs_id, inactive=False):
        """Return the qos specs dict for a volume's type.

        NOTE(review): same id-equals-volume-id assumption as
        volume_type_get — confirm.
        """
        if qos_specs_id in cinderlib.Backend._volumes_inflight:
            vol = cinderlib.Backend._volumes_inflight[qos_specs_id]
        else:
            vol = self.persistence.get_volumes(qos_specs_id)[0]
        if not vol._ovo.volume_type_id:
            return None
        return vol_type_to_dict(vol._ovo.volume_type)['qos_specs']

    @classmethod
    def image_volume_cache_get_by_volume_id(cls, context, volume_id):
        # cinderlib keeps no image-volume cache
        return None

    def get_by_id(self, context, model, id, *args, **kwargs):
        """Dispatch to the right getter based on the SQLAlchemy model."""
        method = getattr(self, self.GET_METHODS_PER_DB_MODEL[model])
        return method(context, id)
194 |
195 |
def vol_type_to_dict(volume_type):
    """Serialize a VolumeType OVO into a plain dict.

    The nested qos_specs OVO, when present, is flattened into its data
    dict as well.
    """
    primitive = serialization.obj_to_primitive(volume_type)
    data = primitive['versioned_object.data']
    qos = data.get('qos_specs')
    if qos:
        data['qos_specs'] = qos['versioned_object.data']
    return data
202 |
--------------------------------------------------------------------------------
/cinderlib/persistence/memory.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018, Red Hat, Inc.
2 | # All Rights Reserved.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
5 | # not use this file except in compliance with the License. You may obtain
6 | # a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 | # License for the specific language governing permissions and limitations
14 | # under the License.
15 |
16 | from cinderlib.persistence import base as persistence_base
17 |
18 |
class MemoryPersistence(persistence_base.PersistenceDriverBase):
    """In-memory, process-local metadata persistence.

    Resources live in class-level dictionaries keyed by id, so all
    instances within the process share the same data and nothing survives
    a restart.
    """
    # Class attributes on purpose: shared by every instance
    volumes = {}
    snapshots = {}
    connections = {}
    key_values = {}

    def __init__(self):
        # Cinder drivers expect a DB-like object; hand them the fake one
        self.fake_db = persistence_base.DB(self)
        super(MemoryPersistence, self).__init__()

    @property
    def db(self):
        return self.fake_db

    @staticmethod
    def _get_field(res, field):
        value = getattr(res, field)
        if field == 'host':
            # Extract the backend name from 'host@backend#pool'
            value = value.split('@')[1].split('#')[0]
        return value

    def _filter_by(self, values, field, value):
        """Keep only entries whose ``field`` equals ``value`` (if given)."""
        if not value:
            return values
        return [entry for entry in values
                if self._get_field(entry, field) == value]

    def get_volumes(self, volume_id=None, volume_name=None, backend_name=None):
        if volume_id:
            try:
                found = [self.volumes[volume_id]]
            except KeyError:
                return []
        else:
            found = self.volumes.values()
        found = self._filter_by(found, 'display_name', volume_name)
        found = self._filter_by(found, 'host', backend_name)
        return found

    def get_snapshots(self, snapshot_id=None, snapshot_name=None,
                      volume_id=None):
        if snapshot_id:
            try:
                found = [self.snapshots[snapshot_id]]
            except KeyError:
                return []
        else:
            found = self.snapshots.values()
        found = self._filter_by(found, 'volume_id', volume_id)
        found = self._filter_by(found, 'display_name', snapshot_name)
        return found

    def get_connections(self, connection_id=None, volume_id=None):
        if connection_id:
            try:
                found = [self.connections[connection_id]]
            except KeyError:
                return []
        else:
            found = self.connections.values()
        return self._filter_by(found, 'volume_id', volume_id)

    def get_key_values(self, key=None):
        if key:
            try:
                return [self.key_values[key]]
            except KeyError:
                return []
        return list(self.key_values.values())

    def set_volume(self, volume):
        self.volumes[volume.id] = volume
        super(MemoryPersistence, self).set_volume(volume)

    def set_snapshot(self, snapshot):
        self.snapshots[snapshot.id] = snapshot
        super(MemoryPersistence, self).set_snapshot(snapshot)

    def set_connection(self, connection):
        self.connections[connection.id] = connection
        super(MemoryPersistence, self).set_connection(connection)

    def set_key_value(self, key_value):
        # Key-values carry no change tracking, so just store them
        self.key_values[key_value.key] = key_value

    def delete_volume(self, volume):
        self.volumes.pop(volume.id, None)
        super(MemoryPersistence, self).delete_volume(volume)

    def delete_snapshot(self, snapshot):
        self.snapshots.pop(snapshot.id, None)
        super(MemoryPersistence, self).delete_snapshot(snapshot)

    def delete_connection(self, connection):
        self.connections.pop(connection.id, None)
        super(MemoryPersistence, self).delete_connection(connection)

    def delete_key_value(self, key_value):
        self.key_values.pop(key_value.key, None)
114 |
--------------------------------------------------------------------------------
/cinderlib/serialization.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018, Red Hat, Inc.
2 | # All Rights Reserved.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
5 | # not use this file except in compliance with the License. You may obtain
6 | # a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 | # License for the specific language governing permissions and limitations
14 | # under the License.
15 |
16 | """Oslo Versioned Objects helper file.
17 |
18 | These methods help with the serialization of Cinderlib objects that uses the
19 | OVO serialization mechanism, so we remove circular references when doing the
20 | JSON serialization of objects (for example in a Volume OVO it has a 'snapshot'
21 | field which is a Snapshot OVO that has a 'volume' back reference), piggy back
22 | on the OVO's serialization mechanism to add/get additional data we want.
23 | """
24 |
25 | import functools
26 | import json as json_lib
27 | import six
28 |
29 | from cinder.objects import base as cinder_base_ovo
30 | from oslo_versionedobjects import base as base_ovo
31 | from oslo_versionedobjects import fields as ovo_fields
32 |
33 | from cinderlib import objects
34 |
35 |
# Variable used to avoid circular references
# (set by setup(); holds the cinderlib Backend class so the module-level
# json/dump helpers can reach the registered backends)
BACKEND_CLASS = None
38 |
39 |
def setup(backend_class):
    """Install cinderlib's serialization hooks into the OVO machinery.

    Stores the Backend class for the module-level json()/dump() helpers
    and replaces the OVO (de)hydration methods with the cycle-safe
    versions defined in this module.
    """
    global BACKEND_CLASS
    BACKEND_CLASS = backend_class

    # Use custom dehydration methods that prevent maximum recursion errors
    # due to circular references:
    # ie: snapshot -> volume -> snapshots -> snapshot
    base_ovo.VersionedObject.obj_to_primitive = obj_to_primitive
    cinder_base_ovo.CinderObject.obj_from_primitive = classmethod(
        obj_from_primitive)

    fields = base_ovo.obj_fields
    fields.Object.to_primitive = staticmethod(field_ovo_to_primitive)
    fields.Field.to_primitive = field_to_primitive
    fields.List.to_primitive = iterable_to_primitive
    fields.Set.to_primitive = iterable_to_primitive
    fields.Dict.to_primitive = dict_to_primitive
    # These types only need their signature extended with ``visited``
    wrap_to_primitive(fields.FieldType)
    wrap_to_primitive(fields.DateTime)
    wrap_to_primitive(fields.IPAddress)
60 |
61 |
def wrap_to_primitive(cls):
    """Extend ``cls.to_primitive`` to accept (and drop) ``visited``.

    Lets simple field types keep their original serialization while
    matching the signature of the cycle-aware methods in this module.
    """
    original = cls.to_primitive

    @functools.wraps(original)
    def to_primitive(obj, attr, value, visited=None):
        # The extra tracking argument is irrelevant for simple types
        return original(obj, attr, value)

    cls.to_primitive = staticmethod(to_primitive)
69 |
70 |
def _set_visited(element, visited):
    """Record ``element`` in the visited set used to break cycles.

    Creates the set on first use.  Only OVOs and object fields are
    tracked: simple values (e.g. interned booleans) may legitimately share
    the same instance across fields and must not be skipped.
    """
    if visited is None:
        visited = set()
    tracked_types = (ovo_fields.ObjectField, cinder_base_ovo.CinderObject)
    if isinstance(element, tracked_types):
        visited.add(id(element))
    return visited
82 |
83 |
def obj_to_primitive(self, target_version=None,
                     version_manifest=None, visited=None):
    """Dehydrate an OVO into a primitive dict, skipping reference cycles.

    Replacement for VersionedObject.obj_to_primitive: it ignores
    ``target_version``/``version_manifest`` (no version downgrades) and
    skips any field whose value was already serialized higher up the
    tree, then piggybacks cinderlib's own data onto the result.
    """
    # No target_version, version_manifest, or changes support
    visited = _set_visited(self, visited)
    primitive = {}
    for name, field in self.fields.items():
        if self.obj_attr_is_set(name):
            value = getattr(self, name)
            # Skip cycles
            if id(value) in visited:
                continue
            primitive[name] = field.to_primitive(self, name, value,
                                                 visited)

    obj_name = self.obj_name()
    obj = {
        self._obj_primitive_key('name'): obj_name,
        self._obj_primitive_key('namespace'): self.OBJ_PROJECT_NAMESPACE,
        self._obj_primitive_key('version'): self.VERSION,
        self._obj_primitive_key('data'): primitive
    }

    # Piggyback to store our own data
    cl_obj = getattr(self, '_cl_obj', None)
    clib_data = cl_obj and cl_obj._to_primitive()
    if clib_data:
        obj['cinderlib.data'] = clib_data

    return obj
113 |
114 |
def obj_from_primitive(
        cls, primitive, context=None,
        original_method=cinder_base_ovo.CinderObject.obj_from_primitive):
    """Rehydrate an OVO and attach cinderlib's piggybacked data.

    ``original_method`` is captured at definition time — before setup()
    replaces the class attribute — so the stock deserialization still
    runs first.
    """
    result = original_method(primitive, context)
    result.cinderlib_data = primitive.get('cinderlib.data')
    return result
121 |
122 |
def field_ovo_to_primitive(obj, attr, value, visited=None):
    """Serialize an Object field by delegating to the OVO itself."""
    return value.obj_to_primitive(visited=visited)
125 |
126 |
def field_to_primitive(self, obj, attr, value, visited=None):
    """Serialize one field's value, propagating the visited set.

    ``None`` values stay ``None`` without touching the field type.
    """
    return (None if value is None
            else self._type.to_primitive(obj, attr, value, visited))
131 |
132 |
def iterable_to_primitive(self, obj, attr, value, visited=None):
    """Serialize a List/Set field, skipping elements already visited."""
    visited = _set_visited(self, visited)
    primitives = []
    for item in value:
        # Skip anything already serialized higher up the tree
        if id(item) in visited:
            continue
        _set_visited(item, visited)
        primitives.append(
            self._element_type.to_primitive(obj, attr, item, visited))
    return primitives
143 |
144 |
def dict_to_primitive(self, obj, attr, value, visited=None):
    """Serialize a Dict field, skipping values already visited."""
    visited = _set_visited(self, visited)
    result = {}
    for key, item in value.items():
        # Skip anything already serialized higher up the tree
        if id(item) in visited:
            continue
        _set_visited(item, visited)
        label = '%s["%s"]' % (attr, key)
        result[key] = self._element_type.to_primitive(obj, label, item,
                                                      visited)
    return result
155 |
156 |
def load(json_src, save=False):
    """Load any json serialized cinderlib object.

    Accepts a JSON string, a single serialized object dict, or a list of
    them; dispatch is based on each dict's 'class' entry.
    """
    data = json_src
    if isinstance(data, six.string_types):
        data = json_lib.loads(data)

    if isinstance(data, list):
        return [getattr(objects, item['class']).load(item, save)
                for item in data]
    return getattr(objects, data['class']).load(data, save)
167 |
168 |
def json():
    """Convert to Json everything we have in this system."""
    # Aggregates the 'json' representation of every registered backend
    return [backend.json for backend in BACKEND_CLASS.backends.values()]


def jsons():
    """Convert to a Json string everything we have in this system."""
    return json_lib.dumps(json(), separators=(',', ':'))


def dump():
    """Convert to Json everything we have in this system."""
    # NOTE(review): presumably 'dump' differs from 'json' by including
    # fuller backend state -- confirm against the Backend implementation.
    return [backend.dump for backend in BACKEND_CLASS.backends.values()]


def dumps():
    """Convert to a Json string everything we have in this system."""
    return json_lib.dumps(dump(), separators=(',', ':'))
187 |
--------------------------------------------------------------------------------
/cinderlib/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Akrog/cinderlib/6481cd9a34744f80bdba130fe9089f1b8b7cb327/cinderlib/tests/__init__.py
--------------------------------------------------------------------------------
/cinderlib/tests/functional/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Akrog/cinderlib/6481cd9a34744f80bdba130fe9089f1b8b7cb327/cinderlib/tests/functional/__init__.py
--------------------------------------------------------------------------------
/cinderlib/tests/functional/base_tests.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018, Red Hat, Inc.
2 | # All Rights Reserved.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
5 | # not use this file except in compliance with the License. You may obtain
6 | # a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 | # License for the specific language governing permissions and limitations
14 | # under the License.
15 |
16 | import functools
17 | import os
18 | import subprocess
19 | import tempfile
20 |
21 | from oslo_config import cfg
22 | import six
23 | import unittest2
24 | import yaml
25 |
26 | import cinderlib
27 | from cinderlib.tests.functional import cinder_to_yaml
28 |
29 |
def set_backend(func, new_name, backend_name):
    """Return ``func`` wrapped so it runs against a specific backend.

    The wrapper is renamed to ``new_name`` so each generated test shows
    up individually in the test runner output.
    """
    @functools.wraps(func)
    def runner(self, *args, **kwargs):
        # Select the target backend right before the test body runs
        self.backend = cinderlib.Backend.backends[backend_name]
        return func(self, *args, **kwargs)

    runner.__name__ = new_name
    runner.__wrapped__ = func
    return runner
38 |
39 |
40 | def test_all_backends(cls):
41 | """Decorator to run tests in a class for all available backends."""
42 | config = BaseFunctTestCase.ensure_config_loaded()
43 | for fname, func in cls.__dict__.items():
44 | if fname.startswith('test_'):
45 | for backend in config['backends']:
46 | bname = backend['volume_backend_name']
47 | test_name = '%s_on_%s' % (fname, bname)
48 | setattr(cls, test_name, set_backend(func, test_name, bname))
49 | delattr(cls, fname)
50 | return cls
51 |
52 |
class BaseFunctTestCase(unittest2.TestCase):
    """Base class for cinderlib functional tests.

    Loads the backend configuration (a YAML file or a cinder .conf pointed
    to by CL_FTEST_CFG), initializes cinderlib with memory_db persistence,
    and cleans up leftover volumes/snapshots/connections after the run.
    """
    # stderr sink for helper commands run through _root_execute
    FNULL = open(os.devnull, 'w')
    CONFIG_FILE = os.environ.get('CL_FTEST_CFG', '/etc/cinder/cinder.conf')
    # Decimal places for size comparisons (0 = exact).  Environment
    # variables are strings, but assertAlmostEqual needs an int for its
    # ``places`` argument, so convert here.
    PRECISION = int(os.environ.get('CL_FTEST_PRECISION', 0))
    LOGGING_ENABLED = os.environ.get('CL_FTEST_LOGGING', False)
    ROOT_HELPER = os.environ.get('CL_FTEST_ROOT_HELPER', 'sudo')
    tests_config = None

    @classmethod
    def ensure_config_loaded(cls):
        """Load the test configuration once and cache it as a dict."""
        if not cls.tests_config:
            # If it's a .conf type of configuration file convert it to dict
            if cls.CONFIG_FILE.endswith('.conf'):
                cls.tests_config = cinder_to_yaml.convert(cls.CONFIG_FILE)
            else:
                with open(cls.CONFIG_FILE, 'r') as f:
                    # safe_load: the config is plain data, and yaml.load
                    # without an explicit Loader is unsafe and deprecated.
                    cls.tests_config = yaml.safe_load(f)
            cls.tests_config.setdefault('logs', cls.LOGGING_ENABLED)
            cls.tests_config.setdefault('size_precision', cls.PRECISION)
        return cls.tests_config

    @staticmethod
    def _replace_oslo_cli_parse():
        """Make oslo.config ignore the real CLI args (e.g. the runner's)."""
        original_cli_parser = cfg.ConfigOpts._parse_cli_opts

        def _parse_cli_opts(self, args):
            # Discard the passed args and parse an empty command line
            return original_cli_parser(self, [])

        cfg.ConfigOpts._parse_cli_opts = six.create_unbound_method(
            _parse_cli_opts, cfg.ConfigOpts)

    @classmethod
    def setUpClass(cls):
        """Initialize cinderlib and every configured backend."""
        cls._replace_oslo_cli_parse()
        config = cls.ensure_config_loaded()
        # Use memory_db persistence instead of memory to ensure migrations work
        cinderlib.setup(root_helper=cls.ROOT_HELPER,
                        disable_logs=not config['logs'],
                        persistence_config={'storage': 'memory_db'})

        # Initialize backends.  Named backend_cfg (not cfg) to avoid
        # shadowing the oslo_config module imported at the top of the file.
        cls.backends = [cinderlib.Backend(**backend_cfg)
                        for backend_cfg in config['backends']]
        # Lazy load backend's _volumes variable using the volumes property so
        # new volumes are added to this list on successful creation.
        for backend in cls.backends:
            backend.volumes

        # Set current backend, by default is the first
        cls.backend = cls.backends[0]
        cls.size_precision = config['size_precision']

    @classmethod
    def tearDownClass(cls):
        """Delete resources the tests left behind, collecting all errors."""
        errors = []
        # Do the cleanup of the resources the tests haven't cleaned up already
        for backend in cls.backends:
            # For each of the volumes that haven't been deleted delete the
            # snapshots that are still there and then the volume.
            # NOTE(geguileo): Don't use volumes and snapshots iterables since
            # they are modified when deleting.
            # NOTE(geguileo): Cleanup in reverse because RBD driver cannot
            # delete a snapshot that has a volume created from it.
            for vol in list(backend.volumes)[::-1]:
                for snap in list(vol.snapshots):
                    try:
                        snap.delete()
                    except Exception as exc:
                        errors.append('Error deleting snapshot %s from volume '
                                      '%s: %s' % (snap.id, vol.id, exc))
                # Detach if locally attached
                if vol.local_attach:
                    try:
                        vol.detach()
                    except Exception as exc:
                        # The original message had four %s placeholders for
                        # three arguments, raising TypeError instead of
                        # recording the failure.
                        errors.append('Error detaching %s for volume %s: '
                                      '%s' % (vol.local_attach.path, vol.id,
                                              exc))

                # Disconnect any existing connections
                for conn in vol.connections:
                    try:
                        conn.disconnect()
                    except Exception as exc:
                        errors.append('Error disconnecting volume %s: %s' %
                                      (vol.id, exc))

                try:
                    vol.delete()
                except Exception as exc:
                    errors.append('Error deleting volume %s: %s' %
                                  (vol.id, exc))
        if errors:
            raise Exception('Errors on test cleanup: %s' % '\n\t'.join(errors))

    def _root_execute(self, *args, **kwargs):
        """Run a command through the root helper and return its stdout."""
        cmd = [self.ROOT_HELPER]
        cmd.extend(args)
        # kwargs become key=value CLI arguments (e.g. dd's of=...)
        cmd.extend("%s=%s" % (k, v) for k, v in kwargs.items())
        return subprocess.check_output(cmd, stderr=self.FNULL)

    def _create_vol(self, backend=None, **kwargs):
        """Create a volume (1GB by default) and assert it's available."""
        if not backend:
            backend = self.backend

        vol_size = kwargs.setdefault('size', 1)
        name = kwargs.setdefault('name', backend.id)

        vol = backend.create_volume(**kwargs)

        self.assertEqual('available', vol.status)
        self.assertEqual(vol_size, vol.size)
        self.assertEqual(name, vol.display_name)
        self.assertIn(vol, backend.volumes)
        return vol

    def _create_snap(self, vol, **kwargs):
        """Create a snapshot of ``vol`` and assert it's available."""
        name = kwargs.setdefault('name', vol.id)

        # Pass kwargs through: the original hardcoded name=vol.id, so a
        # caller-provided name was ignored and the assertion below failed.
        snap = vol.create_snapshot(**kwargs)

        self.assertEqual('available', snap.status)
        self.assertEqual(vol.size, snap.volume_size)
        self.assertEqual(name, snap.display_name)

        self.assertIn(snap, vol.snapshots)
        return snap

    def _get_vol_size(self, vol, do_detach=True):
        """Return the attached volume's block device size in GiB."""
        if not vol.local_attach:
            vol.attach()

        try:
            # Retry until the device is available to lsblk
            while True:
                try:
                    result = self._root_execute('lsblk', '-o', 'SIZE',
                                                '-b', vol.local_attach.path)
                    size_bytes = result.split()[1]
                    return float(size_bytes) / 1024.0 / 1024.0 / 1024.0
                # NOTE(geguileo): We can't catch subprocess.CalledProcessError
                # because somehow we get an instance from a different
                # subprocess.CalledProcessError class that isn't the same.
                except Exception as exc:
                    # If the volume is not yet available
                    if getattr(exc, 'returncode', 0) != 32:
                        raise
        finally:
            if do_detach:
                vol.detach()

    def _write_data(self, vol, data=None, do_detach=True):
        """Write ``data`` (default 1000 bytes) to the attached volume."""
        if not data:
            data = b'0123456789' * 100

        if not vol.local_attach:
            vol.attach()

        # TODO(geguileo): This will not work on Windows, for that we need to
        # pass delete=False and do the manual deletion ourselves.
        try:
            with tempfile.NamedTemporaryFile() as f:
                f.write(data)
                f.flush()
                self._root_execute('dd', 'if=' + f.name,
                                   of=vol.local_attach.path)
        finally:
            if do_detach:
                vol.detach()

        return data

    def _read_data(self, vol, length, do_detach=True):
        """Read ``length`` bytes from the start of the attached volume."""
        if not vol.local_attach:
            vol.attach()
        try:
            stdout = self._root_execute('dd', 'if=' + vol.local_attach.path,
                                        count=1, ibs=length)
        finally:
            if do_detach:
                vol.detach()
        return stdout

    def _pools_info(self, stats):
        """Return per-pool stats, falling back to the whole stats dict."""
        return stats.get('pools', [stats])

    def assertSize(self, expected_size, actual_size):
        """Compare sizes exactly, or to size_precision decimal places."""
        if self.size_precision:
            self.assertAlmostEqual(expected_size, actual_size,
                                   self.size_precision)
        else:
            self.assertEqual(expected_size, actual_size)
244 |
--------------------------------------------------------------------------------
/cinderlib/tests/functional/ceph.yaml:
--------------------------------------------------------------------------------
1 | # Logs are way too verbose, so we disable them
2 | logs: false
3 |
4 | # We only define one backend
5 | backends:
6 | - volume_backend_name: ceph
7 | volume_driver: cinder.volume.drivers.rbd.RBDDriver
8 | rbd_user: admin
9 | rbd_pool: rbd
10 | rbd_ceph_conf: /etc/ceph/ceph.conf
11 | rbd_keyring_conf: /etc/ceph/ceph.client.admin.keyring
12 |
--------------------------------------------------------------------------------
/cinderlib/tests/functional/cinder_to_yaml.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018, Red Hat, Inc.
2 | # All Rights Reserved.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
5 | # not use this file except in compliance with the License. You may obtain
6 | # a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 | # License for the specific language governing permissions and limitations
14 | # under the License.
15 |
16 | from os import path
17 | import yaml
18 |
19 | from six.moves import configparser
20 |
21 | from cinder.cmd import volume
22 | volume.objects.register_all() # noqa
23 |
24 | from cinder.volume import configuration as config
25 | from cinder.volume import manager
26 |
27 |
def convert(cinder_source, yaml_dest=None):
    """Convert a cinder.conf file into a cinderlib YAML backends config.

    :param cinder_source: Path to an existing cinder.conf file.
    :param yaml_dest: Optional path where the resulting YAML is written.
    :returns: Dict of the form {'backends': [<backend option dicts>]},
              containing only options explicitly set in the source file.
    :raises Exception: If cinder_source doesn't exist.
    """
    result_cfgs = []

    if not path.exists(cinder_source):
        raise Exception("Cinder config file %s doesn't exist" % cinder_source)

    # Manually parse the Cinder configuration file so we know which options are
    # set.
    parser = configparser.ConfigParser()
    parser.read(cinder_source)
    enabled_backends = parser.get('DEFAULT', 'enabled_backends')
    backends = [name.strip() for name in enabled_backends.split(',') if name]

    # Also load the file through oslo.config so cfg.safe_get() below
    # returns the real typed values.
    volume.CONF(('--config-file', cinder_source), project='cinder')

    for backend in backends:
        options_present = parser.options(backend)

        # Dynamically loading the driver triggers adding the specific
        # configuration options to the backend_defaults section
        cfg = config.Configuration(manager.volume_backend_opts,
                                   config_group=backend)
        driver_ns = cfg.volume_driver.rsplit('.', 1)[0]
        __import__(driver_ns)

        # Use the backend_defaults section to extract the configuration for
        # options that are present in the backend section and add them to
        # the backend section.
        opts = volume.CONF._groups['backend_defaults']._opts
        known_present_options = [opt for opt in options_present if opt in opts]
        volume_opts = [opts[option]['opt'] for option in known_present_options]
        cfg.append_config_values(volume_opts)

        # Now retrieve the options that are set in the configuration file.
        result_cfgs.append({option: cfg.safe_get(option)
                            for option in known_present_options})

    result = {'backends': result_cfgs}
    if yaml_dest:
        # Write the YAML to the destination
        with open(yaml_dest, 'w') as f:
            yaml.dump(result, f)
    return result
71 |
--------------------------------------------------------------------------------
/cinderlib/tests/functional/lvm.yaml:
--------------------------------------------------------------------------------
# For Fedora, CentOS, and RHEL we require the targetcli package.
# For Ubuntu we require lio-utils, or else the target iscsi_helper must be
# changed.
4 |
5 | # Logs are way too verbose, so we disable them
6 | logs: false
7 |
8 | # We only define one backend
9 | backends:
10 | - volume_backend_name: lvm
11 | volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
12 | volume_group: cinder-volumes
13 | target_protocol: iscsi
14 | target_helper: lioadm
15 |
--------------------------------------------------------------------------------
/cinderlib/tests/functional/test_basic.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018, Red Hat, Inc.
2 | # All Rights Reserved.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
5 | # not use this file except in compliance with the License. You may obtain
6 | # a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 | # License for the specific language governing permissions and limitations
14 | # under the License.
15 |
16 | import os
17 |
18 | import base_tests
19 |
20 |
@base_tests.test_all_backends
class BackendFunctBasic(base_tests.BaseFunctTestCase):
    """Basic functional tests for cinderlib backends.

    The test_all_backends decorator parametrizes this class so each test
    runs once per backend defined in the functional test configuration.
    """

    def test_stats(self):
        """The backend reports its identity and per-pool capacities."""
        stats = self.backend.stats()
        self.assertIn('vendor_name', stats)
        self.assertIn('volume_backend_name', stats)
        pools_info = self._pools_info(stats)
        for pool_info in pools_info:
            self.assertIn('free_capacity_gb', pool_info)
            self.assertIn('total_capacity_gb', pool_info)

    def _volumes_in_pools(self, pools_info):
        # Return None when no pool reports a volume count at all, so
        # callers can tell "not reported" apart from "zero volumes".
        if not any('total_volumes' in p for p in pools_info):
            return None
        return sum(p.get('total_volumes', 0) for p in pools_info)

    def test_stats_with_creation(self):
        """Creating a volume is reflected in refreshed backend stats."""
        # NOTE: This test can fail if we don't have exclusive usage of the
        # storage pool used in the tests or if the specific driver does not
        # return the right values in allocated_capacity_gb or
        # provisioned_capacity_gb.
        initial_stats = self.backend.stats(refresh=True)
        vol = self._create_vol(self.backend)
        new_stats = self.backend.stats(refresh=True)

        initial_pools_info = self._pools_info(initial_stats)
        new_pools_info = self._pools_info(new_stats)

        initial_volumes = self._volumes_in_pools(initial_pools_info)
        new_volumes = self._volumes_in_pools(new_pools_info)

        # If the backend is reporting the number of volumes, check them
        if initial_volumes is not None:
            self.assertEqual(initial_volumes + 1, new_volumes)

        initial_size = sum(p.get('allocated_capacity_gb',
                                 p.get('provisioned_capacity_gb', 0))
                           for p in initial_pools_info)
        new_size = sum(p.get('allocated_capacity_gb',
                             p.get('provisioned_capacity_gb', vol.size))
                       for p in new_pools_info)
        self.assertEqual(initial_size + vol.size, new_size)

    def test_create_volume(self):
        """A created volume's real device size matches the requested size."""
        vol = self._create_vol(self.backend)
        vol_size = self._get_vol_size(vol)
        self.assertSize(vol.size, vol_size)
        # We are not testing delete, so leave the deletion to the tearDown

    def test_create_delete_volume(self):
        """Volume deletion works and is idempotent."""
        vol = self._create_vol(self.backend)

        vol.delete()
        self.assertEqual('deleted', vol.status)
        self.assertTrue(vol.deleted)
        self.assertNotIn(vol, self.backend.volumes)

        # Confirm idempotency of the operation by deleting it again
        vol._ovo.status = 'error'
        vol._ovo.deleted = False
        vol.delete()
        self.assertEqual('deleted', vol.status)
        self.assertTrue(vol.deleted)

    def test_create_snapshot(self):
        """A snapshot can be created from a volume."""
        vol = self._create_vol(self.backend)
        self._create_snap(vol)
        # We are not testing delete, so leave the deletion to the tearDown

    def test_create_delete_snapshot(self):
        """Snapshot deletion works and is idempotent."""
        vol = self._create_vol(self.backend)
        snap = self._create_snap(vol)

        snap.delete()
        self.assertEqual('deleted', snap.status)
        self.assertTrue(snap.deleted)
        self.assertNotIn(snap, vol.snapshots)

        # Confirm idempotency of the operation by deleting it again
        snap._ovo.status = 'error'
        snap._ovo.deleted = False
        snap.delete()
        self.assertEqual('deleted', snap.status)
        self.assertTrue(snap.deleted)

    def test_attach_volume(self):
        """Attaching exposes a local path and tracks the connection."""
        vol = self._create_vol(self.backend)

        attach = vol.attach()
        path = attach.path

        self.assertIs(attach, vol.local_attach)
        self.assertIn(attach, vol.connections)

        self.assertTrue(os.path.exists(path))
        # We are not testing detach, so leave it to the tearDown

    def test_attach_detach_volume(self):
        """Detaching the volume clears local_attach and the connection."""
        vol = self._create_vol(self.backend)

        attach = vol.attach()
        self.assertIs(attach, vol.local_attach)
        self.assertIn(attach, vol.connections)

        vol.detach()
        self.assertIsNone(vol.local_attach)
        self.assertNotIn(attach, vol.connections)

    def test_attach_detach_volume_via_attachment(self):
        """detach() on the attachment keeps the connection until disconnect."""
        vol = self._create_vol(self.backend)

        attach = vol.attach()
        self.assertTrue(attach.attached)
        path = attach.path

        self.assertTrue(os.path.exists(path))

        attach.detach()
        self.assertFalse(attach.attached)
        self.assertIsNone(vol.local_attach)

        # We haven't disconnected the volume, just detached it
        self.assertIn(attach, vol.connections)

        attach.disconnect()
        self.assertNotIn(attach, vol.connections)

    def test_disk_io(self):
        """Data written to a volume can be read back unchanged."""
        vol = self._create_vol(self.backend)
        data = self._write_data(vol)

        read_data = self._read_data(vol, len(data))

        self.assertEqual(data, read_data)

    def test_extend(self):
        """Extending grows both the reported size and the real device."""
        vol = self._create_vol(self.backend)
        original_size = vol.size
        result_original_size = self._get_vol_size(vol)
        self.assertSize(original_size, result_original_size)

        new_size = vol.size + 1
        vol.extend(new_size)

        self.assertEqual(new_size, vol.size)
        result_new_size = self._get_vol_size(vol)
        self.assertSize(new_size, result_new_size)

    def test_clone(self):
        """A clone has the same size and contents as the source volume."""
        vol = self._create_vol(self.backend)
        original_size = self._get_vol_size(vol, do_detach=False)
        data = self._write_data(vol)

        new_vol = vol.clone()
        self.assertEqual(vol.size, new_vol.size)

        cloned_size = self._get_vol_size(new_vol, do_detach=False)
        read_data = self._read_data(new_vol, len(data))
        self.assertEqual(original_size, cloned_size)
        self.assertEqual(data, read_data)

    def test_create_volume_from_snapshot(self):
        """A volume created from a snapshot holds the snapshotted data."""
        # Create a volume and write some data
        vol = self._create_vol(self.backend)
        original_size = self._get_vol_size(vol, do_detach=False)
        data = self._write_data(vol)

        # Take a snapshot
        snap = vol.create_snapshot()
        self.assertEqual(vol.size, snap.volume_size)

        # Change the data in the volume
        reversed_data = data[::-1]
        self._write_data(vol, data=reversed_data)

        # Create a new volume from the snapshot with the original data
        new_vol = snap.create_volume()
        self.assertEqual(vol.size, new_vol.size)

        created_size = self._get_vol_size(new_vol, do_detach=False)
        read_data = self._read_data(new_vol, len(data))
        self.assertEqual(original_size, created_size)
        self.assertEqual(data, read_data)
205 |
--------------------------------------------------------------------------------
/cinderlib/tests/unit/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Akrog/cinderlib/6481cd9a34744f80bdba130fe9089f1b8b7cb327/cinderlib/tests/unit/__init__.py
--------------------------------------------------------------------------------
/cinderlib/tests/unit/base.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018, Red Hat, Inc.
2 | # All Rights Reserved.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
5 | # not use this file except in compliance with the License. You may obtain
6 | # a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 | # License for the specific language governing permissions and limitations
14 | # under the License.
15 |
16 |
17 | import mock
18 | import unittest2
19 |
20 | from oslo_config import cfg
21 | import six
22 |
23 | import cinderlib
24 | from cinderlib.tests.unit import utils
25 |
26 |
def _replace_oslo_cli_parse():
    """Monkey-patch oslo.config so it ignores the real CLI arguments.

    The test runner's own command-line options would otherwise be handed
    to oslo.config's CLI parser, so the original parser is invoked with an
    empty argument list instead.
    """
    original_cli_parser = cfg.ConfigOpts._parse_cli_opts

    def _parse_cli_opts(self, args):
        # Drop the real args; parse an empty list instead.
        return original_cli_parser(self, [])

    cfg.ConfigOpts._parse_cli_opts = six.create_unbound_method(_parse_cli_opts,
                                                               cfg.ConfigOpts)
35 |
36 |
37 | _replace_oslo_cli_parse()
38 | cinderlib.setup(persistence_config={'storage': utils.get_mock_persistence()})
39 |
40 |
class BaseTest(unittest2.TestCase):
    """Common base class for cinderlib unit tests.

    Provides a fake backend with mocked persistence and a patch() helper
    that registers automatic mock cleanup.
    """

    # Subclasses may override this with their own persistence configuration.
    PERSISTENCE_CFG = None

    def setUp(self):
        if not self.PERSISTENCE_CFG:
            # No class-specific persistence requested: install a fresh mock.
            persistence_cfg = {'storage': utils.get_mock_persistence()}
            cinderlib.Backend.set_persistence(persistence_cfg)
        self.backend_name = 'fake_backend'
        self.backend = utils.FakeBackend(volume_backend_name=self.backend_name)
        self.persistence = self.backend.persistence
        cinderlib.Backend._volumes_inflight = {}

    def tearDown(self):
        # Drop every backend registered during the test.
        cinderlib.Backend.backends = {}

    def patch(self, path, *args, **kwargs):
        """mock.patch the given path and undo it automatically on cleanup."""
        patcher = mock.patch(path, *args, **kwargs)
        mocked = patcher.start()
        self.addCleanup(patcher.stop)
        return mocked
63 |
--------------------------------------------------------------------------------
/cinderlib/tests/unit/nos_brick.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019, Red Hat, Inc.
2 | # All Rights Reserved.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
5 | # not use this file except in compliance with the License. You may obtain
6 | # a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 | # License for the specific language governing permissions and limitations
14 | # under the License.
15 |
16 | import errno
17 |
18 | import mock
19 |
20 | from cinderlib import nos_brick
21 | from cinderlib.tests.unit import base
22 |
23 |
class TestRBDConnector(base.BaseTest):
    """Unit tests for cinderlib.nos_brick.RBDConnector helper methods.

    Each helper (_ensure_dir, _ensure_link) is exercised in two modes:
    non-root, where work is delegated to _execute with run_as_root=True,
    and root (im_root=True), where plain os.* calls are used directly.
    """

    def setUp(self):
        # NOTE(review): base.BaseTest.setUp is not called, so no fake
        # backend is created; these tests only need the connector itself —
        # confirm this is intentional before adding a super() call.
        self.connector = nos_brick.RBDConnector('sudo')
        self.connector.im_root = False
        self.containerized = False
        # Disable the RBD class setup; it isn't exercised by these tests.
        self.connector._setup_rbd_class = lambda *args: None

    @mock.patch.object(nos_brick.RBDConnector, '_execute')
    @mock.patch('os.makedirs')
    def test__ensure_dir(self, mkdir_mock, exec_mock):
        """Non-root: directory creation goes through sudo mkdir."""
        self.connector._ensure_dir(mock.sentinel.path)
        exec_mock.assert_called_once_with('mkdir', '-p', '-m0755',
                                          mock.sentinel.path, run_as_root=True)
        mkdir_mock.assert_not_called()

    @mock.patch.object(nos_brick.RBDConnector, '_execute')
    @mock.patch('os.makedirs')
    def test__ensure_dir_root(self, mkdir_mock, exec_mock):
        """Root: directory creation uses os.makedirs directly."""
        self.connector.im_root = True
        self.connector._ensure_dir(mock.sentinel.path)
        mkdir_mock.assert_called_once_with(mock.sentinel.path, 0o755)
        exec_mock.assert_not_called()

    @mock.patch.object(nos_brick.RBDConnector, '_execute')
    @mock.patch('os.makedirs', side_effect=OSError(errno.EEXIST, ''))
    def test__ensure_dir_root_exists(self, mkdir_mock, exec_mock):
        """Root: an already-existing directory (EEXIST) is not an error."""
        self.connector.im_root = True
        self.connector._ensure_dir(mock.sentinel.path)
        mkdir_mock.assert_called_once_with(mock.sentinel.path, 0o755)
        exec_mock.assert_not_called()

    @mock.patch.object(nos_brick.RBDConnector, '_execute')
    @mock.patch('os.makedirs', side_effect=OSError(errno.EPERM, ''))
    def test__ensure_dir_root_fails(self, mkdir_mock, exec_mock):
        """Root: any other OSError (e.g. EPERM) propagates to the caller."""
        self.connector.im_root = True
        with self.assertRaises(OSError) as exc:
            self.connector._ensure_dir(mock.sentinel.path)
        self.assertEqual(mkdir_mock.side_effect, exc.exception)
        mkdir_mock.assert_called_once_with(mock.sentinel.path, 0o755)
        exec_mock.assert_not_called()

    @mock.patch('os.path.realpath')
    @mock.patch.object(nos_brick.RBDConnector, '_execute')
    @mock.patch.object(nos_brick.RBDConnector, '_ensure_dir')
    @mock.patch('os.symlink')
    def test__ensure_link(self, link_mock, dir_mock, exec_mock, path_mock):
        """Non-root: the symlink is created via sudo ln -s -f."""
        source = '/dev/rbd0'
        link = '/dev/rbd/rbd/volume-xyz'
        self.connector._ensure_link(source, link)
        dir_mock.assert_called_once_with('/dev/rbd/rbd')
        exec_mock.assert_called_once_with('ln', '-s', '-f', source, link,
                                          run_as_root=True)
        link_mock.assert_not_called()
        path_mock.assert_not_called()

    @mock.patch('os.path.realpath')
    @mock.patch.object(nos_brick.RBDConnector, '_execute')
    @mock.patch.object(nos_brick.RBDConnector, '_ensure_dir')
    @mock.patch('os.symlink')
    def test__ensure_link_root(self, link_mock, dir_mock, exec_mock,
                               path_mock):
        """Root: the symlink is created with os.symlink directly."""
        self.connector.im_root = True
        source = '/dev/rbd0'
        link = '/dev/rbd/rbd/volume-xyz'
        self.connector._ensure_link(source, link)
        dir_mock.assert_called_once_with('/dev/rbd/rbd')
        exec_mock.assert_not_called()
        link_mock.assert_called_once_with(source, link)
        path_mock.assert_not_called()

    @mock.patch('os.path.realpath')
    @mock.patch.object(nos_brick.RBDConnector, '_execute')
    @mock.patch.object(nos_brick.RBDConnector, '_ensure_dir')
    @mock.patch('os.symlink', side_effect=OSError(errno.EEXIST, ''))
    def test__ensure_link_root_exists(self, link_mock, dir_mock, exec_mock,
                                      path_mock):
        """Root: EEXIST is fine when the link already points to the source."""
        self.connector.im_root = True
        source = '/dev/rbd0'
        path_mock.return_value = source
        link = '/dev/rbd/rbd/volume-xyz'
        self.connector._ensure_link(source, link)
        dir_mock.assert_called_once_with('/dev/rbd/rbd')
        exec_mock.assert_not_called()
        link_mock.assert_called_once_with(source, link)

    @mock.patch('os.path.realpath')
    @mock.patch.object(nos_brick.RBDConnector, '_execute')
    @mock.patch.object(nos_brick.RBDConnector, '_ensure_dir')
    @mock.patch('os.symlink', side_effect=OSError(errno.EPERM, ''))
    def test__ensure_link_root_fails(self, link_mock, dir_mock, exec_mock,
                                     path_mock):
        """Root: a non-EEXIST error from os.symlink propagates."""
        self.connector.im_root = True
        source = '/dev/rbd0'
        path_mock.return_value = source
        link = '/dev/rbd/rbd/volume-xyz'

        with self.assertRaises(OSError) as exc:
            self.connector._ensure_link(source, link)

        self.assertEqual(link_mock.side_effect, exc.exception)
        dir_mock.assert_called_once_with('/dev/rbd/rbd')
        exec_mock.assert_not_called()
        link_mock.assert_called_once_with(source, link)

    @mock.patch('os.remove')
    @mock.patch('os.path.realpath')
    @mock.patch.object(nos_brick.RBDConnector, '_execute')
    @mock.patch.object(nos_brick.RBDConnector, '_ensure_dir')
    @mock.patch('os.symlink', side_effect=[OSError(errno.EEXIST, ''), None])
    def test__ensure_link_root_replace(self, link_mock, dir_mock, exec_mock,
                                       path_mock, remove_mock):
        """Root: a stale link to another device is removed and recreated."""
        self.connector.im_root = True
        source = '/dev/rbd0'
        path_mock.return_value = '/dev/rbd1'
        link = '/dev/rbd/rbd/volume-xyz'
        self.connector._ensure_link(source, link)
        dir_mock.assert_called_once_with('/dev/rbd/rbd')
        exec_mock.assert_not_called()
        remove_mock.assert_called_once_with(link)
        self.assertListEqual(
            [mock.call(source, link), mock.call(source, link)],
            link_mock.mock_calls)
146 |
--------------------------------------------------------------------------------
/cinderlib/tests/unit/objects/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Akrog/cinderlib/6481cd9a34744f80bdba130fe9089f1b8b7cb327/cinderlib/tests/unit/objects/__init__.py
--------------------------------------------------------------------------------
/cinderlib/tests/unit/objects/test_snapshot.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018, Red Hat, Inc.
2 | # All Rights Reserved.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
5 | # not use this file except in compliance with the License. You may obtain
6 | # a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 | # License for the specific language governing permissions and limitations
14 | # under the License.
15 |
16 | import mock
17 |
18 | from cinderlib import exception
19 | from cinderlib import objects
20 | from cinderlib.tests.unit import base
21 |
22 |
class TestSnapshot(base.BaseTest):
    """Unit tests for cinderlib.objects.Snapshot."""

    def setUp(self):
        super(TestSnapshot, self).setUp()
        self.vol = objects.Volume(self.backend_name, size=10,
                                  extra_specs={'e': 'v'},
                                  qos_specs={'q': 'qv'})
        self.snap = objects.Snapshot(self.vol,
                                     name='my_snap', description='my_desc')
        # Register the snapshot on the volume, both on the cinderlib object
        # and on the underlying OVO.
        self.vol._snapshots.append(self.snap)
        self.vol._ovo.snapshots.objects.append(self.snap._ovo)

    def test_init_from_volume(self):
        """A Snapshot built from a Volume inherits its identifying fields."""
        self.assertIsNotNone(self.snap.id)
        self.assertEqual(self.backend, self.snap.backend)
        self.assertEqual('my_snap', self.snap.name)
        self.assertEqual('my_snap', self.snap.display_name)
        self.assertEqual('my_desc', self.snap.description)
        self.assertEqual(self.vol.user_id, self.snap.user_id)
        self.assertEqual(self.vol.project_id, self.snap.project_id)
        self.assertEqual(self.vol.id, self.snap.volume_id)
        self.assertEqual(self.vol.size, self.snap.volume_size)
        self.assertEqual(self.vol._ovo, self.snap._ovo.volume)
        self.assertEqual(self.vol.volume_type_id, self.snap.volume_type_id)
        self.assertEqual(self.vol, self.snap.volume)

    def test_init_from_ovo(self):
        """A Snapshot can be rebuilt from an existing OVO."""
        snap2 = objects.Snapshot(None, __ovo=self.snap._ovo)
        self.assertEqual(self.snap.backend, snap2.backend)
        self.assertEqual(self.snap._ovo, snap2._ovo)
        self.assertEqual(self.vol, self.snap.volume)

    def test_create(self):
        """create() applies the driver's model update and persists."""
        update_vol = {'provider_id': 'provider_id'}
        self.backend.driver.create_snapshot.return_value = update_vol
        self.snap.create()
        self.assertEqual('available', self.snap.status)
        self.assertEqual('provider_id', self.snap.provider_id)
        self.backend.driver.create_snapshot.assert_called_once_with(
            self.snap._ovo)
        self.persistence.set_snapshot.assert_called_once_with(self.snap)

    def test_create_error(self):
        """A driver failure marks the snap as error but still persists it."""
        self.backend.driver.create_snapshot.side_effect = exception.NotFound
        with self.assertRaises(exception.NotFound) as assert_context:
            self.snap.create()

        self.assertEqual(self.snap, assert_context.exception.resource)
        self.backend.driver.create_snapshot.assert_called_once_with(
            self.snap._ovo)
        self.assertEqual('error', self.snap.status)
        self.persistence.set_snapshot.assert_called_once_with(self.snap)

    def test_delete(self):
        """delete() removes the snap from the volume and persistence."""
        with mock.patch.object(
                self.vol, '_snapshot_removed',
                wraps=self.vol._snapshot_removed) as snap_removed_mock:
            self.snap.delete()
        snap_removed_mock.assert_called_once_with(self.snap)
        self.backend.driver.delete_snapshot.assert_called_once_with(
            self.snap._ovo)
        self.persistence.delete_snapshot.assert_called_once_with(self.snap)
        self.assertEqual([], self.vol.snapshots)
        self.assertEqual([], self.vol._ovo.snapshots.objects)
        self.assertEqual('deleted', self.snap._ovo.status)

    @mock.patch('cinderlib.objects.Volume._snapshot_removed')
    def test_delete_error(self, snap_removed_mock):
        """A failed delete keeps the snap listed, in error_deleting state."""
        self.backend.driver.delete_snapshot.side_effect = exception.NotFound
        with self.assertRaises(exception.NotFound) as assert_context:
            self.snap.delete()
        self.assertEqual(self.snap, assert_context.exception.resource)
        self.backend.driver.delete_snapshot.assert_called_once_with(
            self.snap._ovo)
        snap_removed_mock.assert_not_called()
        self.persistence.delete_snapshot.assert_not_called()
        self.assertEqual([self.snap], self.vol.snapshots)
        self.assertEqual([self.snap._ovo], self.vol._ovo.snapshots.objects)
        self.assertEqual('error_deleting', self.snap._ovo.status)

    def test_create_volume(self):
        """create_volume() builds a new volume inheriting the type specs."""
        create_mock = self.backend.driver.create_volume_from_snapshot
        create_mock.return_value = None
        vol2 = self.snap.create_volume(name='new_name', description='new_desc')
        create_mock.assert_called_once_with(vol2._ovo, self.snap._ovo)
        self.assertEqual('available', vol2.status)
        self.assertEqual(1, len(self.backend._volumes))
        self.assertEqual(vol2, self.backend._volumes[0])
        self.persistence.set_volume.assert_called_once_with(vol2)
        # Each volume has its own volume type whose id matches the volume's
        # id, as the parallel vol2 assertions below confirm.
        self.assertEqual(self.vol.id, self.vol.volume_type_id)
        self.assertNotEqual(self.vol.id, vol2.id)
        self.assertEqual(vol2.id, vol2.volume_type_id)
        self.assertEqual(self.vol.volume_type.extra_specs,
                         vol2.volume_type.extra_specs)
        self.assertEqual(self.vol.volume_type.qos_specs.specs,
                         vol2.volume_type.qos_specs.specs)

    def test_create_volume_error(self):
        """A failed create_volume leaves the new volume in error state."""
        create_mock = self.backend.driver.create_volume_from_snapshot
        create_mock.side_effect = exception.NotFound
        with self.assertRaises(exception.NotFound) as assert_context:
            self.snap.create_volume()
        self.assertEqual(1, len(self.backend._volumes_inflight))
        vol2 = list(self.backend._volumes_inflight.values())[0]
        self.assertEqual(vol2, assert_context.exception.resource)
        create_mock.assert_called_once_with(vol2, self.snap._ovo)
        self.assertEqual('error', vol2.status)
        self.persistence.set_volume.assert_called_once_with(mock.ANY)

    def test_get_by_id(self):
        """get_by_id returns the first snapshot found in persistence."""
        mock_get_snaps = self.persistence.get_snapshots
        mock_get_snaps.return_value = [mock.sentinel.snap]
        res = objects.Snapshot.get_by_id(mock.sentinel.snap_id)
        mock_get_snaps.assert_called_once_with(
            snapshot_id=mock.sentinel.snap_id)
        self.assertEqual(mock.sentinel.snap, res)

    def test_get_by_id_not_found(self):
        """get_by_id raises SnapshotNotFound when persistence has nothing."""
        mock_get_snaps = self.persistence.get_snapshots
        mock_get_snaps.return_value = None
        self.assertRaises(exception.SnapshotNotFound,
                          objects.Snapshot.get_by_id, mock.sentinel.snap_id)
        mock_get_snaps.assert_called_once_with(
            snapshot_id=mock.sentinel.snap_id)

    def test_get_by_name(self):
        """get_by_name passes through whatever persistence returns."""
        res = objects.Snapshot.get_by_name(mock.sentinel.name)
        mock_get_snaps = self.persistence.get_snapshots
        mock_get_snaps.assert_called_once_with(
            snapshot_name=mock.sentinel.name)
        self.assertEqual(mock_get_snaps.return_value, res)
153 |
--------------------------------------------------------------------------------
/cinderlib/tests/unit/persistence/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Akrog/cinderlib/6481cd9a34744f80bdba130fe9089f1b8b7cb327/cinderlib/tests/unit/persistence/__init__.py
--------------------------------------------------------------------------------
/cinderlib/tests/unit/persistence/test_dbms.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018, Red Hat, Inc.
2 | # All Rights Reserved.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
5 | # not use this file except in compliance with the License. You may obtain
6 | # a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 | # License for the specific language governing permissions and limitations
14 | # under the License.
15 |
16 | import tempfile
17 |
18 | from cinder.db.sqlalchemy import api as sqla_api
19 | from cinder import objects as cinder_ovos
20 | from oslo_db import api as oslo_db_api
21 |
22 | import cinderlib
23 | from cinderlib.persistence import dbms
24 | from cinderlib.tests.unit.persistence import base
25 |
26 |
class TestDBPersistence(base.BasePersistenceTest):
    """Persistence tests against a real SQLAlchemy database (SQLite file)."""

    # Each test run uses a throwaway SQLite file as its database.
    CONNECTION = 'sqlite:///' + tempfile.NamedTemporaryFile().name
    PERSISTENCE_CFG = {'storage': 'db',
                       'connection': CONNECTION}

    def tearDown(self):
        # Wipe every table touched by these tests so each test starts clean.
        sqla_api.model_query(self.context, sqla_api.models.Snapshot).delete()
        sqla_api.model_query(self.context,
                             sqla_api.models.VolumeAttachment).delete()
        sqla_api.model_query(self.context,
                             sqla_api.models.Volume).delete()
        sqla_api.get_session().query(dbms.KeyValue).delete()
        super(TestDBPersistence, self).tearDown()

    def test_db(self):
        """The persistence plugin exposes an oslo.db DBAPI object."""
        self.assertIsInstance(self.persistence.db,
                              oslo_db_api.DBAPI)

    def test_set_volume(self):
        """set_volume stores the volume's fields in the volumes table."""
        res = sqla_api.volume_get_all(self.context)
        self.assertListEqual([], res)

        vol = cinderlib.Volume(self.backend, size=1, name='disk')
        expected = {'availability_zone': vol.availability_zone,
                    'size': vol.size, 'name': vol.name}

        self.persistence.set_volume(vol)

        db_vol = sqla_api.volume_get(self.context, vol.id)
        actual = {'availability_zone': db_vol.availability_zone,
                  'size': db_vol.size, 'name': db_vol.display_name}

        self.assertDictEqual(expected, actual)

    def test_set_snapshot(self):
        """set_snapshot round-trips a snapshot through the database."""
        vol = cinderlib.Volume(self.backend, size=1, name='disk')
        snap = cinderlib.Snapshot(vol, name='disk')

        self.assertEqual(0, len(sqla_api.snapshot_get_all(self.context)))

        self.persistence.set_snapshot(snap)

        db_entries = sqla_api.snapshot_get_all(self.context)
        self.assertEqual(1, len(db_entries))

        # Rebuild a cinderlib Snapshot from the DB row and compare it with
        # the one we stored.
        ovo_snap = cinder_ovos.Snapshot(self.context)
        ovo_snap._from_db_object(ovo_snap._context, ovo_snap, db_entries[0])
        cl_snap = cinderlib.Snapshot(vol, __ovo=ovo_snap)

        self.assertEqualObj(snap, cl_snap)

    def test_set_connection(self):
        """set_connection round-trips an attachment through the database."""
        vol = cinderlib.Volume(self.backend, size=1, name='disk')
        conn = cinderlib.Connection(self.backend, volume=vol, connector={},
                                    connection_info={'conn': {'data': {}}})

        self.assertEqual(0,
                         len(sqla_api.volume_attachment_get_all(self.context)))

        self.persistence.set_connection(conn)

        db_entries = sqla_api.volume_attachment_get_all(self.context)
        self.assertEqual(1, len(db_entries))

        # Rebuild a cinderlib Connection from the DB row and compare.
        ovo_conn = cinder_ovos.VolumeAttachment(self.context)
        ovo_conn._from_db_object(ovo_conn._context, ovo_conn, db_entries[0])
        cl_conn = cinderlib.Connection(vol.backend, volume=vol, __ovo=ovo_conn)

        self.assertEqualObj(conn, cl_conn)

    def test_set_key_values(self):
        """set_key_value stores a KeyValue row retrievable via a query."""
        res = sqla_api.get_session().query(dbms.KeyValue).all()
        self.assertListEqual([], res)

        expected = [dbms.KeyValue(key='key', value='value')]
        self.persistence.set_key_value(expected[0])

        actual = sqla_api.get_session().query(dbms.KeyValue).all()
        self.assertListEqualObj(expected, actual)
106 |
107 |
class TestMemoryDBPersistence(TestDBPersistence):
    # Re-run the full TestDBPersistence suite against the 'memory_db'
    # storage plugin (no connection string configured).
    PERSISTENCE_CFG = {'storage': 'memory_db'}
110 |
--------------------------------------------------------------------------------
/cinderlib/tests/unit/persistence/test_memory.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018, Red Hat, Inc.
2 | # All Rights Reserved.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
5 | # not use this file except in compliance with the License. You may obtain
6 | # a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 | # License for the specific language governing permissions and limitations
14 | # under the License.
15 |
16 | import cinderlib
17 | from cinderlib.tests.unit.persistence import base
18 |
19 |
class TestMemoryPersistence(base.BasePersistenceTest):
    """Exercise the pure in-memory persistence plugin.

    The plugin keeps resources in plain dicts keyed by resource id
    (volumes, snapshots, connections) or by key string (key_values), so
    these tests assert directly against those dicts.
    """
    PERSISTENCE_CFG = {'storage': 'memory'}

    def tearDown(self):
        # Since this plugin uses class attributes we have to clear them
        self.persistence.volumes = {}
        self.persistence.snapshots = {}
        self.persistence.connections = {}
        self.persistence.key_values = {}
        super(TestMemoryPersistence, self).tearDown()

    def test_db(self):
        # The memory plugin exposes the DB compatibility class from
        # cinderlib.persistence.base rather than a real oslo.db DBAPI.
        self.assertIsInstance(self.persistence.db,
                              cinderlib.persistence.base.DB)

    def test_set_volume(self):
        # Volumes are stored in a dict keyed by volume id.
        vol = cinderlib.Volume(self.backend, size=1, name='disk')
        self.assertDictEqual({}, self.persistence.volumes)

        self.persistence.set_volume(vol)
        self.assertDictEqual({vol.id: vol}, self.persistence.volumes)

    def test_set_snapshot(self):
        # Snapshots are stored in a dict keyed by snapshot id.
        vol = cinderlib.Volume(self.backend, size=1, name='disk')
        snap = cinderlib.Snapshot(vol, name='disk')

        self.assertDictEqual({}, self.persistence.snapshots)

        self.persistence.set_snapshot(snap)
        self.assertDictEqual({snap.id: snap}, self.persistence.snapshots)

    def test_set_connection(self):
        # Connections are stored in a dict keyed by connection id.
        vol = cinderlib.Volume(self.backend, size=1, name='disk')
        conn = cinderlib.Connection(self.backend, volume=vol, connector={},
                                    connection_info={'conn': {'data': {}}})

        self.assertDictEqual({}, self.persistence.connections)

        self.persistence.set_connection(conn)
        self.assertDictEqual({conn.id: conn}, self.persistence.connections)

    def test_set_key_values(self):
        # Key-value pairs are stored in a dict keyed by the key string.
        self.assertDictEqual({}, self.persistence.key_values)
        expected = [cinderlib.KeyValue('key', 'value')]
        self.persistence.set_key_value(expected[0])
        self.assertTrue('key' in self.persistence.key_values)
        self.assertEqual(expected, list(self.persistence.key_values.values()))
67 |
--------------------------------------------------------------------------------
/cinderlib/tests/unit/utils.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018, Red Hat, Inc.
2 | # All Rights Reserved.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
5 | # not use this file except in compliance with the License. You may obtain
6 | # a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 | # License for the specific language governing permissions and limitations
14 | # under the License.
15 |
16 | import mock
17 |
18 | import cinderlib
19 | from cinderlib.persistence import base
20 |
21 |
def get_mock_persistence():
    # Return a MagicMock constrained to the PersistenceDriverBase
    # interface, so tests fail fast on calls to non-existent methods.
    return mock.MagicMock(spec=base.PersistenceDriverBase)
24 |
25 |
class FakeBackend(cinderlib.Backend):
    # Test double for cinderlib.Backend.  Deliberately does NOT call
    # cinderlib.Backend.__init__, so no real driver is loaded; it wires
    # up just enough state (config dict, mocked driver, pool names) for
    # the unit tests and registers itself in the global backends map.
    def __init__(self, *args, **kwargs):
        driver_name = kwargs.get('volume_backend_name', 'fake')
        # Register under the backend name so lookups by name find this fake.
        cinderlib.Backend.backends[driver_name] = self
        self._driver_cfg = {'volume_backend_name': driver_name}
        self.driver = mock.Mock()
        self.driver.persistence = cinderlib.Backend.persistence
        self._pool_names = (driver_name,)
        self._volumes = []
35 |
--------------------------------------------------------------------------------
/cinderlib/utils.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019, Red Hat, Inc.
2 | # All Rights Reserved.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
5 | # not use this file except in compliance with the License. You may obtain
6 | # a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 | # License for the specific language governing permissions and limitations
14 | # under the License.
15 |
16 |
def find_by_id(resource_id, elements):
    """Locate an element by its ``id`` attribute.

    :param resource_id: id to look for.
    :param elements: iterable of objects with an ``id`` attribute, or None.
    :returns: ``(index, element)`` of the first match, or ``(None, None)``
              when ``elements`` is empty/None or nothing matches.
    """
    if elements:
        for position, candidate in enumerate(elements):
            if candidate.id == resource_id:
                return position, candidate
    return None, None
23 |
24 |
def add_by_id(resource, elements):
    """Insert ``resource`` into ``elements``, replacing any element that
    already has the same ``id``.

    :param resource: object with an ``id`` attribute.
    :param elements: mutable list to update in place; no-op when None.
    """
    if elements is not None:
        i, element = find_by_id(resource.id, elements)
        # Compare against None explicitly: a falsy-but-present element
        # must be replaced in place, not duplicated by the append below.
        if element is not None:
            elements[i] = resource
        else:
            elements.append(resource)
32 |
--------------------------------------------------------------------------------
/cinderlib/workarounds.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018, Red Hat, Inc.
2 | # All Rights Reserved.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
5 | # not use this file except in compliance with the License. You may obtain
6 | # a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 | # License for the specific language governing permissions and limitations
14 | # under the License.
15 |
import six

if six.PY2:
    # Python 2 workaround for getaddrinfo (fails if port is valid unicode)
    def my_getaddrinfo(original, host, port, *args, **kwargs):
        # Coerce a unicode port to a native str before delegating to the
        # real getaddrinfo (bound as 'original' via functools.partial).
        if isinstance(port, six.text_type):
            port = str(port)
        return original(host, port, *args, **kwargs)
    import functools
    import socket
    # Monkey-patch globally: every socket.getaddrinfo call in the process
    # now goes through the wrapper with the real function pre-bound.
    socket.getaddrinfo = functools.partial(my_getaddrinfo, socket.getaddrinfo)
27 |
--------------------------------------------------------------------------------
/devstack/README.md:
--------------------------------------------------------------------------------
1 | This directory contains the cinderlib DevStack plugin.
2 |
3 | To configure cinderlib with DevStack, you will need to enable this plugin by
4 | adding one line to the [[local|localrc]] section of your local.conf file.
5 |
6 | To enable the plugin, add a line of the form:
7 |
8 | enable_plugin cinderlib <GITURL> [GITREF]
9 |
10 | where
11 |
12 | <GITURL> is the URL of a cinderlib repository
13 | [GITREF] is an optional git ref (branch/ref/tag). The default is master.
14 |
15 | For example:
16 |
17 | enable_plugin cinderlib https://git.openstack.org/openstack/cinderlib
18 |
19 | Another example using Stein's stable branch:
20 |
21 | enable_plugin cinderlib https://git.openstack.org/openstack/cinderlib stable/stein
22 |
23 | The cinderlib DevStack plugin will install cinderlib from Git by default, but
24 | it can be installed from PyPI using the `CINDERLIB_FROM_GIT` configuration option.
25 |
26 | CINDERLIB_FROM_GIT=False
27 |
28 | The plugin will also generate the code equivalent to the deployed Cinder's
29 | configuration in `$CINDERLIB_SAMPLE_DIR/cinderlib.py` which defaults to the
30 | same directory where the Cinder configuration is saved.
31 |
32 | For more information, see the [DevStack plugin documentation](https://docs.openstack.org/devstack/latest/plugins.html).
33 |
--------------------------------------------------------------------------------
/devstack/override-defaults:
--------------------------------------------------------------------------------
1 | ALL_LIBS+=" cinderlib"
2 | CINDERLIB_FROM_GIT=$(trueorfalse True CINDERLIB_FROM_GIT)
3 |
4 | if [[ "$CINDERLIB_FROM_GIT" == "True" ]]; then
5 | PROJECTS="openstack/cinderlib $PROJECTS"
6 | LIBS_FROM_GIT="cinderlib,$LIBS_FROM_GIT"
7 | fi
8 |
--------------------------------------------------------------------------------
/devstack/plugin.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # plugin.sh - DevStack plugin.sh dispatch script for cinderlib
3 |
4 | _XTRACE_CINDERLIB=$(set +o | grep xtrace)
5 |
# Install cinderlib either from a git checkout (when DevStack's
# LIBS_FROM_GIT mechanism selects it) or from PyPI.
function install_cinderlib {
    if use_library_from_git "cinderlib"; then
        git_clone_by_name "cinderlib"
        setup_dev_lib "cinderlib"
    else
        pip_install cinderlib
    fi
}
14 |
# Generate $CINDERLIB_SAMPLE: Python initialization code equivalent to
# the deployed Cinder configuration, produced by cinder-cfg-to-python.py.
function generate_python_code {
    if use_library_from_git "cinderlib"; then
        sudo ${GITDIR["cinderlib"]}/tools/cinder-cfg-to-python.py $CINDER_CONF $CINDERLIB_SAMPLE
    else
        # We need to download the script since it's not part of the pypi package
        curl -s https://git.openstack.org/cgit/openstack/cinderlib/plain/tools/cinder-cfg-to-python.py | sudo python - $CINDER_CONF $CINDERLIB_SAMPLE
    fi
}
23 |
# Branches stable/a* through stable/r* (i.e. anything before Stein)
# match this pattern and are excluded below.
stable_compare="stable/[a-r]"
# Cinderlib only makes sense if Cinder is enabled and we are in stein or later
if [[ ! "${GITBRANCH["cinderlib"]}" =~ $stable_compare ]] && is_service_enabled cinder; then

    if [[ "$1" == "stack" && "$2" == "install" ]]; then
        # Perform installation of service source
        echo_summary "Installing cinderlib"
        install_cinderlib

    # Plugins such as Ceph configure themselves at post-config, so we have to
    # configure ourselves at the next stage, "extra"
    elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
        # Generate the cinderlib configuration
        echo_summary "Generating cinderlib initialization example python code"
        generate_python_code
    fi

    if [[ "$1" == "clean" || "$1" == "unstack" ]]; then
        echo_summary "Removing cinderlib and its code example from cinder.conf"
        sudo rm -f $CINDERLIB_SAMPLE
        pip_uninstall cinderlib
    fi
fi

# Restore xtrace
$_XTRACE_CINDERLIB
50 |
--------------------------------------------------------------------------------
/devstack/settings:
--------------------------------------------------------------------------------
1 | # Defaults
2 | # --------
3 |
4 | # Set up default directories
5 | CINDERLIB_SAMPLE_DIR=${CINDERLIB_CONF_DIR:-/etc/cinder}
6 | CINDERLIB_SAMPLE=$CINDERLIB_SAMPLE_DIR/cinderlib.py
7 | CINDERLIB_FROM_GIT=$(trueorfalse True CINDERLIB_FROM_GIT)
8 |
9 | define_plugin cinderlib
10 |
--------------------------------------------------------------------------------
/doc/.gitignore:
--------------------------------------------------------------------------------
1 | build/*
2 | source/api/*
3 | .autogenerated
4 |
--------------------------------------------------------------------------------
/doc/requirements.txt:
--------------------------------------------------------------------------------
1 | openstackdocstheme>=1.18.1 # Apache-2.0
2 | reno>=2.5.0 # Apache-2.0
3 | doc8>=0.6.0 # Apache-2.0
4 | sphinx!=1.6.6,!=1.6.7,>=1.6.2 # BSD
5 | os-api-ref>=1.4.0 # Apache-2.0
6 | sphinxcontrib-apidoc>=0.2.0 # BSD
7 |
--------------------------------------------------------------------------------
/doc/source/Makefile:
--------------------------------------------------------------------------------
1 | # Makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS =
6 | SPHINXBUILD = sphinx-build
7 | PAPER =
8 | BUILDDIR = _build
9 |
10 | # User-friendly check for sphinx-build
11 | ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
12 | $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
13 | endif
14 |
15 | # Internal variables.
16 | PAPEROPT_a4 = -D latex_paper_size=a4
17 | PAPEROPT_letter = -D latex_paper_size=letter
18 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
19 | # the i18n builder cannot share the environment and doctrees with the others
20 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
21 |
22 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
23 |
24 | help:
25 | @echo "Please use \`make <target>' where <target> is one of"
26 | @echo " html to make standalone HTML files"
27 | @echo " dirhtml to make HTML files named index.html in directories"
28 | @echo " singlehtml to make a single large HTML file"
29 | @echo " pickle to make pickle files"
30 | @echo " json to make JSON files"
31 | @echo " htmlhelp to make HTML files and a HTML help project"
32 | @echo " qthelp to make HTML files and a qthelp project"
33 | @echo " devhelp to make HTML files and a Devhelp project"
34 | @echo " epub to make an epub"
35 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
36 | @echo " latexpdf to make LaTeX files and run them through pdflatex"
37 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
38 | @echo " text to make text files"
39 | @echo " man to make manual pages"
40 | @echo " texinfo to make Texinfo files"
41 | @echo " info to make Texinfo files and run them through makeinfo"
42 | @echo " gettext to make PO message catalogs"
43 | @echo " changes to make an overview of all changed/added/deprecated items"
44 | @echo " xml to make Docutils-native XML files"
45 | @echo " pseudoxml to make pseudoxml-XML files for display purposes"
46 | @echo " linkcheck to check all external links for integrity"
47 | @echo " doctest to run all doctests embedded in the documentation (if enabled)"
48 |
49 | clean:
50 | rm -rf $(BUILDDIR)/*
51 |
52 | html:
53 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
54 | @echo
55 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
56 |
57 | dirhtml:
58 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
59 | @echo
60 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
61 |
62 | singlehtml:
63 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
64 | @echo
65 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
66 |
67 | pickle:
68 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
69 | @echo
70 | @echo "Build finished; now you can process the pickle files."
71 |
72 | json:
73 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
74 | @echo
75 | @echo "Build finished; now you can process the JSON files."
76 |
77 | htmlhelp:
78 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
79 | @echo
80 | @echo "Build finished; now you can run HTML Help Workshop with the" \
81 | ".hhp project file in $(BUILDDIR)/htmlhelp."
82 |
83 | qthelp:
84 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
85 | @echo
86 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \
87 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
88 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/cinderlib.qhcp"
89 | @echo "To view the help file:"
90 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/cinderlib.qhc"
91 |
92 | devhelp:
93 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
94 | @echo
95 | @echo "Build finished."
96 | @echo "To view the help file:"
97 | @echo "# mkdir -p $$HOME/.local/share/devhelp/cinderlib"
98 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/cinderlib"
99 | @echo "# devhelp"
100 |
101 | epub:
102 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
103 | @echo
104 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
105 |
106 | latex:
107 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
108 | @echo
109 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
110 | @echo "Run \`make' in that directory to run these through (pdf)latex" \
111 | "(use \`make latexpdf' here to do that automatically)."
112 |
113 | latexpdf:
114 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
115 | @echo "Running LaTeX files through pdflatex..."
116 | $(MAKE) -C $(BUILDDIR)/latex all-pdf
117 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
118 |
119 | latexpdfja:
120 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
121 | @echo "Running LaTeX files through platex and dvipdfmx..."
122 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
123 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
124 |
125 | text:
126 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
127 | @echo
128 | @echo "Build finished. The text files are in $(BUILDDIR)/text."
129 |
130 | man:
131 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
132 | @echo
133 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
134 |
135 | texinfo:
136 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
137 | @echo
138 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
139 | @echo "Run \`make' in that directory to run these through makeinfo" \
140 | "(use \`make info' here to do that automatically)."
141 |
142 | info:
143 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
144 | @echo "Running Texinfo files through makeinfo..."
145 | make -C $(BUILDDIR)/texinfo info
146 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
147 |
148 | gettext:
149 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
150 | @echo
151 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
152 |
153 | changes:
154 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
155 | @echo
156 | @echo "The overview file is in $(BUILDDIR)/changes."
157 |
158 | linkcheck:
159 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
160 | @echo
161 | @echo "Link check complete; look for any errors in the above output " \
162 | "or in $(BUILDDIR)/linkcheck/output.txt."
163 |
164 | doctest:
165 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
166 | @echo "Testing of doctests in the sources finished, look at the " \
167 | "results in $(BUILDDIR)/doctest/output.txt."
168 |
169 | xml:
170 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
171 | @echo
172 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml."
173 |
174 | pseudoxml:
175 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
176 | @echo
177 | @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
178 |
--------------------------------------------------------------------------------
/doc/source/_extra/.placeholder:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Akrog/cinderlib/6481cd9a34744f80bdba130fe9089f1b8b7cb327/doc/source/_extra/.placeholder
--------------------------------------------------------------------------------
/doc/source/_static/.placeholder:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/doc/source/conf.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | #
4 | # cinderlib documentation build configuration file, created by
5 | # sphinx-quickstart on Tue Jul 9 22:26:36 2013.
6 | #
7 | # This file is execfile()d with the current directory set to its
8 | # containing dir.
9 | #
10 | # Note that not all possible configuration values are present in this
11 | # autogenerated file.
12 | #
13 | # All configuration values have a default; values that are commented out
14 | # serve to show the default.
15 |
16 | import os
17 | import sys
18 |
19 | # If extensions (or modules to document with autodoc) are in another
20 | # directory, add these directories to sys.path here. If the directory is
21 | # relative to the documentation root, use os.path.abspath to make it
22 | # absolute, like shown here.
23 | project_root = os.path.abspath('../../')
24 | sys.path.insert(0, project_root)
25 |
26 | # # Get the project root dir, which is the parent dir of this
27 | # import pdb; pdb.set_trace()
28 | # cwd = os.getcwd()
29 | # project_root = os.path.dirname(cwd)
30 | #
31 | # # Insert the project root dir as the first element in the PYTHONPATH.
32 | # # This lets us ensure that the source package is imported, and that its
33 | # # version is used.
34 | # sys.path.insert(0, project_root)
35 |
36 |
37 | # -- General configuration ---------------------------------------------
38 |
39 | # If your documentation needs a minimal Sphinx version, state it here.
40 | needs_sphinx = '1.6.5'
41 |
42 | # Add any Sphinx extension module names here, as strings. They can be
43 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
44 | extensions = ['sphinx.ext.autodoc',
45 | 'sphinx.ext.viewcode',
46 | 'sphinxcontrib.apidoc',
47 | 'openstackdocstheme']
48 |
49 | # sphinxcontrib.apidoc options
50 | apidoc_module_dir = '../../cinderlib'
51 | apidoc_output_dir = 'api'
52 | apidoc_excluded_paths = [
53 | 'tests/*',
54 | 'tests',
55 | 'persistence/dbms.py',
56 | 'persistence/memory.py',
57 | ]
58 | apidoc_separate_modules = True
59 | apidoc_toc_file = False
60 |
61 | autodoc_mock_imports = ['cinder', 'os_brick', 'oslo_utils',
62 | 'oslo_versionedobjects', 'oslo_concurrency',
63 | 'oslo_log', 'stevedore', 'oslo_db', 'oslo_config',
64 | 'oslo_privsep', 'cinder.db.sqlalchemy']
65 |
66 | # Add any paths that contain templates here, relative to this directory.
67 | templates_path = ['_templates']
68 |
69 | # The suffix of source filenames.
70 | source_suffix = '.rst'
71 |
72 | # The encoding of source files.
73 | #source_encoding = 'utf-8-sig'
74 |
75 | # The master toctree document.
76 | master_doc = 'index'
77 |
78 | # List of directories, relative to source directory, that shouldn't be searched
79 | # for source files.
80 | exclude_trees = []
81 |
82 | # General information about the project.
83 | project = u'Cinder Library'
84 | copyright = u"2017, Cinder Developers"
85 |
86 | # openstackdocstheme options
87 | repository_name = 'openstack/cinderlib'
88 | bug_project = 'cinderlib'
89 | bug_tag = ''
90 | html_last_updated_fmt = '%Y-%m-%d %H:%M'
91 |
92 | # The language for content autogenerated by Sphinx. Refer to documentation
93 | # for a list of supported languages.
94 | #language = None
95 |
96 | # There are two options for replacing |today|: either, you set today to
97 | # some non-false value, then it is used:
98 | #today = ''
99 | # Else, today_fmt is used as the format for a strftime call.
100 | #today_fmt = '%B %d, %Y'
101 |
102 | # List of patterns, relative to source directory, that match files and
103 | # directories to ignore when looking for source files.
104 | exclude_patterns = []
105 |
106 | # The reST default role (used for this markup: `text`) to use for all
107 | # documents.
108 | #default_role = None
109 |
110 | # If true, '()' will be appended to :func: etc. cross-reference text.
111 | #add_function_parentheses = True
112 |
113 | # If true, the current module name will be prepended to all description
114 | # unit titles (such as .. function::).
115 | add_module_names = False
116 |
117 | # If true, sectionauthor and moduleauthor directives will be shown in the
118 | # output. They are ignored by default.
119 | show_authors = False
120 |
121 | # The name of the Pygments (syntax highlighting) style to use.
122 | pygments_style = 'sphinx'
123 |
124 | # A list of ignored prefixes for module index sorting.
125 | modindex_common_prefix = ['cinderlib.']
126 |
127 | # If true, keep warnings as "system message" paragraphs in the built
128 | # documents.
129 | #keep_warnings = False
130 |
131 |
132 | # -- Options for HTML output -------------------------------------------
133 |
134 | # The theme to use for HTML and HTML Help pages. See the documentation for
135 | # a list of builtin themes.
136 | html_theme = 'openstackdocs'
137 |
138 | # Theme options are theme-specific and customize the look and feel of a
139 | # theme further. For a list of options available for each theme, see the
140 | # documentation.
141 | #html_theme_options = {}
142 |
143 | # Add any paths that contain custom themes here, relative to this directory.
144 | #html_theme_path = []
145 |
146 | # The name for this set of Sphinx documents. If None, it defaults to
147 | # "<project> v<release> documentation".
148 | #html_title = None
149 |
150 | # A shorter title for the navigation bar. Default is the same as
151 | # html_title.
152 | #html_short_title = None
153 |
154 | # The name of an image file (relative to this directory) to place at the
155 | # top of the sidebar.
156 | #html_logo = None
157 |
158 | # The name of an image file (within the static path) to use as favicon
159 | # of the docs. This file should be a Windows icon file (.ico) being
160 | # 16x16 or 32x32 pixels large.
161 | #html_favicon = None
162 |
163 | # Add any paths that contain custom static files (such as style sheets)
164 | # here, relative to this directory. They are copied after the builtin
165 | # static files, so a file named "default.css" will overwrite the builtin
166 | # "default.css".
167 | html_static_path = ['_static']
168 |
169 | # Add any paths that contain "extra" files, such as .htaccess.
170 | html_extra_path = ['_extra']
171 |
172 | # If not '', a 'Last updated on:' timestamp is inserted at every page
173 | # bottom, using the given strftime format.
174 | #html_last_updated_fmt = '%b %d, %Y'
175 |
176 | # If true, SmartyPants will be used to convert quotes and dashes to
177 | # typographically correct entities.
178 | #html_use_smartypants = True
179 |
180 | # Custom sidebar templates, maps document names to template names.
181 | #html_sidebars = {}
182 |
183 | # Additional templates that should be rendered to pages, maps page names
184 | # to template names.
185 | #html_additional_pages = {}
186 |
187 | # If false, no module index is generated.
188 | #html_domain_indices = True
189 |
190 | # If false, no index is generated.
191 | #html_use_index = True
192 |
193 | # If true, the index is split into individual pages for each letter.
194 | #html_split_index = False
195 |
196 | # If true, links to the reST sources are added to the pages.
197 | #html_show_sourcelink = True
198 |
199 | # If true, "Created using Sphinx" is shown in the HTML footer.
200 | # Default is True.
201 | #html_show_sphinx = True
202 |
203 | # If true, "(C) Copyright ..." is shown in the HTML footer.
204 | # Default is True.
205 | #html_show_copyright = True
206 |
207 | # If true, an OpenSearch description file will be output, and all pages
208 | # will contain a <link> tag referring to it. The value of this option
209 | # must be the base URL from which the finished HTML is served.
210 | #html_use_opensearch = ''
211 |
212 | # This is the file name suffix for HTML files (e.g. ".xhtml").
213 | #html_file_suffix = None
214 |
215 | # Output file base name for HTML help builder.
216 | htmlhelp_basename = 'cinderlibdoc'
217 |
218 |
219 | # -- Options for LaTeX output ------------------------------------------
220 |
221 | latex_elements = {
222 | # The paper size ('letterpaper' or 'a4paper').
223 | #'papersize': 'letterpaper',
224 |
225 | # The font size ('10pt', '11pt' or '12pt').
226 | #'pointsize': '10pt',
227 |
228 | # Additional stuff for the LaTeX preamble.
229 | #'preamble': '',
230 | }
231 |
232 | # Grouping the document tree into LaTeX files. List of tuples
233 | # (source start file, target name, title, author, documentclass
234 | # [howto/manual]).
235 | latex_documents = [
236 | ('index', 'cinderlib.tex',
237 | u'Cinder Library Documentation',
238 | u'Cinder Contributors', 'manual'),
239 | ]
240 |
241 | # The name of an image file (relative to this directory) to place at
242 | # the top of the title page.
243 | #latex_logo = None
244 |
245 | # For "manual" documents, if this is true, then toplevel headings
246 | # are parts, not chapters.
247 | #latex_use_parts = False
248 |
249 | # If true, show page references after internal links.
250 | #latex_show_pagerefs = False
251 |
252 | # If true, show URL addresses after external links.
253 | #latex_show_urls = False
254 |
255 | # Documents to append as an appendix to all manuals.
256 | #latex_appendices = []
257 |
258 | # If false, no module index is generated.
259 | #latex_domain_indices = True
260 |
261 |
262 | # -- Options for manual page output ------------------------------------
263 |
264 | # One entry per manual page. List of tuples
265 | # (source start file, name, description, authors, manual section).
266 | man_pages = [
267 | ('index', 'cinderlib',
268 | u'Cinder Library Documentation',
269 | [u'Cinder Contributors'], 1)
270 | ]
271 |
272 | # If true, show URL addresses after external links.
273 | #man_show_urls = False
274 |
275 |
276 | # -- Options for Texinfo output ----------------------------------------
277 |
278 | # Grouping the document tree into Texinfo files. List of tuples
279 | # (source start file, target name, title, author,
280 | # dir menu entry, description, category)
281 | texinfo_documents = [
282 | ('index', 'cinderlib',
283 | u'Cinder Library Documentation',
284 | u'Cinder Contributors',
285 | 'cinderlib',
286 | 'Direct usage of Cinder Block Storage drivers without the services.',
287 | 'Miscellaneous'),
288 | ]
289 |
290 | # Documents to append as an appendix to all manuals.
291 | #texinfo_appendices = []
292 |
293 | # If false, no module index is generated.
294 | #texinfo_domain_indices = True
295 |
296 | # How to display URL addresses: 'footnote', 'no', or 'inline'.
297 | #texinfo_show_urls = 'footnote'
298 |
299 | # If true, do not generate a @detailmenu in the "Top" node's menu.
300 | #texinfo_no_detailmenu = False
301 |
--------------------------------------------------------------------------------
/doc/source/contributing.rst:
--------------------------------------------------------------------------------
1 | Contributing
2 | ============
3 |
4 | .. include:: ../../CONTRIBUTING.rst
5 |
--------------------------------------------------------------------------------
/doc/source/index.rst:
--------------------------------------------------------------------------------
1 | Welcome to Cinder Library's documentation!
2 | ==========================================
3 |
4 | .. image:: https://img.shields.io/pypi/v/cinderlib.svg
5 | :target: https://pypi.python.org/pypi/cinderlib
6 |
7 | .. image:: https://img.shields.io/pypi/pyversions/cinderlib.svg
8 | :target: https://pypi.python.org/pypi/cinderlib
9 |
10 | .. image:: https://img.shields.io/:license-apache-blue.svg
11 | :target: http://www.apache.org/licenses/LICENSE-2.0
12 |
13 | |
14 |
15 | The Cinder Library, also known as cinderlib, is a Python library that leverages
16 | the Cinder project to provide an object oriented abstraction around Cinder's
17 | storage drivers to allow their usage directly without running any of the Cinder
18 | services or surrounding services, such as KeyStone, MySQL or RabbitMQ.
19 |
20 | The library is intended for developers who only need the basic CRUD
21 | functionality of the drivers and don't care for all the additional features
22 | Cinder provides such as quotas, replication, multi-tenancy, migrations,
23 | retyping, scheduling, backups, authorization, authentication, REST API, etc.
24 |
25 | The library was originally created as an external project, so it didn't have
26 | the broad range of backend testing Cinder does, and only a limited number of
27 | drivers were validated at the time. Drivers should work out of the box, and
28 | we'll keep a list of drivers that have added the cinderlib functional tests to
29 | the driver gates confirming they work and ensuring they will keep working.
30 |
31 | Features
32 | --------
33 |
34 | * Use a Cinder driver without running a DBMS, Message broker, or Cinder
35 | service.
36 |
37 | * Use multiple simultaneous drivers in the same application.
38 |
39 | * Basic operations support:
40 |
41 | - Create volume
42 | - Delete volume
43 | - Extend volume
44 | - Clone volume
45 | - Create snapshot
46 | - Delete snapshot
47 | - Create volume from snapshot
48 | - Connect volume
49 | - Disconnect volume
50 | - Local attach
51 | - Local detach
52 | - Validate connector
53 | - Extra Specs for specific backend functionality.
54 | - Backend QoS
55 | - Multi-pool support
56 |
57 | * Metadata persistence plugins:
58 |
59 | - Stateless: Caller stores JSON serialization.
60 | - Database: Metadata is stored in a database: MySQL, PostgreSQL, SQLite...
61 | - Custom plugin: Caller provides module to store Metadata and cinderlib calls
62 | it when necessary.
63 |
64 | Example
65 | -------
66 |
67 | The following code extract is a simple example to illustrate how cinderlib
68 | works. The code will use the LVM backend to create a volume, attach it to the
69 | local host via iSCSI, and finally snapshot it:
70 |
71 | .. code-block:: python
72 |
73 | import cinderlib as cl
74 |
75 | # Initialize the LVM driver
76 | lvm = cl.Backend(volume_driver='cinder.volume.drivers.lvm.LVMVolumeDriver',
77 | volume_group='cinder-volumes',
78 | target_protocol='iscsi',
79 | target_helper='lioadm',
80 | volume_backend_name='lvm_iscsi')
81 |
82 | # Create a 1GB volume
83 | vol = lvm.create_volume(1, name='lvm-vol')
84 |
85 | # Export, initialize, and do a local attach of the volume
86 | attach = vol.attach()
87 |
88 | print('Volume %s attached to %s' % (vol.id, attach.path))
89 |
90 | # Snapshot it
91 | snap = vol.create_snapshot('lvm-snap')
92 |
93 | Table of Contents
94 | -----------------
95 |
96 | .. toctree::
97 | :maxdepth: 2
98 |
99 | installation
100 | usage
101 | contributing
102 | limitations
103 |
--------------------------------------------------------------------------------
/doc/source/installation.rst:
--------------------------------------------------------------------------------
1 | .. highlight:: shell
2 |
3 | ============
4 | Installation
5 | ============
6 |
7 | The Cinder Library is an interfacing library that doesn't have any storage
8 | driver code, so it expects Cinder drivers to be installed in the system to run
9 | properly.
10 |
11 | We can use the latest stable release or the latest code from master branch.
12 |
13 |
14 | Stable release
15 | --------------
16 |
17 | Drivers
18 | _______
19 |
20 | For Red Hat distributions the recommendation is to use RPMs to install the
21 | Cinder drivers instead of using `pip`. If we don't have access to the
22 | `Red Hat OpenStack Platform packages
23 | <https://www.redhat.com/en/technologies/linux-platforms/openstack-platform>`_
24 | we can use the `RDO community packages <https://www.rdoproject.org>`_.
25 |
26 | On CentOS, the Extras repository provides the RPM that enables the OpenStack
27 | repository. Extras is enabled by default on CentOS 7, so you can simply install
28 | the RPM to set up the OpenStack repository:
29 |
30 | .. code-block:: console
31 |
32 | # yum install -y centos-release-openstack-rocky
33 | # yum install -y openstack-cinder
34 |
35 | On RHEL and Fedora, you'll need to download and install the RDO repository RPM
36 | to set up the OpenStack repository:
37 |
38 | .. code-block:: console
39 |
40 | # yum install -y https://www.rdoproject.org/repos/rdo-release.rpm
41 | # yum install -y openstack-cinder
42 |
43 |
44 | We can also install directly from source on the system or a virtual environment:
45 |
46 | .. code-block:: console
47 |
48 | $ virtualenv venv
49 | $ source venv/bin/activate
50 | (venv) $ pip install git+git://github.com/openstack/cinder.git@stable/rocky
51 |
52 | Library
53 | _______
54 |
55 | To install Cinder Library we'll use PyPI, so we'll make sure to have the `pip`_
56 | command available:
57 |
58 | .. code-block:: console
59 |
60 | # yum install -y python-pip
61 | # pip install cinderlib
62 |
63 | This is the preferred method to install Cinder Library, as it will always
64 | install the most recent stable release.
65 |
66 | If you don't have `pip`_ installed, this `Python installation guide`_ can guide
67 | you through the process.
68 |
69 | .. _pip: https://pip.pypa.io
70 | .. _Python installation guide: http://docs.python-guide.org/en/latest/starting/installation/
71 |
72 |
73 | Latest code
74 | -----------
75 |
76 | Drivers
77 | _______
78 |
79 | If we don't have a packaged version or if we want to use a virtual environment
80 | we can install the drivers from source:
81 |
82 | .. code-block:: console
83 |
84 | $ virtualenv cinder
85 | $ source cinder/bin/activate
86 | $ pip install git+git://github.com/openstack/cinder.git
87 |
88 | Library
89 | _______
90 |
91 | The sources for Cinder Library can be downloaded from the `Github repo`_ to use
92 | the latest version of the library.
93 |
94 | You can either clone the public repository:
95 |
96 | .. code-block:: console
97 |
98 | $ git clone git://github.com/akrog/cinderlib
99 |
100 | Or download the `tarball`_:
101 |
102 | .. code-block:: console
103 |
104 | $ curl -OL https://github.com/akrog/cinderlib/tarball/master
105 |
106 | Once you have a copy of the source, you can install it with:
107 |
108 | .. code-block:: console
109 |
110 | $ virtualenv cinder
111 | $ python setup.py install
112 |
113 | .. _Github repo: https://github.com/openstack/cinderlib
114 | .. _tarball: https://github.com/openstack/cinderlib/tarball/master
115 |
--------------------------------------------------------------------------------
/doc/source/limitations.rst:
--------------------------------------------------------------------------------
1 | Limitations
2 | -----------
3 |
4 | Cinderlib works around a number of issues that were preventing the usage of the
5 | drivers by other Python applications, some of these are:
6 |
7 | - *Oslo config* configuration loading.
8 | - Cinder-volume dynamic configuration loading.
9 | - Privileged helper service.
10 | - DLM configuration.
11 | - Disabling of cinder logging.
12 | - Direct DB access within drivers.
13 | - *Oslo Versioned Objects* DB access methods such as `refresh` and `save`.
14 | - Circular references in *Oslo Versioned Objects* for serialization.
15 | - Using multiple drivers in the same process.
16 |
17 | Being in its early development stages, the library is in no way close to the
18 | robustness or feature richness that the Cinder project provides. Some of the
19 | more noticeable limitations one should be aware of are:
20 |
21 | - Most methods don't perform argument validation so it's a classic GIGO_
22 | library.
23 |
24 | - The logic has been kept to a minimum and higher functioning logic is expected
25 | to be handled by the caller: Quotas, tenant control, migration, etc.
26 |
27 | - Limited test coverage.
28 |
29 | - Only a subset of Cinder available operations are supported by the library.
30 |
31 | Besides *cinderlib's* own limitations the library also inherits some from
32 | *Cinder's* code and will be bound by the same restrictions and behaviors of the
33 | drivers as if they were running under the standard *Cinder* services. The most
34 | notorious ones are:
35 |
36 | - Dependency on the *eventlet* library.
37 |
38 | - Behavior inconsistency on some operations across drivers. For example you
39 | can find drivers where cloning is a cheap operation performed by the storage
40 | array whereas other will actually create a new volume, attach the source and
41 | new volume and perform a full copy of the data.
42 |
43 | - External dependencies must be handled manually. So users will have to take
44 | care of any library, package, or CLI tool that is required by the driver.
45 |
46 | - Relies on command execution via *sudo* for attach/detach operations as well
47 | as some CLI tools.
48 |
49 | .. _GIGO: https://en.wikipedia.org/wiki/Garbage_in,_garbage_out
50 |
--------------------------------------------------------------------------------
/doc/source/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | REM Command file for Sphinx documentation
4 |
5 | if "%SPHINXBUILD%" == "" (
6 | set SPHINXBUILD=sphinx-build
7 | )
8 | set BUILDDIR=_build
9 | set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
10 | set I18NSPHINXOPTS=%SPHINXOPTS% .
11 | if NOT "%PAPER%" == "" (
12 | set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
13 | set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
14 | )
15 |
16 | if "%1" == "" goto help
17 |
18 | if "%1" == "help" (
19 | :help
20 | echo.Please use `make ^` where ^ is one of
21 | echo. html to make standalone HTML files
22 | echo. dirhtml to make HTML files named index.html in directories
23 | echo. singlehtml to make a single large HTML file
24 | echo. pickle to make pickle files
25 | echo. json to make JSON files
26 | echo. htmlhelp to make HTML files and a HTML help project
27 | echo. qthelp to make HTML files and a qthelp project
28 | echo. devhelp to make HTML files and a Devhelp project
29 | echo. epub to make an epub
30 | echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter
31 | echo. text to make text files
32 | echo. man to make manual pages
33 | echo. texinfo to make Texinfo files
34 | echo. gettext to make PO message catalogs
35 | echo. changes to make an overview over all changed/added/deprecated items
36 | echo. xml to make Docutils-native XML files
37 | echo. pseudoxml to make pseudoxml-XML files for display purposes
38 | echo. linkcheck to check all external links for integrity
39 | echo. doctest to run all doctests embedded in the documentation if enabled
40 | goto end
41 | )
42 |
43 | if "%1" == "clean" (
44 | for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
45 | del /q /s %BUILDDIR%\*
46 | goto end
47 | )
48 |
49 |
50 | %SPHINXBUILD% 2> nul
51 | if errorlevel 9009 (
52 | echo.
53 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
54 | echo.installed, then set the SPHINXBUILD environment variable to point
55 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
56 | echo.may add the Sphinx directory to PATH.
57 | echo.
58 | echo.If you don't have Sphinx installed, grab it from
59 | echo.http://sphinx-doc.org/
60 | exit /b 1
61 | )
62 |
63 | if "%1" == "html" (
64 | %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
65 | if errorlevel 1 exit /b 1
66 | echo.
67 | echo.Build finished. The HTML pages are in %BUILDDIR%/html.
68 | goto end
69 | )
70 |
71 | if "%1" == "dirhtml" (
72 | %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
73 | if errorlevel 1 exit /b 1
74 | echo.
75 | echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
76 | goto end
77 | )
78 |
79 | if "%1" == "singlehtml" (
80 | %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
81 | if errorlevel 1 exit /b 1
82 | echo.
83 | echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
84 | goto end
85 | )
86 |
87 | if "%1" == "pickle" (
88 | %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
89 | if errorlevel 1 exit /b 1
90 | echo.
91 | echo.Build finished; now you can process the pickle files.
92 | goto end
93 | )
94 |
95 | if "%1" == "json" (
96 | %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
97 | if errorlevel 1 exit /b 1
98 | echo.
99 | echo.Build finished; now you can process the JSON files.
100 | goto end
101 | )
102 |
103 | if "%1" == "htmlhelp" (
104 | %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
105 | if errorlevel 1 exit /b 1
106 | echo.
107 | echo.Build finished; now you can run HTML Help Workshop with the ^
108 | .hhp project file in %BUILDDIR%/htmlhelp.
109 | goto end
110 | )
111 |
112 | if "%1" == "qthelp" (
113 | %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
114 | if errorlevel 1 exit /b 1
115 | echo.
116 | echo.Build finished; now you can run "qcollectiongenerator" with the ^
117 | .qhcp project file in %BUILDDIR%/qthelp, like this:
118 | echo.^> qcollectiongenerator %BUILDDIR%\qthelp\cinderlib.qhcp
119 | echo.To view the help file:
120 | echo.^> assistant -collectionFile %BUILDDIR%\qthelp\cinderlib.ghc
121 | goto end
122 | )
123 |
124 | if "%1" == "devhelp" (
125 | %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
126 | if errorlevel 1 exit /b 1
127 | echo.
128 | echo.Build finished.
129 | goto end
130 | )
131 |
132 | if "%1" == "epub" (
133 | %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
134 | if errorlevel 1 exit /b 1
135 | echo.
136 | echo.Build finished. The epub file is in %BUILDDIR%/epub.
137 | goto end
138 | )
139 |
140 | if "%1" == "latex" (
141 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
142 | if errorlevel 1 exit /b 1
143 | echo.
144 | echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
145 | goto end
146 | )
147 |
148 | if "%1" == "latexpdf" (
149 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
150 | cd %BUILDDIR%/latex
151 | make all-pdf
152 | cd %BUILDDIR%/..
153 | echo.
154 | echo.Build finished; the PDF files are in %BUILDDIR%/latex.
155 | goto end
156 | )
157 |
158 | if "%1" == "latexpdfja" (
159 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
160 | cd %BUILDDIR%/latex
161 | make all-pdf-ja
162 | cd %BUILDDIR%/..
163 | echo.
164 | echo.Build finished; the PDF files are in %BUILDDIR%/latex.
165 | goto end
166 | )
167 |
168 | if "%1" == "text" (
169 | %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
170 | if errorlevel 1 exit /b 1
171 | echo.
172 | echo.Build finished. The text files are in %BUILDDIR%/text.
173 | goto end
174 | )
175 |
176 | if "%1" == "man" (
177 | %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
178 | if errorlevel 1 exit /b 1
179 | echo.
180 | echo.Build finished. The manual pages are in %BUILDDIR%/man.
181 | goto end
182 | )
183 |
184 | if "%1" == "texinfo" (
185 | %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
186 | if errorlevel 1 exit /b 1
187 | echo.
188 | echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
189 | goto end
190 | )
191 |
192 | if "%1" == "gettext" (
193 | %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
194 | if errorlevel 1 exit /b 1
195 | echo.
196 | echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
197 | goto end
198 | )
199 |
200 | if "%1" == "changes" (
201 | %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
202 | if errorlevel 1 exit /b 1
203 | echo.
204 | echo.The overview file is in %BUILDDIR%/changes.
205 | goto end
206 | )
207 |
208 | if "%1" == "linkcheck" (
209 | %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
210 | if errorlevel 1 exit /b 1
211 | echo.
212 | echo.Link check complete; look for any errors in the above output ^
213 | or in %BUILDDIR%/linkcheck/output.txt.
214 | goto end
215 | )
216 |
217 | if "%1" == "doctest" (
218 | %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
219 | if errorlevel 1 exit /b 1
220 | echo.
221 | echo.Testing of doctests in the sources finished, look at the ^
222 | results in %BUILDDIR%/doctest/output.txt.
223 | goto end
224 | )
225 |
226 | if "%1" == "xml" (
227 | %SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml
228 | if errorlevel 1 exit /b 1
229 | echo.
230 | echo.Build finished. The XML files are in %BUILDDIR%/xml.
231 | goto end
232 | )
233 |
234 | if "%1" == "pseudoxml" (
235 | %SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml
236 | if errorlevel 1 exit /b 1
237 | echo.
238 | echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml.
239 | goto end
240 | )
241 |
242 | :end
243 |
--------------------------------------------------------------------------------
/doc/source/topics/initialization.rst:
--------------------------------------------------------------------------------
1 | ==============
2 | Initialization
3 | ==============
4 |
5 | The cinderlib itself doesn't require an initialization, as it tries to provide
6 | sensible settings, but in some cases we may want to modify these defaults to
7 | fit a specific desired behavior and the library provides a mechanism to support
8 | this.
9 |
10 | Library initialization should be done before making any other library call,
11 | including *Backend* initialization and loading serialized data; if we try to do
12 | it after other calls, the library will raise an `Exception`.
13 |
14 | Provided *setup* method is `cinderlib.Backend.global_setup`, but for
15 | convenience the library provides a reference to this class method in
16 | `cinderlib.setup`.
17 |
18 | The method definition is as follows:
19 |
20 | .. code-block:: python
21 |
22 | @classmethod
23 | def global_setup(cls, file_locks_path=None, root_helper='sudo',
24 | suppress_requests_ssl_warnings=True, disable_logs=True,
25 | non_uuid_ids=False, output_all_backend_info=False,
26 | project_id=None, user_id=None, persistence_config=None,
27 | fail_on_missing_backend=True, host=None,
28 | **cinder_config_params):
29 |
30 | The meaning of the library's configuration options are:
31 |
32 | file_locks_path
33 | ---------------
34 |
35 | Cinder is a complex system that can support Active-Active deployments, and each
36 | driver and storage backend has different restrictions, so in order to
37 | facilitate mutual exclusion it provides 3 different types of locks depending
38 | on the scope the driver requires:
39 |
40 | - Between threads of the same process.
41 | - Between different processes on the same host.
42 | - In all the OpenStack deployment.
43 |
44 | Cinderlib doesn't currently support the third type of locks, but that should
45 | not be an inconvenience for most cinderlib usage.
46 |
47 | Cinder uses file locks for the between process locking and cinderlib uses that
48 | same kind of locking for the third type of locks, which is also what Cinder
49 | uses when not deployed in an Active-Active fashion.
50 |
51 | Parameter defaults to `None`, which will use the path indicated by the
52 | `state_path` configuration option. It defaults to the current directory.
53 |
54 | root_helper
55 | -----------
56 |
57 | There are some operations in *Cinder* drivers that require `sudo` privileges,
58 | this could be because they are running Python code that requires it or because
59 | they are running a command with `sudo`.
60 |
61 | Attaching and detaching operations with *cinderlib* will also require `sudo`
62 | privileges.
63 |
64 | This configuration option allows us to define a custom root helper or disabling
65 | all `sudo` operations passing an empty string when we know we don't require
66 | them and we are running the process with a non passwordless `sudo` user.
67 |
68 | Defaults to `sudo`.
69 |
70 | suppress_requests_ssl_warnings
71 | ------------------------------
72 |
73 | Controls the suppression of the *requests* library SSL certificate warnings.
74 |
75 | Defaults to `True`.
76 |
77 | non_uuid_ids
78 | ------------
79 |
80 | As mentioned in the :doc:`volumes` section we can provide resource IDs manually
81 | at creation time, and some drivers even support non-UUID identifiers, but
82 | since that's not a given, validation will reject any non-UUID value.
83 |
84 | This configuration option allows us to disable the validation on the IDs, at
85 | the user's risk.
86 |
87 | Defaults to `False`.
88 |
89 | output_all_backend_info
90 | -----------------------
91 |
92 | Whether to include the *Backend* configuration when serializing objects.
93 | Detailed information can be found in the :doc:`serialization` section.
94 |
95 | Defaults to `False`.
96 |
97 | disable_logs
98 | ------------
99 |
100 | *Cinder* drivers are meant to be run within a full blown service, so they can
101 | be quite verbose in terms of logging, that's why *cinderlib* disables it by
102 | default.
103 |
104 | Defaults to `True`.
105 |
106 | project_id
107 | ----------
108 |
109 | *Cinder* is a multi-tenant service, and when resources are created they belong
110 | to a specific tenant/project. With this parameter we can define, using a
111 | string, an identifier for our project that will be assigned to the resources we
112 | create.
113 |
114 | Defaults to `cinderlib`.
115 |
116 | user_id
117 | -------
118 |
119 | Within each project/tenant the *Cinder* project supports multiple users, so
120 | when it creates a resource a reference to the user that created it is stored
121 | in the resource. Using this parameter we can define, using a string, an
122 | identifier for the user of cinderlib to be recorded in the resources.
123 |
124 | Defaults to `cinderlib`.
125 |
126 | persistence_config
127 | ------------------
128 |
129 | *Cinderlib* operation requires data persistence, which is achieved with a
130 | metadata persistence plugin mechanism.
131 |
132 | The project includes 2 types of plugins providing 3 different persistence
133 | solutions and more can be used via Python modules and passing custom plugins in
134 | this parameter.
135 |
136 | Users of the *cinderlib* library must decide which plugin best fits their needs
137 | and pass the appropriate configuration in a dictionary as the
138 | `persistence_config` parameter.
139 |
140 | The parameter is optional, and defaults to the `memory` plugin, but if it's
141 | passed it must always include the `storage` key specifying the plugin to be
142 | used. All other key-value pairs must be valid parameters for the specific
143 | plugin.
144 |
145 | Value for the `storage` key can be a string identifying a plugin registered
146 | using Python entrypoints, an instance of a class inheriting from
147 | `PersistenceDriverBase`, or a `PersistenceDriverBase` class.
148 |
149 | Information regarding available plugins, their description and parameters, and
150 | different ways to initialize the persistence can be found in the
151 | :doc:`metadata` section.
152 |
153 | fail_on_missing_backend
154 | -----------------------
155 |
156 | To facilitate operations on resources, *Cinderlib* stores a reference to the
157 | instance of the *backend* in most of the in-memory objects.
158 |
159 | When deserializing or retrieving objects from the metadata persistence storage
160 | *cinderlib* tries to properly set this *backend* instance based on the
161 | *backends* currently in memory.
162 |
163 | Trying to load an object without having instantiated the *backend* will result
164 | in an error, unless we define `fail_on_missing_backend` to `False` on
165 | initialization.
166 |
167 | This is useful if we are sharing the metadata persistence storage and we want
168 | to load a volume that is already connected to do just the attachment.
169 |
170 | host
171 | ----
172 |
173 | Host configuration option used for all volumes created by this cinderlib
174 | execution.
175 |
176 | On cinderlib volumes are selected based on the backend name, not on the
177 | host@backend combination like cinder does. Therefore backend names must be
178 | unique across all cinderlib applications that are using the same persistence
179 | storage backend.
180 |
181 | A second application running cinderlib with a different host value will have
182 | access to the same resources if it uses the same backend name.
183 |
184 | Defaults to the host's hostname.
185 |
186 | Other keyword arguments
187 | -----------------------
188 |
189 | Any other keyword argument passed to the initialization method will be
190 | considered a *Cinder* configuration option in the `[DEFAULT]` section.
191 |
192 | This can be useful to set additional logging configuration like debug log
193 | level, the `state_path` used by default in many options, or other options like
194 | the `ssh_hosts_key_file` required by drivers that use SSH.
195 |
196 | For a list of the possible configuration options one should look into the
197 | *Cinder* project's documentation.
198 |
--------------------------------------------------------------------------------
/doc/source/topics/metadata.rst:
--------------------------------------------------------------------------------
1 | ====================
2 | Metadata Persistence
3 | ====================
4 |
5 | *Cinder* drivers are not stateless, and the interface between the *Cinder* core
6 | code and the drivers allows them to return data that can be stored in the
7 | database. Some drivers that have not been updated are even accessing the
8 | database directly.
9 |
10 | Because *cinderlib* uses the *Cinder* drivers as they are, it cannot be
11 | stateless either.
12 |
13 | Originally *cinderlib* stored all the required metadata in RAM, and passed the
14 | responsibility of persisting this information to the user of the library.
15 |
16 | Library users would create or modify resources using *cinderlib*, and then
17 | serialize the resources and manage the storage of this information themselves.
18 | This allowed referencing those resources after exiting the application and in
19 | case of a crash.
20 |
21 | This solution would result in code duplication across projects, as many library
22 | users would end up using the same storage types for the serialized data.
23 | That's when the metadata persistence plugin was introduced in the code.
24 |
25 | With the metadata plugin mechanism we can have plugins for different storages
26 | and they can be shared between different projects.
27 |
28 | *Cinderlib* includes 2 types of plugins providing 3 different persistence
29 | solutions:
30 |
31 | - Memory (the default)
32 | - Database
33 | - Database in memory
34 |
35 | Using the memory mechanisms users can still use the JSON serialization
36 | mechanism to store the metadata.
37 |
38 | Currently we have memory and database plugins. Users can store the data
39 | wherever they want using the JSON serialization mechanism or with a custom
40 | metadata plugin.
41 |
42 | Persistence mechanism must be configured before initializing any *Backend*
43 | using the `persistence_config` parameter in the `setup` or `global_setup`
44 | methods.
45 |
46 | .. note:: When deserializing data using the `load` method on memory based
47 | storage we will not be making this data available using the *Backend* unless
48 | we pass `save=True` on the `load` call.
49 |
50 |
51 | Memory plugin
52 | -------------
53 |
54 | The memory plugin is the fastest one, but it has its drawbacks. It doesn't
55 | provide persistence across application restarts and it's more likely to have
56 | issues than the database plugin.
57 |
58 | Even though it's more likely to present issues with some untested drivers, it
59 | is still the default plugin, because it's the plugin that exposes the raw
60 | plugin mechanism and will expose any incompatibility issues with external
61 | plugins in *Cinder* drivers.
62 |
63 | This plugin is identified with the name `memory`, and here we can see a simple
64 | example of how to save the metadata to a file:
65 |
66 | .. code-block:: python
67 |
68 | import cinderlib as cl
69 |
70 | cl.setup(persistence_config={'storage': 'memory'})
71 |
72 | lvm = cl.Backend(volume_driver='cinder.volume.drivers.lvm.LVMVolumeDriver',
73 | volume_group='cinder-volumes',
74 | target_protocol='iscsi',
75 | target_helper='lioadm',
76 | volume_backend_name='lvm_iscsi')
77 | vol = lvm.create_volume(1)
78 |
79 | with open('lvm.txt', 'w') as f:
80 | f.write(lvm.dumps)
81 |
82 | And how to load it back:
83 |
84 | .. code-block:: python
85 |
86 | import cinderlib as cl
87 |
88 | cl.setup(persistence_config={'storage': 'memory'})
89 |
90 | lvm = cl.Backend(volume_driver='cinder.volume.drivers.lvm.LVMVolumeDriver',
91 | volume_group='cinder-volumes',
92 | target_protocol='iscsi',
93 | target_helper='lioadm',
94 | volume_backend_name='lvm_iscsi')
95 |
96 | with open('lvm.txt', 'r') as f:
97 | data = f.read()
98 | backends = cl.load(data, save=True)
99 | print(backends[0].volumes)
100 |
101 |
102 | Database plugin
103 | ---------------
104 |
105 | This metadata plugin is the most likely to be compatible with any *Cinder*
106 | driver, as it's built on top of *Cinder's* actual database layer.
107 |
108 | This plugin includes 2 storage options: memory and real database. They are
109 | identified with the storage identifiers `memory_db` and `db` respectively.
110 |
111 | The memory option will store the data as an in memory SQLite database. This
112 | option helps debugging issues on untested drivers. If a driver works with the
113 | memory database plugin, but doesn't with the `memory` one, then the issue is
114 | most likely caused by the driver accessing the database. Accessing the
115 | database could be happening directly importing the database layer, or
116 | indirectly using versioned objects.
117 |
118 | The memory database doesn't require any additional configuration, but when
119 | using a real database we must pass the connection information using `SQLAlchemy
120 | database URLs format`_ as the value of the `connection` key.
121 |
122 | .. code-block:: python
123 |
124 | import cinderlib as cl
125 |
126 | persistence_config = {'storage': 'db', 'connection': 'sqlite:///cl.sqlite'}
127 | cl.setup(persistence_config=persistence_config)
128 |
129 | lvm = cl.Backend(volume_driver='cinder.volume.drivers.lvm.LVMVolumeDriver',
130 | volume_group='cinder-volumes',
131 | target_protocol='iscsi',
132 | target_helper='lioadm',
133 | volume_backend_name='lvm_iscsi')
134 | vol = lvm.create_volume(1)
135 |
136 | Using it later is exactly the same:
137 |
138 | .. code-block:: python
139 |
140 | import cinderlib as cl
141 |
142 | persistence_config = {'storage': 'db', 'connection': 'sqlite:///cl.sqlite'}
143 | cl.setup(persistence_config=persistence_config)
144 |
145 | lvm = cl.Backend(volume_driver='cinder.volume.drivers.lvm.LVMVolumeDriver',
146 | volume_group='cinder-volumes',
147 | target_protocol='iscsi',
148 | target_helper='lioadm',
149 | volume_backend_name='lvm_iscsi')
150 |
151 | print(lvm.volumes)
152 |
153 |
154 | Custom plugins
155 | --------------
156 |
157 | The plugin mechanism uses Python entrypoints to identify plugins present in the
158 | system. So any module exposing the `cinderlib.persistence.storage` entrypoint
159 | will be recognized as a *cinderlib* metadata persistence plugin.
160 |
161 | As an example, the definition in `setup.py` of the entrypoints for the plugins
162 | included in *cinderlib* is:
163 |
164 | .. code-block:: python
165 |
166 | entry_points={
167 | 'cinderlib.persistence.storage': [
168 | 'memory = cinderlib.persistence.memory:MemoryPersistence',
169 | 'db = cinderlib.persistence.dbms:DBPersistence',
170 | 'memory_db = cinderlib.persistence.dbms:MemoryDBPersistence',
171 | ],
172 | },
173 |
174 | But there may be cases where we don't want to create entry points available
175 | system wide, and we want an application only plugin mechanism. For this
176 | purpose *cinderlib* supports passing a plugin instance or class as the value of
177 | the `storage` key in the `persistence_config` parameters.
178 |
179 | The instance and class must inherit from the `PersistenceDriverBase` in
180 | `cinderlib/persistence/base.py` and implement all the following methods:
181 |
182 | - `db`
183 | - `get_volumes`
184 | - `get_snapshots`
185 | - `get_connections`
186 | - `get_key_values`
187 | - `set_volume`
188 | - `set_snapshot`
189 | - `set_connection`
190 | - `set_key_value`
191 | - `delete_volume`
192 | - `delete_snapshot`
193 | - `delete_connection`
194 | - `delete_key_value`
195 |
196 | And the `__init__` method is usually needed as well, and it will receive as
197 | keyword arguments the parameters provided in the `persistence_config`. The
198 | `storage` key-value pair is not included as part of the keyword parameters.
199 |
200 | The invocation with a class plugin would look something like this:
201 |
202 |
203 | .. code-block:: python
204 |
205 | import cinderlib as cl
206 | from cinderlib.persistence import base
207 |
208 | class MyPlugin(base.PersistenceDriverBase):
209 | def __init__(self, location, user, password):
210 | ...
211 |
212 | persistence_config = {'storage': MyPlugin, 'location': '127.0.0.1',
213 | 'user': 'admin', 'password': 'nomoresecrets'}
214 | cl.setup(persistence_config=persistence_config)
215 |
216 | lvm = cl.Backend(volume_driver='cinder.volume.drivers.lvm.LVMVolumeDriver',
217 | volume_group='cinder-volumes',
218 | target_protocol='iscsi',
219 | target_helper='lioadm',
220 | volume_backend_name='lvm_iscsi')
221 |
222 |
223 | Migrating storage
224 | -----------------
225 |
226 | Metadata is crucial for the proper operation of *cinderlib*, as the *Cinder*
227 | drivers cannot retrieve this information from the storage backend.
228 |
229 | There may be cases where we want to stop using a metadata plugin and start
230 | using another one, but we have metadata on the old plugin, so we need to
231 | migrate this information from one backend to another.
232 |
233 | To achieve a metadata migration we can use methods `refresh`, `dump`, `load`,
234 | and `set_persistence`.
235 |
236 | An example code of how to migrate from SQLite to MySQL could look like this:
237 |
238 | .. code-block:: python
239 |
240 | import cinderlib as cl
241 |
242 | # Setup the source persistence plugin
243 | persistence_config = {'storage': 'db',
244 | 'connection': 'sqlite:///cinderlib.sqlite'}
245 | cl.setup(persistence_config=persistence_config)
246 |
247 | # Setup backends we want to migrate
248 | lvm = cl.Backend(volume_driver='cinder.volume.drivers.lvm.LVMVolumeDriver',
249 | volume_group='cinder-volumes',
250 | target_protocol='iscsi',
251 | target_helper='lioadm',
252 | volume_backend_name='lvm_iscsi')
253 |
254 | # Get all the data into memory
255 | data = cl.dump()
256 |
257 | # Setup new persistence plugin
258 | new_config = {
259 | 'storage': 'db',
260 | 'connection': 'mysql+pymysql://user:password@IP/cinder?charset=utf8'
261 | }
262 | cl.Backend.set_persistence(new_config)
263 |
264 | # Load and save the data into the new plugin
265 | backends = cl.load(data, save=True)
266 |
267 |
268 | .. _SQLAlchemy database URLs format: http://docs.sqlalchemy.org/en/latest/core/engines.html#database-urls
269 |
--------------------------------------------------------------------------------
/doc/source/topics/serialization.rst:
--------------------------------------------------------------------------------
1 | =============
2 | Serialization
3 | =============
4 |
5 | A *Cinder* driver is stateless on itself, but it still requires the right data
6 | to work, and that's why the cinder-volume service takes care of storing the
7 | state in the DB. This means that *cinderlib* will have to simulate the DB for
8 | the drivers, as some operations actually return additional data that needs to
9 | be kept and provided in any future operation.
10 |
11 | Originally *cinderlib* stored all the required metadata in RAM, and passed the
12 | responsibility of persisting this information to the user of the library.
13 |
14 | Library users would create or modify resources using *cinderlib*, and then
15 | would have to serialize the resources and manage the storage of this
16 | information. This allowed referencing those resources after exiting the
17 | application and in case of a crash.
18 |
19 | Now we support :doc:`metadata` plugins, but there are still cases where we'll
20 | want to serialize the data:
21 |
22 | - When logging or debugging resources.
23 | - When using a metadata plugin that stores the data in memory.
24 | - Over the wire transmission of the connection information to attach a volume
25 |   on a remote node.
26 |
27 | We have multiple methods to satisfy these needs, to serialize the data (`json`,
28 | `jsons`, `dump`, `dumps`), to deserialize it (`load`), and to convert to a user
29 | friendly object (`to_dict`).
30 |
31 | To JSON
32 | -------
33 |
34 | We can get a JSON representation of any *cinderlib* object - *Backend*,
35 | *Volume*, *Snapshot*, and *Connection* - using their following properties:
36 |
37 | - `json`: Returns a JSON representation of the current object information as a
38 | Python dictionary. Lazy loadable objects that have not been loaded will not
39 | be present in the resulting dictionary.
40 |
41 | - `jsons`: Returns a string with the JSON representation. It's the equivalent
42 | of converting to a string the dictionary from the `json` property.
43 |
44 | - `dump`: Identical to the `json` property with the exception that it ensures
45 | all lazy loadable attributes have been loaded. If an attribute had already
46 | been loaded its contents will not be refreshed.
47 |
48 | - `dumps`: Returns a string with the JSON representation of the fully loaded
49 | object. It's the equivalent of converting to a string the dictionary from
50 | the `dump` property.
51 |
52 | Besides these resource specific properties, we also have their equivalent
53 | methods at the library level that will operate on all the *Backends* present in
54 | the application.
55 |
56 | .. attention:: On the objects, these are properties (`volume.dumps`), but on
57 | the library, these are methods (`cinderlib.dumps()`).
58 |
59 | .. note::
60 |
61 | We don't have to worry about circular references, such as a *Volume* with a
62 | *Snapshot* that has a reference to its source *Volume*, since *cinderlib*
63 | is prepared to handle them.
64 |
65 | To demonstrate the serialization in *cinderlib* we can look at an easy way to
66 | save all the *Backends'* resources information from an application that uses
67 | *cinderlib* with the metadata stored in memory:
68 |
69 | .. code-block:: python
70 |
71 | with open('cinderlib.txt', 'w') as f:
72 | f.write(cinderlib.dumps())
73 |
74 | In a similar way we can also store a single *Backend* or a single *Volume*:
75 |
76 | .. code-block:: python
77 |
78 | vol = lvm.create_volume(size=1)
79 |
80 | with open('lvm.txt', 'w') as f:
81 | f.write(lvm.dumps)
82 |
83 | with open('vol.txt', 'w') as f:
84 | f.write(vol.dumps)
85 |
86 | We must remember that `dump` and `dumps` triggers loading of properties that
87 | are not already loaded. Any lazy loadable property that was already loaded
88 | will not be updated. A good way to ensure we are using the latest data is to
89 | trigger a `refresh` on the backends before doing the `dump` or `dumps`.
90 |
91 | .. code-block:: python
92 |
93 | for backend in cinderlib.Backend.backends:
94 | backend.refresh()
95 |
96 | with open('cinderlib.txt', 'w') as f:
97 | f.write(cinderlib.dumps())
98 |
99 | When serializing *cinderlib* resources we'll get all the data currently
100 | present. This means that when serializing a volume that is attached and has
101 | snapshots we'll get them all serialized.
102 |
103 | There are some cases where we don't want this, such as when implementing a
104 | persistence metadata plugin. We should use the `to_json` and `to_jsons`
105 | methods for such cases, as they will return a simplified serialization of the
106 | resource containing only the data from the resource itself.
107 |
108 | From JSON
109 | ---------
110 |
111 | Just like we had the `json`, `jsons`, `dump`, and `dumps` methods in all the
112 | *cinderlib* objects to serialize data, we also have the `load` method to
113 | deserialize this data back and recreate a *cinderlib* internal representation
114 | from JSON, be it stored in a Python string or a Python dictionary.
115 |
116 | The `load` method is present in *Backend*, *Volume*, *Snapshot*, and
117 | *Connection* classes as well as in the library itself. The resource specific
118 | `load` class method is the exact counterpart of the serialization methods, and
119 | it will deserialize the specific resource from the class it's being called from.
120 |
121 | The library's `load` method is capable of loading anything we have serialized.
122 | Not only can it load the full list of *Backends* with their resources, but it
123 | can also load individual resources. This makes it the recommended way to
124 | deserialize any data in *cinderlib*. By default, serialization and the
125 | metadata storage are disconnected, so loading serialized data will not ensure
126 | that the data is present in the persistence storage. We can ensure that
127 | deserialized data is present in the persistence storage passing `save=True` to
128 | the loading method.
129 |
130 | Considering the files we created in the earlier examples we can easily load our
131 | whole configuration with:
132 |
133 | .. code-block:: python
134 |
135 | # We must have initialized the Backends before reaching this point
136 |
137 | with open('cinderlib.txt', 'r') as f:
138 | data = f.read()
139 | backends = cinderlib.load(data, save=True)
140 |
141 | And for a specific backend or an individual volume:
142 |
143 | .. code-block:: python
144 |
145 | # We must have initialized the Backends before reaching this point
146 |
147 | with open('lvm.txt', 'r') as f:
148 | data = f.read()
149 | lvm = cinderlib.load(data, save=True)
150 |
151 | with open('vol.txt', 'r') as f:
152 | data = f.read()
153 | vol = cinderlib.load(data)
154 |
155 | This is the preferred way to deserialize objects, but we could also use the
156 | specific object's `load` method.
157 |
158 | .. code-block:: python
159 |
160 | # We must have initialized the Backends before reaching this point
161 |
162 | with open('lvm.txt', 'r') as f:
163 | data = f.read()
164 | lvm = cinderlib.Backend.load(data)
165 |
166 | with open('vol.txt', 'r') as f:
167 | data = f.read()
168 | vol = cinderlib.Volume.load(data)
169 |
170 | To dict
171 | -------
172 |
173 | Serialization properties and methods presented earlier are meant to store all
174 | the data and allow reuse of that data when using drivers of different releases.
175 | So it will include all required information to be backward compatible when
176 | moving from release N *Cinder* drivers to release N+1 drivers.
177 |
178 | There will be times when we'll just want to have a nice dictionary
179 | representation of a resource, be it to log it, to display it while debugging,
180 | or to send it from our controller application to the node where we are going to
181 | be doing the attachment. For these specific cases all resources, except the
182 | *Backend* have a `to_dict` method (not property this time) that will only
183 | return the relevant data from the resources.
184 |
185 |
186 | Backend configuration
187 | ---------------------
188 |
189 | When *cinderlib* serializes any object it also stores the *Backend* this object
190 | belongs to. For security reasons it only stores the identifier of the backend
191 | by default, which is the `volume_backend_name`. Since we are only storing a
192 | reference to the *Backend*, this means that when we are going through the
193 | deserialization process the *Backend* the object belonged to must already be
194 | present in *cinderlib*.
195 |
196 | This should be OK for most *cinderlib* usages, since it's common practice to
197 | store the storage backend connection information (credentials, addresses, etc.)
198 | in a different location than the data; but there may be situations (for example
199 | while testing) where we'll want to store everything in the same file, not only
200 | the *cinderlib* representation of all the storage resources but also the
201 | *Backend* configuration required to access the storage array.
202 |
203 | To enable the serialization of the whole driver configuration we have to
204 | specify `output_all_backend_info=True` on the *cinderlib* initialization
205 | resulting in a self contained file with all the information required to manage
206 | the resources.
207 |
208 | This means that with this configuration option we won't need to configure the
209 | *Backends* prior to loading the serialized JSON data, we can just load the data
210 | and *cinderlib* will automatically setup the *Backends*.
211 |
--------------------------------------------------------------------------------
/doc/source/topics/snapshots.rst:
--------------------------------------------------------------------------------
1 | =========
2 | Snapshots
3 | =========
4 |
5 | The *Snapshot* class provides the abstraction layer required to perform all
6 | operations on an existing snapshot, which means that the snapshot creation
7 | operation must be invoked from another class instance, since the new snapshot we
8 | want to create doesn't exist yet and we cannot use the *Snapshot* class to
9 | manage it.
10 |
11 | Create
12 | ------
13 |
14 | Once we have a *Volume* instance we are ready to create snapshots from it, and
15 | we can do it for attached as well as detached volumes.
16 |
17 | .. note::
18 |
19 | Some drivers, like the NFS, require assistance from the Compute service for
20 | attached volumes, so there is currently no way of doing this with
21 | *cinderlib*
22 |
23 | Creating a snapshot can only be performed by the `create_snapshot` method from
24 | our *Volume* instance, and once we have created a snapshot it will be tracked
25 | in the *Volume* instance's `snapshots` set.
26 |
27 | Here is a simple code to create a snapshot and use the `snapshots` set to
28 | verify that both, the returned value by the call as well as the entry added to
29 | the `snapshots` attribute, reference the same object and that the `volume`
30 | attribute in the *Snapshot* is referencing the source volume.
31 |
32 | .. code-block:: python
33 |
34 | vol = lvm.create_volume(size=1)
35 | snap = vol.create_snapshot()
36 | assert snap is list(vol.snapshots)[0]
37 | assert vol is snap.volume
38 |
39 | Delete
40 | ------
41 |
42 | Once we have created a *Snapshot* we can use its `delete` method to permanently
43 | remove it from the storage backend.
44 |
45 | Deleting a snapshot will remove its reference from the source *Volume*'s
46 | `snapshots` set.
47 |
48 | .. code-block:: python
49 |
50 | vol = lvm.create_volume(size=1)
51 | snap = vol.create_snapshot()
52 | assert 1 == len(vol.snapshots)
53 | snap.delete()
54 | assert 0 == len(vol.snapshots)
55 |
56 | Other methods
57 | -------------
58 |
59 | All other methods available in the *Snapshot* class will be explained in their
60 | relevant sections:
61 |
62 | - `load` will be explained together with `json`, `jsons`, `dump`, and `dumps`
63 | properties, and the `to_dict` method in the :doc:`serialization` section.
64 |
65 | - `refresh` will reload the volume from the metadata storage and reload any
66 | lazy loadable property that has already been loaded. Covered in the
67 | :doc:`serialization` and :doc:`tracking` sections.
68 |
69 | - `create_volume` method has been covered in the :doc:`volumes` section.
70 |
--------------------------------------------------------------------------------
/doc/source/topics/tracking.rst:
--------------------------------------------------------------------------------
1 | Resource tracking
2 | -----------------
3 |
4 | *Cinderlib* users will surely have their own variables to keep track of the
5 | *Backends*, *Volumes*, *Snapshots*, and *Connections*, but there may be cases
6 | where this is not enough, be it because we are in a place in our code where we
7 | don't have access to the original variables, because we want to iterate all
8 | instances, or maybe we are running some manual tests and we have lost the
9 | reference to a resource.
10 |
11 | For these cases we can use *cinderlib's* various tracking systems to access the
12 | resources. These tracking systems are also used by *cinderlib* in the
13 | serialization process. They all used to be in memory, but some will now reside
14 | in the metadata persistence storage.
15 |
16 | *Cinderlib* keeps track of all:
17 |
18 | - Initialized *Backends*.
19 | - Existing volumes in a *Backend*.
20 | - Connections to a volume.
21 | - Local attachment to a volume.
22 | - Snapshots for a given volume.
23 |
24 | Initialized *Backends* are stored in a dictionary in `Backends.backends` using
25 | the `volume_backend_name` as key.
26 |
27 | Existing volumes in a *Backend* are stored in the persistence storage, and can
28 | be lazy loaded using the *Backend* instance's `volumes` property.
29 |
30 | Existing *Snapshots* for a *Volume* are stored in the persistence storage, and
31 | can be lazy loaded using the *Volume* instance's `snapshots` property.
32 |
33 | Connections to a *Volume* are stored in the persistence storage, and can be
34 | lazy loaded using the *Volume* instance's `connections` property.
35 |
36 | .. note:: Lazy loadable properties will only load the value the first time we
37 | access them. Successive accesses will just return the cached value. To
38 | retrieve latest values for them as well as for the instance we can use the
39 | `refresh` method.
40 |
41 | The local attachment *Connection* of a volume is stored in the *Volume*
42 | instance's `local_attach` attribute and is stored in memory, so unloading the
43 | library will lose this information.
44 |
45 | We can easily use all these properties to display the status of all the
46 | resources we've created:
47 |
48 | .. code-block:: python
49 |
50 | # If volumes lazy loadable property was already loaded, refresh it
51 | lvm_backend.refresh()
52 |
53 | for vol in lvm_backend.volumes:
54 |         print('Volume %s is currently %s' % (vol.id, vol.status))
55 |
56 | # Refresh volume's snapshots and connections if previously lazy loaded
57 | vol.refresh()
58 |
59 | for snap in vol.snapshots:
60 | print('Snapshot %s for volume %s is currently %s' %
61 | (snap.id, snap.volume.id, snap.status))
62 |
63 | for conn in vol.connections:
64 | print('Connection from %s with ip %s to volume %s is %s' %
65 | (conn.connector_info['host'], conn.connector_info['ip'],
66 | conn.volume.id, conn.status))
67 |
--------------------------------------------------------------------------------
/doc/source/topics/volumes.rst:
--------------------------------------------------------------------------------
1 | =======
2 | Volumes
3 | =======
4 |
5 | The *Volume* class provides the abstraction layer required to perform all
6 | operations on an existing volume. Volume creation operations are carried out
7 | at the *Backend* level.
8 |
9 | Create
10 | ------
11 |
12 | The base resource in storage is the volume, and to create one the *cinderlib*
13 | provides three different mechanisms, each one with a different method that will
14 | be called on the source of the new volume.
15 |
16 | So we have:
17 |
18 | - Empty volumes that have no resource source and will have to be created
19 | directly on the *Backend* via the `create_volume` method.
20 |
21 | - Cloned volumes that will be created from a source *Volume* using its `clone`
22 | method.
23 |
24 | - Volumes from a snapshot, where the creation is initiated by the
25 | `create_volume` method from the *Snapshot* instance.
26 |
27 | .. note::
28 |
29 | *Cinder* NFS backends will create an image and not a directory to store
30 | files, which falls in line with *Cinder* being a Block Storage provider and
31 | not filesystem provider like *Manila* is.
32 |
33 | So assuming that we have an `lvm` variable holding an initialized *Backend*
34 | instance we could create a new 1GB volume quite easily:
35 |
36 | .. code-block:: python
37 |
38 | print('Stats before creating the volume are:')
39 | pprint(lvm.stats())
40 | vol = lvm.create_volume(1)
41 | print('Stats after creating the volume are:')
42 | pprint(lvm.stats())
43 |
44 |
45 | Now, if we have a volume that already contains data and we want to create a new
46 | volume that starts with the same contents we can use the source volume as the
47 | cloning source:
48 |
49 | .. code-block:: python
50 |
51 | cloned_vol = vol.clone()
52 |
53 | Some drivers support cloning to a bigger volume, so we could define the new
54 | size in the call and the driver would take care of extending the volume after
55 | cloning it, this is usually tightly linked to the `extend` operation support by
56 | the driver.
57 |
58 | Cloning to a greater size would look like this:
59 |
60 | .. code-block:: python
61 |
62 | new_size = vol.size + 1
63 | cloned_bigger_volume = vol.clone(size=new_size)
64 |
65 | .. note::
66 |
67 | Cloning efficiency is directly linked to the storage backend in use, so it
68 | will not have the same performance in all backends. While some backends
69 | like the Ceph/RBD will be extremely efficient others may range from slow to
70 | being actually implemented as a `dd` operation performed by the driver
71 | attaching source and destination volumes.
72 |
73 | .. code-block:: python
74 |
75 | vol = snap.create_volume()
76 |
77 | .. note::
78 |
79 | Just like with the cloning functionality, not all storage backends can
80 | efficiently handle creating a volume from a snapshot.
81 |
82 | On volume creation we can pass additional parameters like a `name` or a
83 | `description`, but these will be irrelevant for the actual volume creation and
84 | will only be useful to us to easily identify our volumes or to store additional
85 | information.
86 |
87 | Available fields with their types can be found in `Cinder's Volume OVO
88 | definition
89 | <https://github.com/openstack/cinder/blob/master/cinder/objects/volume.py>`_,
90 | but most of them are only relevant within the full *Cinder* service.
91 |
92 | We can access these fields as if they were part of the *cinderlib* *Volume*
93 | instance, since the class will try to retrieve any non *cinderlib* *Volume*
94 | from *Cinder*'s internal OVO representation.
95 |
96 | Some of the fields we could be interested in are:
97 |
98 | - `id`: UUID-4 unique identifier for the volume.
99 |
100 | - `user_id`: String identifier, in *Cinder* it's a UUID, but we can choose
101 | here.
102 |
103 | - `project_id`: String identifier, in *Cinder* it's a UUID, but we can choose
104 | here.
105 |
106 | - `snapshot_id`: ID of the source snapshot used to create the volume. This
107 | will be filled by *cinderlib*.
108 |
109 | - `host`: Used to store the backend name information together with the host
110 | name where cinderlib is running. This information is stored as a string in
111 | the form of *host@backend#pool*. This is an optional parameter, and passing
112 | it to `create_volume` will override the default value, allowing the caller to
113 | request a specific pool for multi-pool backends, though we recommend using
114 | the `pool_name` parameter instead. Issues will arise if parameter doesn't
115 | contain correct information.
116 |
117 | - `pool_name`: Pool name to use when creating the volume. Default is to use
118 | the first or only pool. To know possible values for a backend use the
119 | `pool_names` property on the *Backend* instance.
120 |
121 | - `size`: Volume size in GBi.
122 |
123 | - `availability_zone`: In case we want to define AZs.
124 |
125 | - `status`: This represents the status of the volume, and the most important
126 | statuses are `available`, `error`, `deleted`, `in-use`, `creating`.
127 |
128 | - `attach_status`: This can be `attached` or `detached`.
129 |
130 | - `scheduled_at`: Date-time when the volume was scheduled to be created.
131 | Currently not being used by *cinderlib*.
132 |
133 | - `launched_at`: Date-time when the volume creation was completed. Currently
134 | not being used by *cinderlib*.
135 |
136 | - `deleted`: Boolean value indicating whether the volume has already been
137 | deleted. It will be filled by *cinderlib*.
138 |
139 | - `terminated_at`: When the volume delete was sent to the backend.
140 |
141 | - `deleted_at`: When the volume delete was completed.
142 |
143 | - `display_name`: Name identifier, this is passed as `name` to all *cinderlib*
144 | volume creation methods.
145 |
146 | - `display_description`: Long description of the volume, this is passed as
147 | `description` to all *cinderlib* volume creation methods.
148 |
149 | - `source_volid`: ID of the source volume used to create this volume. This
150 | will be filled by *cinderlib*.
151 |
152 | - `bootable`: Not relevant for *cinderlib*, but maybe useful for the
153 | *cinderlib* user.
154 |
155 | - `extra_specs`: Extra volume configuration used by some drivers to specify
156 | additional information, such as compression, deduplication, etc. Key-Value
157 | pairs are driver specific.
158 |
159 | - `qos_specs`: Backend QoS configuration. Dictionary with driver specific
160 | key-value pairs that are enforced by the backend.
161 |
162 | .. note::
163 |
164 | *Cinderlib* automatically generates a UUID for the `id` if one is not
165 | provided at volume creation time, but the caller can actually provide a
166 | specific `id`.
167 |
168 | By default the `id` is limited to valid UUID and this is the only kind of
169 | ID that is guaranteed to work on all drivers. For drivers that support non
170 | UUID IDs we can instruct *cinderlib* to modify *Cinder*'s behavior and
171 | allow them. This is done on *cinderlib* initialization time passing
172 | `non_uuid_ids=True`.
173 |
174 | .. note::
175 |
176 | *Cinderlib* does not do scheduling on driver pools, so setting the
177 | `extra_specs` for a volume on drivers that expect the scheduler to select
178 |     a specific pool using them will not have the same behavior as in Cinder.
179 |
180 | In that case the caller of Cinderlib is expected to go through the stats
181 | and check the pool that matches the criteria and pass it to the Backend's
182 | `create_volume` method on the `pool_name` parameter.
183 |
184 | Delete
185 | ------
186 |
187 | Once we have created a *Volume* we can use its `delete` method to permanently
188 | remove it from the storage backend.
189 |
190 | In *Cinder* there are safeguards to prevent a delete operation from completing
191 | if it has snapshots (unless the delete request comes with the `cascade` option
192 | set to true), but here in *cinderlib* we don't, so it's the caller's
193 | responsibility to delete the snapshots.
194 |
195 | Deleting a volume with snapshots doesn't have a defined behavior for *Cinder*
196 | drivers, since it's never meant to happen, so some storage backends delete the
197 | snapshots, others leave them as they were, and others will fail the request.
198 |
199 | Example of creating and deleting a volume:
200 |
201 | .. code-block:: python
202 |
203 | vol = lvm.create_volume(size=1)
204 | vol.delete()
205 |
206 | .. attention::
207 |
208 | When deleting a volume that was the source of a cloning operation some
209 | backends cannot delete them (since they have copy-on-write clones) and they
210 | just keep them as a silent volume that will be deleted when its snapshot
211 | and clones are deleted.
212 |
213 | Extend
214 | ------
215 |
216 | Many storage backends and *Cinder* drivers support extending a volume to have
217 | more space and you can do this via the `extend` method present in your *Volume*
218 | instance.
219 |
220 | If the *Cinder* driver doesn't implement the extend operation it will raise a
221 | `NotImplementedError`.
222 |
223 | The only parameter received by the `extend` method is the new size, and this
224 | must always be greater than the current value because *cinderlib* is not
225 | validating this at the moment.
226 |
227 | Example of creating, extending, and deleting a volume:
228 |
229 | .. code-block:: python
230 |
231 | vol = lvm.create_volume(size=1)
232 | print('Vol %s has %s GBi' % (vol.id, vol.size))
233 | vol.extend(2)
234 | print('Extended vol %s has %s GBi' % (vol.id, vol.size))
235 | vol.delete()
236 |
237 | Other methods
238 | -------------
239 |
240 | All other methods available in the *Volume* class will be explained in their
241 | relevant sections:
242 |
243 | - `load` will be explained together with `json`, `jsons`, `dump`, and `dumps`
244 | properties, and the `to_dict` method in the :doc:`serialization` section.
245 |
246 | - `refresh` will reload the volume from the metadata storage and reload any
247 | lazy loadable property that has already been loaded. Covered in the
248 | :doc:`serialization` and :doc:`tracking` sections.
249 |
250 | - `create_snapshot` method will be covered in the :doc:`snapshots` section
251 | together with the `snapshots` attribute.
252 |
253 | - `attach`, `detach`, `connect`, and `disconnect` methods will be explained in
254 | the :doc:`connections` section.
255 |
--------------------------------------------------------------------------------
/doc/source/usage.rst:
--------------------------------------------------------------------------------
1 | =====
2 | Usage
3 | =====
4 |
5 | Thanks to the fully Object Oriented abstraction, instead of a classic method
6 | invocation passing the resources to work on, *cinderlib* makes it easy to hit
7 | the ground running when managing storage resources.
8 |
9 | Once the *Cinder* and *cinderlib* packages are installed we just have to import
10 | the library to start using it:
11 |
12 | .. code-block:: python
13 |
14 | import cinderlib
15 |
16 | .. note::
17 |
18 |     Installing the *Cinder* package does not require starting any of its
19 | services (volume, scheduler, api) or auxiliary services (KeyStone, MySQL,
20 | RabbitMQ, etc.).
21 |
22 | Usage documentation is not too long, and it is recommended to read it all
23 | before using the library to be sure we have at least a high level view of the
24 | different aspects related to managing our storage with *cinderlib*.
25 |
26 | Before going into too much detail there are some aspects we need to clarify to
27 | make sure our terminology is in sync and we understand where each piece fits.
28 |
29 | In *cinderlib* we have *Backends*, that refer to a storage array's specific
30 | connection configuration so it usually doesn't refer to the whole storage. With
31 | a backend we'll usually have access to the configured pool.
32 |
33 | Resources managed by *cinderlib* are *Volumes* and *Snapshots*, and a *Volume*
34 | can be created from a *Backend*, another *Volume*, or from a *Snapshot*, and a
35 | *Snapshot* can only be created from a *Volume*.
36 |
37 | Once we have a volume we can create *Connections* so it can be accessible from
38 | other hosts or we can do a local *Attachment* of the volume which will retrieve
39 | required local connection information of this host, create a *Connection* on
40 | the storage to this host, and then do the local *Attachment*.
41 |
42 | Given that *Cinder* drivers are not stateless, *cinderlib* cannot be either.
43 | That's why there is a metadata persistence plugin mechanism to provide
44 | different ways to store resource states. Currently we have memory and database
45 | plugins. Users can store the data wherever they want using the JSON
46 | serialization mechanism or with a custom metadata plugin.
47 |
48 | Each of the different topics are treated in detail on their specific sections:
49 |
50 | .. toctree::
51 | :maxdepth: 1
52 |
53 | topics/initialization
54 | topics/backends
55 | topics/volumes
56 | topics/snapshots
57 | topics/connections
58 | topics/serialization
59 | topics/tracking
60 | topics/metadata
61 |
62 | Auto-generated documentation is also available:
63 |
64 | .. toctree::
65 | :maxdepth: 2
66 |
67 | api/cinderlib
68 |
--------------------------------------------------------------------------------
/lower-constraints.txt:
--------------------------------------------------------------------------------
1 | cinder==13.0.0
2 | flake8==2.5.5
3 | hacking==0.12.0
4 | mock==2.0.0
5 | openstackdocstheme==1.18.1
6 | os-brick==2.7.0
7 | pyflakes==0.8.1
8 | pbr==2.0.0
9 | pep8==1.5.7
10 | reno==2.5.0
11 | six==1.10.0
12 | Sphinx==1.6.2
sphinxcontrib-websupport==1.0.1
14 | stestr==1.0.0
15 | stevedore==1.20.0
16 | unittest2==1.1.0
17 | urllib3==1.21.1
18 |
--------------------------------------------------------------------------------
/playbooks/cinder-gate-run.yaml:
--------------------------------------------------------------------------------
# Runs the cinderlib functional test suite on a DevStack node.
# Variables: devstack_base_dir, cinderlib_log_file, cinderlib_ignore_errors
- hosts: all
  become: True
  vars:
    base_dir: "{{ devstack_base_dir | default('/opt/stack/new') }}"
    default_log_file: "{{ base_dir }}/logs/cinderlib.txt"
  tasks:
    # The unit2 path depends on how/where it was installed, so resolve it
    # dynamically instead of hard-coding a location.
    - name: Locate unit2 binary location
      shell:
        cmd: which unit2
      register: unit2_which

    # Restrict passwordless sudo to the exact test-discovery command line.
    - name: Add sudoers role for cinderlib unit2
      copy:
        dest: /etc/sudoers.d/zuul-sudo-unit2
        content: "zuul ALL = NOPASSWD:{{ unit2_which.stdout }} discover -v -s cinderlib/tests/functional\n"
        mode: 0440

    - name: Validate sudoers config after edits
      command: "/usr/sbin/visudo -c"

    # pipefail makes the task fail on test errors even though the output is
    # piped through tee for logging.
    - name: Run cinderlib functional tests
      shell:
        cmd: "set -o pipefail && {{ unit2_which.stdout }} discover -v -s cinderlib/tests/functional 2>&1 | tee {{ cinderlib_log_file | default(default_log_file)}}"
        chdir: "{{ base_dir }}/cinderlib"
        executable: /bin/bash
      ignore_errors: "{{ cinderlib_ignore_errors | default(false) | bool}}"
28 |
--------------------------------------------------------------------------------
/playbooks/setup-ceph.yaml:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018, Red Hat, Inc.
2 | # All Rights Reserved.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
5 | # not use this file except in compliance with the License. You may obtain
6 | # a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 | # License for the specific language governing permissions and limitations
14 | # under the License.
15 | #
16 | #
---
#------------------------------------------------------------------------------
# Setup a Ceph cluster that will be used by cinderlib's functional tests
#------------------------------------------------------------------------------
- hosts: all
  vars:
    ansible_become: yes
  tasks:
    # Tox job has in its pre.yaml the ensure-tox role, which installs tox
    # from pip, which brings six from pip. This conflicts with ceph-common's
    # python-six dependency.
    - name: Remove pip's six from the system
      pip:
        name: six
        state: absent

    - name: Install ceph-common and epel-release
      yum:
        name: ['epel-release', 'ceph-common']
        state: present

    - name: Install Docker from epel
      yum:
        name: 'docker'
        state: present

    - name: Start Docker
      service:
        name: docker
        state: started

    # The single-container "demo" image runs a complete Ceph cluster (mon,
    # osd, mds) bound to localhost.
    - name: Start Ceph demo
      command: |
        docker run -d
        --name ceph-demo
        -e MON_IP=127.0.0.1
        -e CEPH_PUBLIC_NETWORK=127.0.0.1/0
        -e DEMO_DAEMONS="osd mds"
        --net=host
        --volume /etc/ceph:/etc/ceph
        --privileged
        ceph/daemon:latest-luminous
        demo

    # search_regex is a regular expression: the brackets must be escaped so we
    # wait for the literal "[global]" section header. Unescaped, '[global]' is
    # a character class matching any single one of those letters, so the wait
    # would succeed on almost any content.
    - name: Wait for ceph.conf
      wait_for:
        path: /etc/ceph/ceph.conf
        search_regex: '\[global\]'

    # insertafter is also a regex; anchor and escape it so the option lands
    # right after the [global] section header rather than after the last line
    # containing any of those characters.
    - name: Set ceph features in config
      lineinfile:
        path: /etc/ceph/ceph.conf
        insertafter: '^\[global\]'
        line: 'rbd default features = 3'
        state: present

    # Let the non-root functional tests read the admin keyring.
    - name: Set ceph keyring mode
      file:
        path: /etc/ceph/ceph.client.admin.keyring
        mode: 0644
77 |
--------------------------------------------------------------------------------
/playbooks/setup-lvm.yaml:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2018, Red Hat, Inc.
2 | # All Rights Reserved.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
5 | # not use this file except in compliance with the License. You may obtain
6 | # a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 | # License for the specific language governing permissions and limitations
14 | # under the License.
15 | #
16 | #
---
#------------------------------------------------------------------------------
# Setup an LVM VG that will be used by cinderlib's functional tests
#------------------------------------------------------------------------------
- hosts: all
  vars:
    cldir: .
    vg: cinder-volumes
    ansible_become: yes
  tasks:
    - name: Install LVM package
      package:
        name: lvm2
        state: present

    - name: Start LVM metadata
      service:
        name: lvm2-lvmetad
        state: started

    # Create the backing file at the same path the "creates" guard checks so
    # the task stays idempotent even when cldir is not the current directory
    # (previously the file was written to the cwd but checked under cldir).
    - name: Create LVM backing file
      command: "truncate -s 10G {{cldir}}/{{vg}}"
      args:
        creates: "{{cldir}}/{{vg}}"

    - name: Check if VG already exists
      shell: "losetup -l | awk '/{{vg}}/ {print $1}'"
      changed_when: false
      register: existing_loop_device

    - name: "Create loopback device {{vg}}"
      command: "losetup --show -f {{cldir}}/{{vg}}"
      register: new_loop_device
      when: existing_loop_device.stdout == ''
    # Workaround because Ansible destroys registers when skipped
    - set_fact: loop_device="{{ new_loop_device.stdout if new_loop_device.changed else existing_loop_device.stdout }}"

    - name: "Create VG {{vg}}"
      shell: "vgcreate {{vg}} {{loop_device}} && touch {{cldir}}/lvm.vgcreate"
      args:
        creates: "{{cldir}}/lvm.vgcreate"

    # Refresh the LVM metadata cache so the new VG is visible.
    - command: "vgscan --cache"
      changed_when: false

    - name: Install iSCSI package
      package:
        name: iscsi-initiator-utils
        state: present

    - name: Create initiator name
      shell: echo InitiatorName=`iscsi-iname` > /etc/iscsi/initiatorname.iscsi
      args:
        creates: /etc/iscsi/initiatorname.iscsi

    - name: Start iSCSI initiator
      service:
        name: iscsid
        state: started
76 |
--------------------------------------------------------------------------------
/releasenotes/notes/cinderlib-a458b8e23b6d35f4.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | prelude: >
3 |
4 | The Cinder Library, also known as cinderlib, is a Python library that
5 | leverages the Cinder project to provide an object oriented abstraction
6 | around Cinder's storage drivers to allow their usage directly without
7 | running any of the Cinder services or surrounding services, such as
8 | KeyStone, MySQL or RabbitMQ.
9 |
10 | This is the Tech Preview release of the library, and is intended for
11 | developers who only need the basic CRUD functionality of the drivers and
12 | don't care for all the additional features Cinder provides such as quotas,
13 | replication, multi-tenancy, migrations, retyping, scheduling, backups,
14 | authorization, authentication, REST API, etc.
15 | features:
16 | - Use a Cinder driver without running a DBMS, Message broker, or Cinder
17 | service.
18 |
19 | - Using multiple simultaneous drivers on the same application.
20 |
21 | - |
22 | Basic operations support.
23 |
24 | * Create volume
25 | * Delete volume
26 | * Extend volume
27 | * Clone volume
28 | * Create snapshot
29 | * Delete snapshot
30 | * Create volume from snapshot
31 | * Connect volume
32 | * Disconnect volume
33 | * Local attach
34 | * Local detach
35 | * Validate connector
36 | * Extra Specs for specific backend functionality.
37 | * Backend QoS
38 | * Multi-pool support
39 |
40 | - |
41 | Metadata persistence plugins.
42 |
43 | * Stateless: Caller stores JSON serialization.
44 | * Database: Metadata is stored in a database: MySQL, PostgreSQL, SQLite...
  * Custom plugin: Caller provides a module to store the metadata and
    cinderlib calls it.
47 |
--------------------------------------------------------------------------------
/releasenotes/source/conf.py:
--------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# cinderlib Release Notes documentation build configuration file
#
# Refer to the Sphinx documentation for advice on configuring this file:
#
#   http://www.sphinx-doc.org/en/stable/config.html

# -- General configuration ------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'reno.sphinxext',
    'openstackdocstheme',
]

# The suffix of source filenames.
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Cinderlib Release Notes'
copyright = u'2017, Cinder Developers'

# Release notes are unversioned, so we don't need to set version and release
version = ''
release = ''

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'openstackdocs'

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%Y-%m-%d %H:%M'

# -- Options for openstackdocstheme -------------------------------------------

# Used by the theme to build "report a bug" and source links.
repository_name = 'openstack/cinderlib'
bug_project = 'cinderlib'
bug_tag = ''
--------------------------------------------------------------------------------
/releasenotes/source/index.rst:
--------------------------------------------------------------------------------
1 | =========================
2 | Cinderlib Release Notes
3 | =========================
4 |
5 | .. toctree::
6 | :maxdepth: 1
7 |
8 | unreleased
9 |
--------------------------------------------------------------------------------
/releasenotes/source/unreleased.rst:
--------------------------------------------------------------------------------
1 | ==============================
2 | Current Series Release Notes
3 | ==============================
4 |
5 | .. release-notes::
6 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | cinder
2 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
[bdist_wheel]
# Pure-Python 2+3 code base: build a single universal wheel.
universal = 1

[flake8]
exclude = .git,.venv,.tox,dist,doc,*egg,build

# Package metadata; consumed by pbr at build time (see setup.py).
[metadata]
name = cinderlib
summary = Direct usage of Cinder Block Storage drivers without the services
description-file =
    README.rst
author = OpenStack
author-email = openstack-discuss@lists.openstack.org
home-page = https://docs.openstack.org/cinderlib/latest/
classifier =
    Environment :: OpenStack
    Intended Audience :: Information Technology
    Intended Audience :: System Administrators
    Intended Audience :: Developers
    License :: OSI Approved :: Apache Software License
    Operating System :: POSIX :: Linux
    Programming Language :: Python
    Programming Language :: Python :: 2
    Programming Language :: Python :: 2.7
    Programming Language :: Python :: 3
    Programming Language :: Python :: 3.5

[global]
setup-hooks =
    pbr.hooks.setup_hook

[files]
packages =
    cinderlib

# Metadata persistence plugins are discovered through this entry point
# namespace (see cinderlib/persistence).
[entry_points]
cinderlib.persistence.storage =
    memory = cinderlib.persistence.memory:MemoryPersistence
    db = cinderlib.persistence.dbms:DBPersistence
    memory_db = cinderlib.persistence.dbms:MemoryDBPersistence

[egg_info]
tag_build =
tag_date = 0
tag_svn_revision = 0

# i18n message catalog handling (Babel).
[compile_catalog]
directory = cinderlib/locale
domain = cinderlib

[update_catalog]
domain = cinderlib
output_dir = cinderlib/locale
input_file = cinderlib/locale/cinderlib.pot

[extract_messages]
keywords = _ gettext ngettext l_ lazy_gettext
mapping_file = babel.cfg
output_file = cinderlib/locale/cinderlib.pot
60 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2019, Red Hat, Inc.
2 | # All Rights Reserved.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
5 | # not use this file except in compliance with the License. You may obtain
6 | # a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 | # License for the specific language governing permissions and limitations
14 | # under the License.
15 |
16 | # THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
17 | import setuptools
18 |
19 | # In python < 2.7.4, a lazy loading of package `pbr` will break
20 | # setuptools if some other modules registered functions in `atexit`.
21 | # solution from: http://bugs.python.org/issue15881#msg170215
try:
    import multiprocessing  # noqa
except ImportError:
    pass

# All package metadata lives in setup.cfg; pbr reads it at build time.
setuptools.setup(
    setup_requires=['pbr>=2.0.0'],
    pbr=True)
30 |
--------------------------------------------------------------------------------
/test-requirements.txt:
--------------------------------------------------------------------------------
1 | # The order of packages is significant, because pip processes them in the order
2 | # of appearance. Changing the order has an impact on the overall integration
3 | # process, which may cause wedges in the gate later.
4 |
5 | pbr!=2.1.0,>=2.0.0 # Apache-2.0
6 | hacking!=0.13.0,<0.14,>=0.12.0 # Apache-2.0
7 | coverage!=4.4,>=4.0 # Apache-2.0
8 | ddt>=1.0.1 # MIT
9 | oslotest>=3.2.0 # Apache-2.0
10 | testscenarios>=0.4 # Apache-2.0/BSD
11 | testtools>=2.2.0 # MIT
12 | stestr>=1.0.0 # Apache-2.0
13 |
--------------------------------------------------------------------------------
/tools/cinder-cfg-to-python.py:
--------------------------------------------------------------------------------
1 | #!/bin/env python
2 | # Copyright (c) 2017, Red Hat, Inc.
3 | # All Rights Reserved.
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
6 | # not use this file except in compliance with the License. You may obtain
7 | # a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
13 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
14 | # License for the specific language governing permissions and limitations
15 | # under the License.
16 | """Generate Python code to initialize cinderlib based on Cinder config file
17 |
18 | This tool generates Python code to instantiate backends using a cinder.conf
19 | file.
20 |
21 | It supports multiple backends as defined in enabled_backends.
22 |
23 | This program uses the oslo.config module to load configuration options instead
24 | of using configparser directly because drivers will need variables to have the
25 | right type (string, list, integer...), and the types are defined in the code
26 | using oslo.config.
27 |
cinder-cfg-to-python cinder.conf cinderlib-conf.py
29 |
30 | If no output is provided it will use stdout, and if we also don't provide an
31 | input file, it will default to /etc/cinder/cinder.conf.
32 | """
33 |
34 | import sys
35 |
36 | import six
37 |
38 | from cinderlib.tests.functional import cinder_to_yaml
39 |
40 |
def _to_str(value):
    """Return a Python source representation of a configuration value.

    Strings are wrapped in double quotes, with backslashes and embedded
    double quotes escaped so the generated code remains syntactically valid
    (previously a value containing '"' or '\\' produced broken output).
    Non-string values are returned unchanged and rendered by the caller's
    ``%s`` formatting.
    """
    if isinstance(value, six.string_types):
        # Escape backslashes first so we don't double-escape the quotes.
        escaped = value.replace('\\', '\\\\').replace('"', '\\"')
        return '"' + escaped + '"'
    return value
45 |
46 |
def main(source, dest):
    """Convert the cinder.conf at ``source`` into cinderlib Python code.

    Writes an ``import cinderlib as cl`` line followed by one
    ``cl.Backend(...)`` instantiation per configured backend to ``dest``.
    """
    parsed = cinder_to_yaml.convert(source)
    chunks = ['import cinderlib as cl']
    for backend_cfg in parsed['backends']:
        # The backend name becomes a Python identifier, so replace the
        # separators it may contain.
        identifier = backend_cfg['volume_backend_name']
        identifier = identifier.replace(' ', '_').replace('-', '_')
        arguments = ', '.join('%s=%s' % (key, _to_str(val))
                              for key, val in backend_cfg.items())
        chunks.append('%s = cl.Backend(%s)' % (identifier, arguments))

    with open(dest, 'w') as output:
        output.write('\n\n'.join(chunks) + '\n')
58 |
59 |
if __name__ == '__main__':
    # Default to the system-wide Cinder config and stdout when the optional
    # positional arguments (input file, output file) are not provided.
    source = '/etc/cinder/cinder.conf' if len(sys.argv) < 2 else sys.argv[1]
    dest = '/dev/stdout' if len(sys.argv) < 3 else sys.argv[2]
    main(source, dest)
64 |
--------------------------------------------------------------------------------
/tools/coding-checks.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# This script relies on bash-only features ([[ ]] and "${files[@]}" in
# run_pylint), so it must not declare a POSIX /bin/sh interpreter: on
# systems where sh is dash, direct execution would fail.

set -eu
4 |
# Print the help text for this script and exit successfully.
usage() {
    echo "Usage: $0 [OPTION]..."
    echo "Run Cinderlib's coding check(s)"
    echo ""
    echo "  -Y, --pylint [] Run pylint check on the entire cinderlib module or just files changed in basecommit (e.g. HEAD~1)"
    echo "  -h, --help Print this usage message"
    echo
    exit 0
}
14 |
# Parse the command line. Recognized flags set globals (pylint); anything
# else is accumulated in $scriptargs for later use as the pylint target.
process_options() {
    i=1
    while [ $i -le $# ]; do
        # Indirectly fetch the i-th positional parameter.
        eval opt=\$$i
        case $opt in
            -h|--help) usage;;
            -Y|--pylint) pylint=1;;
            *) scriptargs="$scriptargs $opt"
        esac
        i=$((i+1))
    done
}
27 |
# Run pylint either on the whole cinderlib package (target contains "all")
# or only on the Python files changed since the given commit (defaults to
# HEAD~1). Exits 0 without running pylint when there are no Python changes.
run_pylint() {
    local target="${scriptargs:-HEAD~1}"

    if [[ "$target" = *"all"* ]]; then
        files="cinderlib"
    else
        # Only lint files Added/Copied/Modified/Renamed/Unmerged in the range.
        files=$(git diff --name-only --diff-filter=ACMRU $target "*.py")
    fi

    if [ -n "${files}" ]; then
        echo "Running pylint against:"
        printf "\t%s\n" "${files[@]}"
        # -E: report only errors; -j: parallelize across all CPU cores.
        pylint --rcfile=.pylintrc --output-format=colorized ${files} -E \
            -j `python -c 'import multiprocessing as mp; print(mp.cpu_count())'`
    else
        echo "No python changes in this commit, pylint check not required."
        exit 0
    fi
}
47 |
# Defaults: no extra arguments; pylint enabled (it is currently the only
# check this script performs, so it runs even without -Y).
scriptargs=
pylint=1

process_options $@

if [ $pylint -eq 1 ]; then
    run_pylint
    exit 0
fi
57 |
--------------------------------------------------------------------------------
/tools/fast8.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Run flake8 only on the lines changed in the last FAST8_NUM_COMMITS commits.

# FAST8_NUM_COMMITS=smart checks every commit not yet merged to master.
NUM_COMMITS=${FAST8_NUM_COMMITS:-1}

if [[ $NUM_COMMITS = "smart" ]]; then
    # Run on all commits not submitted yet
    # (sort of -- only checks vs. "master" since this is easy)
    NUM_COMMITS=$(git cherry master | wc -l)
fi

echo "Checking last $NUM_COMMITS commits."

# Work from the repository root regardless of where the script was invoked.
cd $(dirname "$0")/..
CHANGED=$(git diff --name-only HEAD~${NUM_COMMITS} | tr '\n' ' ')

# Skip files that don't exist
# (have been git rm'd)
CHECK=""
for FILE in $CHANGED; do
    if [ -f "$FILE" ]; then
        CHECK="$CHECK $FILE"
    fi
done

# Feed full-file diffs to flake8 --diff so only the changed files are checked.
diff -u --from-file /dev/null $CHECK | flake8 --diff
26 |
--------------------------------------------------------------------------------
/tools/lvm-prepare.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# Prepare a loopback-backed LVM volume group named cinder-volumes.
# Must be run as root.

# Create a ~22 GiB sparse backing file: seek past 22527 MiB and write one
# 1 MiB block at the end.
dd if=/dev/zero of=cinder-volumes bs=1048576 seek=22527 count=1

# Attach the backing file to the first free loop device and capture its path.
lodevice=$(losetup --show -f ./cinder-volumes)

pvcreate $lodevice
vgcreate cinder-volumes $lodevice

# Refresh the LVM metadata cache so the new VG is visible.
vgscan --cache
10 |
--------------------------------------------------------------------------------
/tools/virtualenv-sudo.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # Script to ensure that calling commands added in the virtualenv with sudo will
3 | # be able to find them during the functional tests, ie: cinder-rtstool
4 |
5 | params=()
6 | for arg in "$@"; do params+=("\"$arg\""); done
7 | params="${params[@]}"
8 | sudo -E --preserve-env=PATH /bin/bash -c "$params"
9 |
--------------------------------------------------------------------------------
/tox.ini:
--------------------------------------------------------------------------------
[tox]
minversion = 2.0
envlist = py27, py36, pep8
skipsdist = True
# NOTE(review): setenv is a per-testenv option and has no effect in the
# [tox] section -- confirm and move it into [testenv] or remove it.
setenv = VIRTUAL_ENV={envdir}

# Base unit-test environment; the envs below override pieces of it.
[testenv]
setenv = OS_STDOUT_CAPTURE=1
         OS_STDERR_CAPTURE=1
         OS_TEST_TIMEOUT=60
         OS_TEST_PATH=./cinderlib/tests/unit
usedevelop=True
install_command = pip install {opts} {packages}
# Use cinder from master instead of from PyPi
# NOTE(review): the unauthenticated git:// protocol is no longer served by
# GitHub; consider switching to https://github.com/openstack/cinder.git.
deps= -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt}
      -r{toxinidir}/test-requirements.txt
      git+git://github.com/openstack/cinder.git

# Delete stale bytecode first so removed/renamed modules don't leak into the
# run.
commands =
    find . -ignore_readdir_race -type f -name "*.pyc" -delete
    stestr run {posargs}
    stestr slowest

whitelist_externals =
    bash
    find
passenv = *_proxy *_PROXY

# Functional tests run against a real backend described by CL_FTEST_CFG
# (LVM by default; see cinderlib/tests/functional/*.yaml).
[testenv:functional]
usedevelop=True
basepython=python2.7
setenv = OS_TEST_PATH=./cinderlib/tests/functional
         CL_FTEST_CFG={toxinidir}/{env:CL_FTEST_CFG:cinderlib/tests/functional/lvm.yaml}
         CL_FTEST_ROOT_HELPER={env:CL_FTEST_ROOT_HELPER:{toxinidir}/tools/virtualenv-sudo.sh}
         ADD_PATHS=/usr/local/sbin:/usr/sbin

sitepackages = True
# Not reusing py27's env due to https://github.com/tox-dev/tox/issues/477
# envdir = {toxworkdir}/py27

# Must run serially or test_stats_with_creation may fail occasionally
commands =
    find . -ignore_readdir_race -type f -name "*.pyc" -delete
# Tox has a bug and it ignores the PATH set in setenv, so we work around it
    bash -i -c "PATH=$PATH:$ADD_PATHS stestr run --serial {posargs}"
    stestr slowest

whitelist_externals =
    bash
    find

[testenv:functional-py36]
usedevelop=True
setenv =
    {[testenv:functional]setenv}
sitepackages = True
basepython=python3.6
# Not reusing py35's env due to https://github.com/tox-dev/tox/issues/477
# envdir = {toxworkdir}/py35
commands = {[testenv:functional]commands}
whitelist_externals = {[testenv:functional]whitelist_externals}

[testenv:releasenotes]
# Not reusing doc's env due to https://github.com/tox-dev/tox/issues/477
# envdir = {toxworkdir}/docs
basepython = python3
deps =
  -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt}
  -r{toxinidir}/doc/requirements.txt
commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html

[testenv:docs]
basepython = python3
deps =
  -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt}
  -r{toxinidir}/doc/requirements.txt
commands =
  doc8 --ignore D001 --ignore-path .tox --ignore-path *.egg-info --ignore-path doc/build --ignore-path .eggs/*/EGG-INFO/*.txt -e txt -e rst
  rm -rf doc/build .autogenerated doc/source/api
  sphinx-build -W -b html doc/source doc/build/html
  rm -rf api-ref/build
whitelist_externals = rm

[testenv:pylint]
basepython = python3
deps = -r{toxinidir}/test-requirements.txt
       -r{toxinidir}/requirements.txt
       pylint==2.1.1
commands =
    bash ./tools/coding-checks.sh --pylint {posargs}

[testenv:cover]
# Also do not run test_coverage_ext tests while gathering coverage as those
# tests conflict with coverage.
basepython = python3
setenv =
    {[testenv]setenv}
    PYTHON=coverage run --source cinderlib --parallel-mode
commands =
    stestr run {posargs}
    coverage combine
    coverage html -d cover
    coverage xml -o cover/coverage.xml

[testenv:pep8]
basepython=python3
commands=flake8 {posargs} .
deps=
    flake8
    -r{toxinidir}/test-requirements.txt

# Quick flake8 over only the last FAST8_NUM_COMMITS commits.
[testenv:fast8]
basepython=python3
# Not reusing Flake8's env due to https://github.com/tox-dev/tox/issues/477
# envdir = {toxworkdir}/flake8
commands={toxinidir}/tools/fast8.sh
passenv = FAST8_NUM_COMMITS
--------------------------------------------------------------------------------