├── .gitignore ├── .gitreview ├── .zuul.yaml ├── LICENSE ├── README.rst ├── bindep.txt ├── doc ├── requirements.txt ├── source │ ├── _extra │ │ └── .htaccess │ ├── conf.py │ ├── index.rst │ └── specs └── test │ └── redirect-tests.txt ├── setup.cfg ├── setup.py ├── specs ├── 2023.1 │ └── placeholder.rst ├── 2023.2 │ └── dedicated-volume-backup-status-field.rst ├── 2024.1 │ └── quota-system.rst ├── 2024.2 │ ├── LUKS-image-encryption.rst │ ├── extend-volume-completion-action.rst │ └── user-visible-information-in-volume-types.rst ├── 2025.1 │ ├── migration-progress-in-volume-details.rst │ ├── openapi.rst │ └── remove-me.rst ├── juno │ ├── affinity-antiaffinity-filter.rst │ ├── cinder-storwize-driver-qos.rst │ ├── configurable-ssh-host-key-policy.rst │ ├── consistency-groups.rst │ ├── datera-driver.rst │ ├── debug-translation-removal.rst │ ├── deprecate_v1_api.rst │ ├── emc-vmax-driver-juno-update.rst │ ├── emc-vnx-direct-driver-juno-update.rst │ ├── hyper-v-smbfs-volume-driver.rst │ ├── i18n-enablement.rst │ ├── limit-volume-copy-bandwidth.rst │ ├── oracle-zfssa-cinder-driver.rst │ ├── pool-aware-cinder-scheduler.rst │ ├── pure-iscsi-volume-driver.rst │ ├── restblock-driver.rst │ ├── smbfs-volume-driver.rst │ ├── support-GPFS-nas-ibmnas-driver.rst │ ├── support-reset-state-for-backup.rst │ ├── support-volume-backup-for-qcow2.rst │ ├── support-volume-num-weighter.rst │ ├── task-log.rst │ ├── united-policy.json-in-cinder.rst │ ├── vmdk-backup.rst │ ├── volume-replication.rst │ └── xtremio_cinder_volume_driver.rst ├── kilo │ ├── abc-volume-drivers.rst │ ├── backup-notification.rst │ ├── chiscsi-iscsi-helper.rst │ ├── cinder-objects.rst │ ├── consistency-groups-kilo-update.rst │ ├── database-purge.rst │ ├── db-volume-filtering.rst │ ├── driver-private-data.rst │ ├── filtering-weighing-with-driver-supplied-functions.rst │ ├── incremental-backup.rst │ ├── iscsi-alternative-portal.rst │ ├── iscsi-multipath-enhancement.rst │ ├── limit-volume-copy-bps-per-backend.rst │ ├── 
linux-systemz.rst │ ├── multi-attach-volume.rst │ ├── nfs-backup.rst │ ├── over-subscription-in-thin-provisioning.rst │ ├── private-volume-types.rst │ ├── remotefs-cfg-improvements.rst │ ├── support-iscsi-driver.rst │ ├── support-volume-backup-quota.rst │ ├── unit-test-cases-for-cinder-scripts.rst │ ├── vmdk-oslo.vmware.rst │ ├── volume-sorting.rst │ └── volume-type-description.rst ├── liberty │ ├── abc-driver-update.rst │ ├── adopt-guru-meditation-report.rst │ ├── brick-add-open-iscsi-transport-suppport.rst │ ├── cinder-backend-report-discard.rst │ ├── cinder-internal-tenant.rst │ ├── cinder-nested-quota-driver.rst │ ├── clone-cg.rst │ ├── clone-image-in-glance-cinder-backend.rst │ ├── create-export-connector.rst │ ├── create-states.rst │ ├── db-archiving.rst │ ├── db2-database.rst │ ├── efficient-volume-copy-for-volume-migration.rst │ ├── enhance-list-operations-pagination-keys.rst │ ├── extract-brick.rst │ ├── generic-volume-migration.rst │ ├── get-vol-type-extra-specs.rst │ ├── huawei-sdshypervisor-driver.rst │ ├── image-volume-cache.rst │ ├── implement-force-detach-for-safe-cleanup.rst │ ├── incremental-backup-improvements-for-l.rst │ ├── non-disruptive-backup.rst │ ├── non-eventlet-wsgi-app.rst │ ├── optimze-rbd-copy-volume-to-image.rst │ ├── replication_v2.rst │ ├── rootwrap-daemon-mode.rst │ ├── rpc-object-compatibility.rst │ ├── standard-capabilities.rst │ ├── support-force-delete-backup.rst │ ├── support-import-export-snapshots.rst │ ├── support-modify-volume-image-metadata.rst │ ├── valid-states-api.rst │ ├── vhost-support.rst │ ├── volume-and-snap-delete.rst │ ├── volume-migration-improvement.rst │ └── volume-types-public-update.rst ├── mitaka │ ├── add_pagination_to_other_resources.rst │ ├── api-microversions.rst │ ├── assisted_snapshot_improvements.rst │ ├── backup-snapshots.rst │ ├── brick-extend-attached-volume.rst │ ├── brick-fetch-paths.rst │ ├── brocade-zone-driver-friendly-zone-names.rst │ ├── brocade-zone-driver-virtualfabrics-support.rst │ ├── 
capacity-headroom.rst │ ├── cheesecake.rst │ ├── cinder-api-atomic-status-change.rst │ ├── cinder-volume-active-active-support.rst │ ├── ha-aa-tooz-locks.rst │ ├── nfs-snapshots.rst │ ├── online-schema-upgrades.rst │ ├── scalable-backup-service.rst │ ├── support-volume-glance-metadata-query.rst │ └── use-cinder-without-nova.rst ├── newton │ ├── ceph-volume-migrate.rst │ ├── delete-multiple-metadata-keys.rst │ ├── delete-parameters.rst │ ├── differentiate-thick-thin-in-scheduler.rst │ ├── discovering-system-capabilities.rst │ ├── generic-volume-group.rst │ ├── group-snapshots.rst │ ├── ha-aa-cleanup.rst │ ├── ha-aa-manager_locks.rst │ ├── improvement-to-query-consistency-group-detail.rst │ ├── linux-ficon-support.rst │ ├── list-manage-existing.rst │ ├── retype-encrypted-volumes.rst │ ├── stochastic-weighing-scheduler.rst │ ├── summarymessage.rst │ ├── support-backup-import-on-another-storage-database.rst │ └── use-castellan-key-manager.rst ├── ocata │ ├── add-new-attach-apis.rst │ ├── ha-aa-job-distribution.rst │ ├── ha-aa-replication.rst │ └── support-reset-generic-group-and-group-snapshot-status.rst ├── pike │ ├── add-like-filter.rst │ ├── add-volume-type-filter-to-get-pools.rst │ ├── add-volumegroup-into-quota-management.rst │ ├── backup-init.rst │ ├── capacity_based_qos.rst │ ├── cinder-volume-revert-by-snapshot.rst │ ├── client-reset-state.rst │ ├── dynamic-log-levels.rst │ ├── explicit-user-messages.rst │ ├── extend-attached-volume.rst │ ├── generalized_list_filters.rst │ ├── metadata-for-backup-resource.rst │ ├── replication-group.rst │ ├── shared-backend-config.rst │ └── support-get-volume-metadata-summary.rst ├── queens │ ├── add-count-info-in-list-response.rst │ ├── add-shared-targets-to-volume-ref.rst │ ├── create-volume-from-backup.rst │ ├── enable-multiattach.rst │ ├── inspection-mechanism-for-capacity-limited-host.rst │ ├── migrate-fixed-key-to-barbican.rst │ ├── provisioning-improvements.rst │ ├── rbd-encryption.rst │ ├── 
report-backend-state-in-service-list.rst │ ├── use-oslo_db-enginefacade.rst │ └── v3-api-validation.rst ├── rocky │ ├── .placeholder │ ├── cheesecake-promote-backend.rst │ ├── support-az-in-volume-type.rst │ ├── support-filter-backend-on-operation-type.rst │ ├── support-image-signature-verification.rst │ └── transfer-snapshots-with-volumes.rst ├── stein │ ├── add-project-id-to-group-and-group-snapshot-response.rst │ ├── add-user-id-attribute-to-backup-response.rst │ ├── delete-from-db.rst │ ├── driver-reinitialization-after-fail.rst │ ├── improve-volume-transfer-records.rst │ ├── support-deferred-deletion-in-rbd.rst │ ├── support-validate-image-certificate.rst │ └── update-backup-size-when-backup-is-created.rst ├── template.rst ├── train │ ├── .placeholder │ ├── leverage-compression-accelerator.rst │ ├── untyped-volumes-to-default-volume-type.rst │ └── volume-rekey.rst ├── untargeted │ ├── dynamic_reconfiguration.rst │ └── generic-backup-implementation.rst ├── ussuri │ ├── .placeholder │ ├── add_backup_id_to_volume.rst │ ├── copy-image-in-multiple-stores.rst │ ├── query-cinder-resources-filter-by-time-comparison-operators.rst │ └── support-glance-multiple-backend.rst ├── victoria │ ├── .placeholder │ ├── backup-backends-configuration.rst │ ├── default-volume-type-overrides.rst │ ├── support-modern-compression-algorithms-in-cinder-backup.rst │ └── support-volume-local-cache.rst ├── wallaby │ ├── include-encryption-key-id-in-details.rst │ ├── nvme-agent.rst │ ├── nvme-connector-md-support.rst │ ├── specify-volume_type-and-availability_zone-for-backup-restore-API.rst │ └── store-volume-format-info.rst ├── xena │ ├── expose-cinder-user-visible-extra-specs-spec.rst │ ├── nvme-agent.rst │ ├── reset-state-robustification.rst │ ├── s-rbac-ready.rst │ ├── sizing-encrypted-volumes.rst │ ├── snapshot-attached-volumes.rst │ └── temp-resources.rst ├── yoga │ ├── add-volume-re-image-api.rst │ ├── nvme-multipath.rst │ ├── optimize-upload-volume-to-rbd-store.rst │ ├── 
project-id-optional-in-urls.rst │ └── s-rbac-ready.rst └── zed │ ├── add-capacity-factors-to-get-pools.rst │ └── image-encryption.rst ├── test-requirements.txt └── tox.ini /.gitignore: -------------------------------------------------------------------------------- 1 | AUTHORS 2 | ChangeLog 3 | build 4 | .tox 5 | .venv 6 | *.egg* 7 | *.swp 8 | *.swo 9 | *.pyc 10 | .DS_Store 11 | -------------------------------------------------------------------------------- /.gitreview: -------------------------------------------------------------------------------- 1 | [gerrit] 2 | host=review.opendev.org 3 | port=29418 4 | project=openstack/cinder-specs.git 5 | -------------------------------------------------------------------------------- /.zuul.yaml: -------------------------------------------------------------------------------- 1 | - project: 2 | templates: 3 | - openstack-specs-jobs 4 | check: 5 | jobs: 6 | - openstack-tox-pep8 7 | gate: 8 | jobs: 9 | - openstack-tox-pep8 10 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | ======================== 2 | Team and repository tags 3 | ======================== 4 | 5 | .. image:: https://governance.openstack.org/tc/badges/cinder-specs.svg 6 | :target: https://governance.openstack.org/tc/reference/tags/index.html 7 | 8 | .. Change things from this point on 9 | 10 | ================================== 11 | OpenStack Cinder Specifications 12 | ================================== 13 | 14 | This git repository is used to hold approved design specifications for additions 15 | to the Cinder project. Reviews of the specs are done in gerrit, using a 16 | similar workflow to how we review and merge changes to the code itself. 17 | 18 | The layout of this repository is:: 19 | 20 | specs// 21 | 22 | You can find an example spec in `specs/template.rst`. 
23 | 24 | Specifications are proposed for a given release by adding them to the 25 | `specs/` directory and posting it for review. The implementation 26 | status of a blueprint for a given release can be found by looking at the 27 | blueprint in launchpad. Not all approved blueprints will get fully implemented. 28 | 29 | **Previously approved specifications must be re-proposed for a new release.** 30 | The review will most likely be quick, but we need to make sure that everyone 31 | still understands the spec as written, and that it still fits in with the 32 | project's plans. 33 | 34 | Prior to the Juno development cycle, this repository was not used for spec 35 | reviews. Reviews prior to Juno were completed entirely through Launchpad 36 | blueprints:: 37 | 38 | https://blueprints.launchpad.net/cinder 39 | 40 | Please note, Launchpad blueprints are still used for tracking the 41 | current status of blueprints. For more information, see:: 42 | 43 | https://wiki.openstack.org/wiki/Blueprints 44 | 45 | For more information about working with gerrit, see:: 46 | 47 | https://docs.openstack.org/infra/manual/developers.html#development-workflow 48 | 49 | To validate that the specification is syntactically correct (i.e. get more 50 | confidence in the Jenkins result), please execute the following command:: 51 | 52 | $ tox 53 | 54 | After running ``tox``, the documentation will be available for viewing in HTML 55 | format in the ``doc/build/`` directory. Please do not checkin the generated 56 | HTML files as a part of your commit. 57 | -------------------------------------------------------------------------------- /bindep.txt: -------------------------------------------------------------------------------- 1 | # This is a cross-platform list tracking distribution packages needed for 2 | # install and tests; 3 | # see https://docs.openstack.org/infra/bindep/ for additional information. 4 | 5 | # gettext and graphviz are needed by doc builds only. 
For transition, 6 | # have them in both doc and test. 7 | gettext [!platform:suse doc test] 8 | gettext-runtime [platform:suse] 9 | graphviz [doc test] 10 | 11 | libpcre3-dev [platform:ubuntu] 12 | -------------------------------------------------------------------------------- /doc/requirements.txt: -------------------------------------------------------------------------------- 1 | # The order of packages is significant, because pip processes them in the order 2 | # of appearance. Changing the order has an impact on the overall integration 3 | # process, which may cause wedges in the gate later. 4 | 5 | openstackdocstheme>=3.2.0 # Apache-2.0 6 | sphinx>=2.0.0,!=2.1.0 # BSD 7 | yasfb>=0.5.1 8 | -------------------------------------------------------------------------------- /doc/source/_extra/.htaccess: -------------------------------------------------------------------------------- 1 | # This file contains redirects to handle existing URLs for specs 2 | # that have been moved 3 | Redirect 301 /openstack/cinder-specs/specs/train/image-encryption.html /openstack/cinder-specs/specs/zed/image-encryption.html 4 | Redirect 301 /openstack/cinder-specs/specs/ussuri/image-encryption.html /openstack/cinder-specs/specs/zed/image-encryption.html 5 | Redirect 301 /openstack/cinder-specs/specs/victoria/image-encryption.html /openstack/cinder-specs/specs/zed/image-encryption.html 6 | Redirect 301 /openstack/cinder-specs/specs/wallaby/image-encryption.html /openstack/cinder-specs/specs/zed/image-encryption.html 7 | Redirect 301 /openstack/cinder-specs/specs/xena/image-encryption.html /openstack/cinder-specs/specs/zed/image-encryption.html 8 | Redirect 301 /openstack/cinder-specs/specs/yoga/image-encryption.html /openstack/cinder-specs/specs/zed/image-encryption.html 9 | Redirect 301 /openstack/cinder-specs/specs/train/support-glance-multiple-backend.html /openstack/cinder-specs/specs/ussuri/support-glance-multiple-backend.html 10 | Redirect 301 
/openstack/cinder-specs/specs/train/query-cinder-resources-filter-by-time-comparison-operators.html /openstack/cinder-specs/specs/ussuri/query-cinder-resources-filter-by-time-comparison-operators.html 11 | Redirect 301 /openstack/cinder-specs/specs/newton/specify-volume_type-and-availability_zone-for-backup-restore-API.html /openstack/cinder-specs/specs/wallaby/specify-volume_type-and-availability_zone-for-backup-restore-API.html 12 | Redirect 301 /openstack/cinder-specs/specs/wallaby/reset-state-robustification.html /openstack/cinder-specs/specs/xena/reset-state-robustification.html 13 | Redirect 301 /openstack/cinder-specs/specs/wallaby/sizing-encrypted-volumes.html /openstack/cinder-specs/specs/xena/sizing-encrypted-volumes.html 14 | Redirect 301 /openstack/cinder-specs/specs/train/add-volume-re-image-api.html /openstack/cinder-specs/specs/yoga/add-volume-re-image-api.html 15 | -------------------------------------------------------------------------------- /doc/source/index.rst: -------------------------------------------------------------------------------- 1 | .. cinder-specs documentation master file 2 | 3 | ============================================= 4 | Block Storage Service Specifications (cinder) 5 | ============================================= 6 | 7 | 2025.1 approved specs 8 | ===================== 9 | 10 | .. toctree:: 11 | :glob: 12 | :maxdepth: 1 13 | 14 | specs/2025.1/* 15 | 16 | Untargeted specs 17 | ================ 18 | 19 | .. note:: 20 | 21 | The following specs have been approved but have not been completed 22 | in the originally planned release. Rather than remove them, they 23 | have been made 'untargeted' for future reference. If you would like 24 | to work on one of these specs, please re-propose it for the appropriate 25 | release. The review will most likely be quick, but we need to make sure 26 | that everyone still understands the spec as written, and that it still 27 | fits in with the Cinder project's plans. 28 | 29 | 30 | .. 
toctree:: 31 | :glob: 32 | :maxdepth: 1 33 | 34 | specs/untargeted/* 35 | 36 | Previously approved specs 37 | ========================= 38 | 39 | 2024.2 40 | ------ 41 | 42 | .. toctree:: 43 | :glob: 44 | :maxdepth: 1 45 | 46 | specs/2024.2/* 47 | 48 | 2024.1 49 | ------ 50 | 51 | .. toctree:: 52 | :glob: 53 | :maxdepth: 1 54 | 55 | specs/2024.1/* 56 | 57 | 2023.2 58 | ------ 59 | 60 | .. toctree:: 61 | :glob: 62 | :maxdepth: 1 63 | 64 | specs/2023.2/* 65 | 66 | 2023.1 67 | ------ 68 | 69 | .. toctree:: 70 | :glob: 71 | :maxdepth: 1 72 | 73 | specs/2023.1/* 74 | 75 | Zed 76 | --- 77 | 78 | .. toctree:: 79 | :glob: 80 | :maxdepth: 1 81 | 82 | specs/zed/* 83 | 84 | Yoga 85 | ---- 86 | 87 | .. toctree:: 88 | :glob: 89 | :maxdepth: 1 90 | 91 | specs/yoga/* 92 | 93 | Xena 94 | ---- 95 | 96 | .. toctree:: 97 | :glob: 98 | :maxdepth: 1 99 | 100 | specs/xena/* 101 | 102 | Wallaby 103 | -------- 104 | 105 | .. toctree:: 106 | :glob: 107 | :maxdepth: 1 108 | 109 | specs/wallaby/* 110 | 111 | Victoria 112 | -------- 113 | 114 | .. toctree:: 115 | :glob: 116 | :maxdepth: 1 117 | 118 | specs/victoria/* 119 | 120 | Ussuri 121 | ------ 122 | 123 | .. toctree:: 124 | :glob: 125 | :maxdepth: 1 126 | 127 | specs/ussuri/* 128 | 129 | Train 130 | ----- 131 | 132 | .. toctree:: 133 | :glob: 134 | :maxdepth: 1 135 | 136 | specs/train/* 137 | 138 | Stein 139 | ----- 140 | 141 | .. toctree:: 142 | :glob: 143 | :maxdepth: 1 144 | 145 | specs/stein/* 146 | 147 | Rocky 148 | ----- 149 | 150 | .. toctree:: 151 | :glob: 152 | :maxdepth: 1 153 | 154 | specs/rocky/* 155 | 156 | Queens 157 | ------ 158 | 159 | .. toctree:: 160 | :glob: 161 | :maxdepth: 1 162 | 163 | specs/queens/* 164 | 165 | Pike 166 | ---- 167 | 168 | .. toctree:: 169 | :glob: 170 | :maxdepth: 1 171 | 172 | specs/pike/* 173 | 174 | Ocata 175 | ----- 176 | 177 | .. toctree:: 178 | :glob: 179 | :maxdepth: 1 180 | 181 | specs/ocata/* 182 | 183 | Newton 184 | ------ 185 | 186 | .. 
toctree:: 187 | :glob: 188 | :maxdepth: 1 189 | 190 | specs/newton/* 191 | 192 | Mitaka 193 | ------ 194 | 195 | .. toctree:: 196 | :glob: 197 | :maxdepth: 1 198 | 199 | specs/mitaka/* 200 | 201 | Liberty 202 | ------- 203 | 204 | .. toctree:: 205 | :glob: 206 | :maxdepth: 1 207 | 208 | specs/liberty/* 209 | 210 | Kilo 211 | ---- 212 | 213 | .. toctree:: 214 | :glob: 215 | :maxdepth: 1 216 | 217 | specs/kilo/* 218 | 219 | Juno 220 | ---- 221 | 222 | .. toctree:: 223 | :glob: 224 | :maxdepth: 1 225 | 226 | specs/juno/* 227 | 228 | Indices and tables 229 | ================== 230 | 231 | * :ref:`search` 232 | -------------------------------------------------------------------------------- /doc/source/specs: -------------------------------------------------------------------------------- 1 | ../../specs -------------------------------------------------------------------------------- /doc/test/redirect-tests.txt: -------------------------------------------------------------------------------- 1 | # This file contains tests for redirects to handle existing URLs for 2 | # specs that have been moved. See 3 | # https://docs.openstack.org/whereto/latest/ for details. 
4 | /openstack/cinder-specs/specs/train/image-encryption.html 301 /openstack/cinder-specs/specs/zed/image-encryption.html 5 | /openstack/cinder-specs/specs/ussuri/image-encryption.html 301 /openstack/cinder-specs/specs/zed/image-encryption.html 6 | /openstack/cinder-specs/specs/victoria/image-encryption.html 301 /openstack/cinder-specs/specs/zed/image-encryption.html 7 | /openstack/cinder-specs/specs/wallaby/image-encryption.html 301 /openstack/cinder-specs/specs/zed/image-encryption.html 8 | /openstack/cinder-specs/specs/xena/image-encryption.html 301 /openstack/cinder-specs/specs/zed/image-encryption.html 9 | /openstack/cinder-specs/specs/yoga/image-encryption.html 301 /openstack/cinder-specs/specs/zed/image-encryption.html 10 | /openstack/cinder-specs/specs/train/support-glance-multiple-backend.html 301 /openstack/cinder-specs/specs/ussuri/support-glance-multiple-backend.html 11 | /openstack/cinder-specs/specs/train/query-cinder-resources-filter-by-time-comparison-operators.html 301 /openstack/cinder-specs/specs/ussuri/query-cinder-resources-filter-by-time-comparison-operators.html 12 | /openstack/cinder-specs/specs/newton/specify-volume_type-and-availability_zone-for-backup-restore-API.html 301 /openstack/cinder-specs/specs/wallaby/specify-volume_type-and-availability_zone-for-backup-restore-API.html 13 | /openstack/cinder-specs/specs/wallaby/reset-state-robustification.html 301 /openstack/cinder-specs/specs/xena/reset-state-robustification.html 14 | /openstack/cinder-specs/specs/wallaby/sizing-encrypted-volumes.html 301 /openstack/cinder-specs/specs/xena/sizing-encrypted-volumes.html 15 | /openstack/cinder-specs/specs/train/add-volume-re-image-api.html 301 /openstack/cinder-specs/specs/yoga/add-volume-re-image-api.html 16 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [metadata] 2 | name = cinder-specs 3 | description = Openstack 
Cinder Project Development Specifications 4 | long_description = file: README.rst 5 | author = OpenStack 6 | author_email = openstack-discuss@lists.openstack.org 7 | url = http://specs.openstack.org/openstack/cinder-specs/ 8 | classifiers = 9 | Environment :: OpenStack 10 | Intended Audience :: Information Technology 11 | Intended Audience :: System Administrators 12 | Intended Audience :: Developers 13 | License :: OSI Approved :: Apache Software License 14 | Operating System :: POSIX :: Linux 15 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # Copyright (c) 2013 Hewlett-Packard Development Company, L.P. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 13 | # implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | # THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT 18 | import setuptools 19 | 20 | setuptools.setup( 21 | setup_requires=['pbr'], 22 | pbr=True) 23 | -------------------------------------------------------------------------------- /specs/2023.1/placeholder.rst: -------------------------------------------------------------------------------- 1 | .. This file is a place holder. It should be removed by 2 | any patch proposing a spec for the 2023.1 release 3 | 4 | ============================================== 5 | No specs were approved for the 2023.1 release. 
6 | ============================================== 7 | 8 | -------------------------------------------------------------------------------- /specs/juno/cinder-storwize-driver-qos.rst: -------------------------------------------------------------------------------- 1 | .. 2 | This work is licensed under a Creative Commons Attribution 3.0 Unported 3 | License. 4 | 5 | http://creativecommons.org/licenses/by/3.0/legalcode 6 | 7 | ============================================= 8 | Add QoS capability to the IBM Storwize driver 9 | ============================================= 10 | 11 | https://blueprints.launchpad.net/cinder/+spec/cinder-storwize-driver-qos 12 | 13 | Storwize driver can provide the QoS capability with the parameter of 14 | IO throttling, which caps the amount of I/O that is accepted. This 15 | blueprint proposes to add the QoS support for the Storwize driver. 16 | 17 | Problem description 18 | =================== 19 | 20 | Storwize backend storage can be enabled with the QoS support by limiting 21 | the amount of I/O for a specific volume. QoS has been implemented for 22 | some cinder storage drivers, and it is feasible for the Storwize driver 23 | to add this feature as well. 24 | 25 | Use Cases 26 | ========= 27 | 28 | Proposed change 29 | =============== 30 | 31 | To enable the QoS for the Storwize driver, an extra spec with IOThrottling as 32 | the key needs to bind to a volume type. All the following changes apply to the 33 | Storwize driver code.
34 | 35 | Create volume: 36 | 37 | * If QoS is enabled and IOThrottling is available as the QoS key, set the I/O 38 | throttling of the volume into the specified IOThrottling value. 39 | 40 | Create clone volume: 41 | 42 | * If the QoS is set for the original volume, the target volume needs to set 43 | to the same QoS. 44 | 45 | Create snapshot: 46 | 47 | * The QoS attributes will be copied to the snapshot. 48 | 49 | Create volume from snapshot: 50 | 51 | * If QoS is enabled and IOThrottling is available as the QoS key, set the IO 52 | throttling of the volume into the specified IOThrottling value. 53 | 54 | Re-type volume: 55 | 56 | * If a volume type is changed, a different IOThrottling value will apply to 57 | the volume and the I/O of the volume needs to be set to the new IO 58 | throttling. 59 | 60 | 61 | Alternatives 62 | ------------ 63 | 64 | The proposed change follows the pattern, in which other drivers implement the 65 | QoS feature. 66 | 67 | Data model impact 68 | ----------------- 69 | 70 | None. 71 | 72 | REST API impact 73 | --------------- 74 | 75 | None. 76 | 77 | Security impact 78 | --------------- 79 | 80 | None. 81 | 82 | Notifications impact 83 | -------------------- 84 | 85 | None. 86 | 87 | Other end user impact 88 | --------------------- 89 | 90 | None. 91 | 92 | Performance Impact 93 | ------------------ 94 | 95 | None. 96 | 97 | Other deployer impact 98 | --------------------- 99 | 100 | The QoS for Storwize driver can be configurable with a configuration option 101 | in cinder.conf. An extra spec with the key of IOThrottling can bind to a 102 | volume type, so that the volumes with this volume type are guaranteed with 103 | an I/O throttling rate. 104 | 105 | Developer impact 106 | ---------------- 107 | 108 | None. 
109 | 110 | 111 | Implementation 112 | ============== 113 | 114 | Assignee(s) 115 | ----------- 116 | 117 | Primary assignee: 118 | Vincent Hou 119 | 120 | Other contributors: 121 | TBD 122 | 123 | Work Items 124 | ---------- 125 | 126 | * Add a configuration option to allow the user to use the QoS for Storwize 127 | driver. 128 | * Add the QoS enablement check and set the I/O throttling in create volume. 129 | * Add the QoS enablement check and set the I/O throttling in create volume 130 | from snapshot. 131 | * Set the I/O throttling to the new volume according to the original volume 132 | in create clone volume. 133 | * Copy the QoS attributes to the snapshot in create snapshot. 134 | * Change the I/O throttling of the volume if the volume type is changed. 135 | 136 | Dependencies 137 | ============ 138 | 139 | None. 140 | 141 | Testing 142 | ======= 143 | 144 | * Unit tests need to be added to test the QoS for Storwize driver. 145 | 146 | Documentation Impact 147 | ==================== 148 | 149 | * Add how to configure the QoS for the Storwize driver in the document. 150 | 151 | References 152 | ========== 153 | 154 | * Add QoS capability to the IBM Storwize driver 155 | https://blueprints.launchpad.net/cinder/+spec/cinder-storwize-driver-qos 156 | 157 | -------------------------------------------------------------------------------- /specs/juno/datera-driver.rst: -------------------------------------------------------------------------------- 1 | .. 2 | This work is licensed under a Creative Commons Attribution 3.0 Unported 3 | License. 4 | 5 | http://creativecommons.org/licenses/by/3.0/legalcode 6 | 7 | ========================================== 8 | Datera Driver 9 | ========================================== 10 | 11 | https://blueprints.launchpad.net/cinder/+spec/datera-driver 12 | 13 | Datera storage driver in Cinder. 14 | 15 | Problem description 16 | =================== 17 | 18 | Integration for Datera storage is not available in OpenStack.
19 | 20 | Use Cases 21 | ========= 22 | 23 | Proposed change 24 | =============== 25 | 26 | Add a Cinder driver that can allow OpenStack services to communicate with 27 | Datera storage for both vHost and ISCSI. 28 | 29 | Alternatives 30 | ------------ 31 | 32 | n/a 33 | 34 | Data model impact 35 | ----------------- 36 | 37 | n/a 38 | 39 | REST API impact 40 | --------------- 41 | 42 | n/a 43 | 44 | Security impact 45 | --------------- 46 | 47 | n/a 48 | 49 | Notifications impact 50 | -------------------- 51 | 52 | n/a 53 | 54 | Other end user impact 55 | --------------------- 56 | 57 | n/a 58 | 59 | Performance Impact 60 | ------------------ 61 | 62 | n/a 63 | 64 | Other deployer impact 65 | --------------------- 66 | 67 | The deployer needs to set the cinder.conf to the right `volume_driver`. 68 | 69 | volume_driver=cinder.volume.drivers.datera.DateraDriver 70 | 71 | ISCSI would require setting up `san_ip`, `san_login` and `san_password` 72 | appropriately. 73 | 74 | vHost would have dependencies on using Linux-IO with the vHost fabric module. 75 | The target_helper in the cinder.conf needs to be set to lio_vhost. 76 | 77 | 78 | Developer impact 79 | ---------------- 80 | 81 | n/a 82 | 83 | Implementation 84 | ============== 85 | 86 | Assignee(s) 87 | ----------- 88 | 89 | Primary assignee: 90 | thingee 91 | 92 | Work Items 93 | ---------- 94 | 95 | * Write driver with ISCSI support. 96 | * Write unit tests for ISCSI support. 97 | * Provide cert tests with ISCSI support. 98 | * Write driver with vhost support. 99 | * Write unit tests for vhost support. 100 | * Provide cert tests with vhost support. 101 | * Provide CI with ISCSI and/or vhost support. 102 | 103 | Dependencies 104 | ============ 105 | 106 | * Need vHost connector [1].
107 | 108 | Testing 109 | ======= 110 | 111 | * Unit tests 112 | * CI testing 113 | 114 | Documentation Impact 115 | ==================== 116 | 117 | n/a 118 | 119 | References 120 | ========== 121 | 122 | [1] - https://review.opendev.org/#/c/103048/ 123 | -------------------------------------------------------------------------------- /specs/juno/deprecate_v1_api.rst: -------------------------------------------------------------------------------- 1 | .. 2 | This work is licensed under a Creative Commons Attribution 3.0 Unported 3 | License. 4 | 5 | http://creativecommons.org/licenses/by/3.0/legalcode 6 | 7 | ========================================== 8 | Deprecate Cinder V1 API 9 | ========================================== 10 | 11 | https://blueprints.launchpad.net/cinder/+spec/deprecate-v1-api 12 | 13 | The Cinder V1 API should be deprecated as v2 has been developed back in 14 | Grizzly and is stable. Newer features are being developed for v2 that v1 will 15 | never have. 16 | 17 | Problem description 18 | =================== 19 | 20 | The v2 API switch involves some changes from clients to make things more 21 | consistent like `display_name` becoming `name` and `display_description` becoming 22 | `description`. This is done on both volume and snapshot controllers. It is 23 | assumed there are many clients out there still supporting v1, as well as many 24 | deployed clouds still using v1 that would need to make some changes to ease the 25 | switch. 26 | 27 | Use Cases 28 | ========= 29 | 30 | Proposed change 31 | =============== 32 | 33 | Leave v1 enabled for Juno, give a warning for enabling it though in Cinder API 34 | service startup. Both /v1 and /v2 can work at the same time which can allow 35 | time to switch clients over at the user's pace. In K, turn off v1.
36 | 37 | Alternatives 38 | ------------ 39 | 40 | n/a 41 | 42 | Data model impact 43 | ----------------- 44 | 45 | n/a 46 | 47 | REST API impact 48 | --------------- 49 | 50 | /v1 will continue to work as normal and serve incoming requests. 51 | 52 | Security impact 53 | --------------- 54 | 55 | n/a 56 | 57 | Notifications impact 58 | -------------------- 59 | 60 | n/a 61 | 62 | Other end user impact 63 | --------------------- 64 | 65 | /v1 will continue to work as normal and serve incoming requests. If the end 66 | user hits :8776/ they will see v2 listed as current and v1 listed as 67 | deprecated. 68 | 69 | Performance Impact 70 | ------------------ 71 | 72 | n/a 73 | 74 | Other deployer impact 75 | --------------------- 76 | 77 | The deployer will have to make sure they have enable_v1_api=true in their 78 | cinder.conf. In versions older than Juno, enable_v1_api was default to true, 79 | but Juno will have this option set to false by default. 80 | 81 | Developer impact 82 | ---------------- 83 | 84 | n/a 85 | 86 | Implementation 87 | ============== 88 | 89 | Assignee(s) 90 | ----------- 91 | 92 | Primary assignee: 93 | 94 | 95 | Work Items 96 | ---------- 97 | 98 | * Have devstack set enable_v1_api=true in lib/cinder. 99 | * Add changes to grenade to set enable_v1_api=true. 100 | * Add v2 support to Nova when using Cinder client, but to also support v1 if 101 | still enabled. 102 | * Add deprecation warnings to Cinder API for enabling v1 API, and set 103 | enable_v1_api to default to false. 104 | * Update documentation API ref/spec pages. Update the ops guide where 105 | appropriate. 106 | 107 | Dependencies 108 | ============ 109 | 110 | * Devstack support for Cinder v2: https://review.openstack.org/#/c/22489/ 111 | * Nova support for Cinder for v2: https://review.openstack.org/#/c/43986/ 112 | * Devstack defaulting to enable_v1_api=true: 113 | https://review.openstack.org/#/c/102568 114 | * Make sure greneade tests still pass. 
115 | 116 | 117 | Testing 118 | ======= 119 | 120 | Unit tests for v1 will still exist. Tempest will still do v1 tests in Juno. 121 | 122 | 123 | Documentation Impact 124 | ==================== 125 | 126 | V1 Cinder documentation will mention it's deprecated where it's appropriate. 127 | Instructions for upgrade and keeping v1 enabled can also be provided. This 128 | includes the reference, spec, and ops docs. 129 | 130 | References 131 | ========== 132 | 133 | n/a 134 | 135 | -------------------------------------------------------------------------------- /specs/juno/emc-vmax-driver-juno-update.rst: -------------------------------------------------------------------------------- 1 | .. 2 | This work is licensed under a Creative Commons Attribution 3.0 Unported 3 | License. 4 | 5 | http://creativecommons.org/licenses/by/3.0/legalcode 6 | 7 | ========================================== 8 | EMC VMAX Driver Update 9 | ========================================== 10 | 11 | https://blueprints.launchpad.net/cinder/+spec/emc-vmax-driver-juno-update 12 | 13 | This driver is an enhancement of the EMC SMI-S driver for VMAX. In Juno, 14 | the support for VNX will be removed from the SMI-S driver. Moving forward, 15 | this driver will support VMAX only. Some new features will be added for 16 | VMAX. 17 | 18 | Problem description 19 | =================== 20 | 21 | The existing EMC SMI-S iSCSI and FC driver has some missing features 22 | for VMAX. In previous release, support for Extend Volume and Create 23 | Volume from Snapshot were only implemented for VNX. In Juno, these 24 | features will be added for VMAX. 25 | 26 | In previous release, masking view, storage group, and initiator group 27 | need to be created ahead of time. In Juno, this will be automated. 
28 | 29 | Use Cases 30 | ========= 31 | 32 | Proposed change 33 | =============== 34 | 35 | The following features will be added to the SMI-S based driver to support 36 | VMAX: 37 | 38 | * Extend volume 39 | * Create volume from snapshot 40 | * Dynamically creating masking views, storage groups, and initiator groups 41 | * Striped volumes 42 | * FAST policies 43 | 44 | Alternatives 45 | ------------ 46 | 47 | None 48 | 49 | Data model impact 50 | ----------------- 51 | 52 | None 53 | 54 | REST API impact 55 | --------------- 56 | 57 | None 58 | 59 | Security impact 60 | --------------- 61 | 62 | None 63 | 64 | Notifications impact 65 | -------------------- 66 | 67 | None 68 | 69 | Other end user impact 70 | --------------------- 71 | 72 | User will be able to use the new features. The feature that dynamically 73 | creates masking views, storage groups, and initiator groups will greatly 74 | improve user experience. 75 | 76 | Performance Impact 77 | ------------------ 78 | 79 | None 80 | 81 | Other deployer impact 82 | --------------------- 83 | 84 | None 85 | 86 | Developer impact 87 | ---------------- 88 | 89 | None 90 | 91 | Implementation 92 | ============== 93 | 94 | Assignee(s) 95 | ----------- 96 | 97 | Primary assignee: 98 | xing-yang 99 | 100 | Other contributors: 101 | None 102 | 103 | Work Items 104 | ---------- 105 | 106 | * Extend volume 107 | * Create volume from snapshot 108 | * Create masking views, storage groups, and initiator groups dynamically 109 | * Striped volumes 110 | * FAST policies 111 | 112 | Dependencies 113 | ============ 114 | 115 | None 116 | 117 | Testing 118 | ======= 119 | 120 | New features need to be tested. 121 | 122 | Documentation Impact 123 | ==================== 124 | 125 | Need to document the changes in the block storage manual. 
126 | 127 | References 128 | ========== 129 | 130 | None 131 | -------------------------------------------------------------------------------- /specs/juno/i18n-enablement.rst: -------------------------------------------------------------------------------- 1 | .. 2 | This work is licensed under a Creative Commons Attribution 3.0 Unported 3 | License. 4 | 5 | http://creativecommons.org/licenses/by/3.0/legalcode 6 | 7 | ========================================== 8 | i18n Enablement for Cinder 9 | ========================================== 10 | 11 | Include the URL of your launchpad blueprint: 12 | 13 | https://blueprints.launchpad.net/cinder/+spec/i18n-enablement 14 | 15 | This BluePrint/Spec proposes completing the enablement of i18n 16 | (internationalization) support for Cinder. 17 | 18 | Internationalization implementation has been an on-going effort in OpenStack 19 | during recent releases. During the Icehouse release, much of the support 20 | for internationalization was already merged into Cinder. Specifically 21 | the update of Oslo's gettextutils (commit 22 | 1553a1e78ec262b044ce99b418103c91b7b580f6) completed much of 23 | the process. Removal of the use of str() in exceptions and messages 24 | was the other major piece of work that was implemented: (commit 25 | cbe1d5f5e22e5f792128643e4cdd6afb2ff2b5bf). 26 | 27 | To finalize this work in Juno we need to enable "lazy" translation. 28 | Enablement of lazy translation will allow end users to not only have 29 | logs produced in multiple languages, but adds the ability for REST 30 | API messages to also be returned in the language chosen by the user. 31 | This functionality is important to support the use of OpenStack by the 32 | international community. 33 | 34 | Use Cases 35 | ========= 36 | 37 | Problem description 38 | =================== 39 | 40 | Currently, Cinder does not have the all the code in place to support 41 | lazy translation. 
The code associated with this blueprint will add 42 | the appropriate code and enable translation of REST API responses. 43 | 44 | Proposed change 45 | =============== 46 | 47 | The code for this change will add 'gettextutils.enable_lazy() to each of 48 | the binaries in bin. 49 | 50 | It will also remove the use of gettextutils.install() in each of the 51 | binary files. Instead it will add the explicit import of _() in all 52 | files that are not already importing the _() function. The need for 53 | the change to explicitly import _() is documented 54 | in bug https://bugs.launchpad.net/cinder/+bug/1306275 . 55 | 56 | Alternatives 57 | ------------ 58 | 59 | None. 60 | 61 | Data model impact 62 | ----------------- 63 | 64 | None. 65 | 66 | REST API impact 67 | --------------- 68 | 69 | There is no additional changes to the REST API other than the fact 70 | that the change enables the customer to specify the language they 71 | wish REST API responses to be returned in using the Accept-Language 72 | option. 73 | 74 | Security impact 75 | --------------- 76 | 77 | None. 78 | 79 | Notifications impact 80 | -------------------- 81 | 82 | None. 83 | 84 | Other end user impact 85 | --------------------- 86 | 87 | None. 88 | 89 | Performance Impact 90 | ------------------ 91 | 92 | None. 93 | 94 | Other deployer impact 95 | --------------------- 96 | 97 | Once merged this feature is immediately available to users. 98 | 99 | 100 | Developer impact 101 | ---------------- 102 | 103 | The developer impacts have already been in place for some time. Developers 104 | have been using _() around messages that need translation. 105 | 106 | 107 | Implementation 108 | ============== 109 | 110 | Assignee(s) 111 | ----------- 112 | 113 | Primary assignee: 114 | (Jungleboyj) 115 | 116 | Other contributors: 117 | 118 | 119 | Work Items 120 | ---------- 121 | 122 | I am planning to implement this as two patches. 
The first will be the 123 | patch to ensure that _() is being explicitly imported. The dependent 124 | patch will then set enable_lazy(). 125 | 126 | 127 | Dependencies 128 | ============ 129 | 130 | None. 131 | 132 | 133 | Testing 134 | ======= 135 | 136 | There will be a tempest test added for Cinder that will ensure that 137 | lazy translation is working properly. 138 | 139 | 140 | Documentation Impact 141 | ==================== 142 | 143 | None. 144 | 145 | 146 | References 147 | ========== 148 | 149 | None. 150 | -------------------------------------------------------------------------------- /specs/juno/limit-volume-copy-bandwidth.rst: -------------------------------------------------------------------------------- 1 | .. 2 | This work is licensed under a Creative Commons Attribution 3.0 Unported 3 | License. 4 | 5 | http://creativecommons.org/licenses/by/3.0/legalcode 6 | 7 | ========================================== 8 | Limit Bandwidth of Volume Copy 9 | ========================================== 10 | 11 | Include the URL of your launchpad blueprint: 12 | 13 | https://blueprints.launchpad.net/cinder/+spec/limit-volume-copy-bandwidth 14 | 15 | This proposes adding a new config to limit bandwidth for volume copy to 16 | mitigate interference to other instance performance during: 17 | 18 | * new volume creation from a image 19 | * backup 20 | * volume deletion (when dd if=/dev/zero etc. is chosen to wipe) 21 | 22 | 23 | Problem description 24 | =================== 25 | 26 | Currently, volume copy operations consumes disk I/O bandwidth heavily and may 27 | slow down the other guest instances. 28 | 29 | "ionice" option is already implemented in some cases, but it is not always 30 | usable. e.g. When instances directly access to the storage and doesn't go 31 | through I/O scheduler of cinder control node, ionice cannot control I/O 32 | priority and instances access may slow down. 
33 | 34 | Use Cases 35 | ========= 36 | 37 | Proposed change 38 | =============== 39 | 40 | A new config named 'volume_copy_bps_limit' will be added to determine max 41 | bandwidth (byte per second) consumed by volume copy. 42 | 43 | When CONF.volume_copy_bps_limit is zero (default), no limitation is applied, 44 | and no cgroup is created. 45 | 46 | Otherwise, bandwidth limitation is applied to volume copy. For example, if the 47 | volume copy is done by 'dd' command, it can be implemented by putting 'dd' 48 | into blkio cgroup for throttling. 49 | 50 | 51 | 52 | Alternatives 53 | ------------ 54 | 55 | When volume copy commands have an option for I/O throttling, the usage of such 56 | options are preferable. 57 | Putting whole cinder-volume processes into blkio cgroups could also be a 58 | solution for this, though it is required to provide a way to set rate limit to 59 | newly added block devices when new volume is created. 60 | 61 | 62 | Data model impact 63 | ----------------- 64 | 65 | None 66 | 67 | REST API impact 68 | --------------- 69 | 70 | None 71 | 72 | Security impact 73 | --------------- 74 | 75 | None 76 | 77 | Notifications impact 78 | -------------------- 79 | 80 | None 81 | 82 | Other end user impact 83 | --------------------- 84 | 85 | None 86 | 87 | Performance Impact 88 | ------------------ 89 | 90 | When volume copy I/O bandwidth is limited, it takes more time to complete 91 | volume copy. Users are required to balance between volume copy performance 92 | and interference to instances performance. 93 | 94 | 95 | Other deployer impact 96 | --------------------- 97 | 98 | * This feature is disabled by default. Users who want to use this feature need 99 | to set 'volume_copy_bps_limit' in cinder.conf. 
100 | 101 | Developer impact 102 | ---------------- 103 | 104 | None 105 | 106 | Implementation 107 | ============== 108 | 109 | Assignee(s) 110 | ----------- 111 | 112 | Primary assignee: 113 | tomoki-sekiyama-g 114 | 115 | Work Items 116 | ---------- 117 | 118 | * Implement cgroup blkio setup functions 119 | * Implement I/O rate limit for volume_utils.copy_volume 120 | * Implement I/O rate limit for other image format such as qcow 121 | 122 | Dependencies 123 | ============ 124 | 125 | None 126 | 127 | Testing 128 | ======= 129 | 130 | None 131 | 132 | Documentation Impact 133 | ==================== 134 | 135 | The cinder client documentation will need to be updated to reflect the new 136 | config. 137 | 138 | 139 | References 140 | ========== 141 | 142 | None 143 | -------------------------------------------------------------------------------- /specs/juno/oracle-zfssa-cinder-driver.rst: -------------------------------------------------------------------------------- 1 | .. 2 | This work is licensed under a Creative Commons Attribution 3.0 Unported 3 | License. 4 | 5 | http://creativecommons.org/licenses/by/3.0/legalcode 6 | 7 | ========================================== 8 | Oracle ZFS Storage Appliance iSCSI Driver 9 | ========================================== 10 | 11 | Include the URL of your launchpad blueprint: 12 | 13 | https://blueprints.launchpad.net/cinder/+spec/oracle-zfssa-cinder-driver 14 | 15 | ZFSSA ISCSI Driver is designed for ZFS Storage Appliance product 16 | line (ZS3-2, ZS3-4, ZS3-ES, 7420 and 7320). The driver provides the ability 17 | to create iSCSI volumes which are exposed from the ZFS Storage Appliance 18 | for use by VM instantiated by Openstack's Nova module. 19 | 20 | 21 | Problem description 22 | =================== 23 | 24 | Currently there is no support for ZFS Storage Appliance product line from 25 | Openstack Cinder. 
26 | 27 | Use Cases 28 | ========= 29 | 30 | Proposed change 31 | =============== 32 | iSCSI driver uses REST API to communicate out of band with the storage 33 | controller. 34 | The new driver would be located under cinder/volume/drivers/zfssa, and 35 | it would be able to perform the following: 36 | 37 | * Create/Delete Volume 38 | * Extend Volume 39 | * Create/Delete Snapshot 40 | * Create Volume from Snapshot 41 | * Delete Volume Snapshot 42 | * Attach/Detach Volume 43 | * Get Volume Stats 44 | 45 | Additionally a ZFS Storage Appliance workflow (cinder.akwf) is provided 46 | to help the admin set up a user and role in the appliance with enough 47 | privileges to do cinder operations. 48 | Also, cinder.conf has to be configured properly with zfssa specific 49 | properties for the driver to work. 50 | 51 | Alternatives 52 | ------------ 53 | 54 | None 55 | 56 | Data model impact 57 | ----------------- 58 | 59 | None 60 | 61 | REST API impact 62 | --------------- 63 | 64 | None 65 | 66 | Security impact 67 | --------------- 68 | 69 | None 70 | 71 | Notifications impact 72 | -------------------- 73 | 74 | None 75 | 76 | Other end user impact 77 | --------------------- 78 | 79 | User will be able to use ZFS Storage Appliance product line with 80 | OpenStack Cinder. 81 | 82 | Performance Impact 83 | ------------------ 84 | 85 | None 86 | 87 | Other deployer impact 88 | --------------------- 89 | 90 | None 91 | 92 | Developer impact 93 | ---------------- 94 | 95 | None 96 | 97 | Implementation 98 | ============== 99 | 100 | Assignee(s) 101 | ----------- 102 | 103 | Primary assignee: 104 | juan-c-zuluaga 105 | 106 | Other contributors: 107 | brian-ruff 108 | 109 | Work Items 110 | ---------- 111 | 112 | All the features that ZFS Storage Appliance iSCSI does. 
113 | Add CI unit test for ZFS Storage Appliance iSCSI Cinder Driver 114 | 115 | Dependencies 116 | ============ 117 | 118 | Minimum ZFS Storage Appliance with OS8.2 119 | 120 | Testing 121 | ======= 122 | 123 | CI will be performed for ZFS Storage Appliance iSCSI Driver. 124 | 125 | Documentation Impact 126 | ==================== 127 | 128 | Cinder Support Matrix should be updated. 129 | https://wiki.openstack.org/wiki/CinderSupportMatrix 130 | 131 | 132 | References 133 | ========== 134 | 135 | http://www.oracle.com/us/products/servers-storage/storage/nas/overview/index.html 136 | 137 | **ZFS Storage Appliance Workflow.** 138 | 139 | http://docs.oracle.com/cd/E26765_01/html/E26397/maintenance__workflows.html#maintenance__workflows__bui 140 | -------------------------------------------------------------------------------- /specs/juno/support-GPFS-nas-ibmnas-driver.rst: -------------------------------------------------------------------------------- 1 | .. 2 | This work is licensed under a Creative Commons Attribution 3.0 Unported 3 | License. 4 | 5 | http://creativecommons.org/licenses/by/3.0/legalcode 6 | 7 | ===================================================================== 8 | Extending IBMNAS driver to support NAS based GPFS storage deployments 9 | ===================================================================== 10 | 11 | https://blueprints.launchpad.net/cinder/+spec/add-gpfs-nas-to-ibmnas 12 | 13 | Currently, the ibmnas driver works for an nfs export from Storwize V7000 14 | Unified and SONAS products. It does not have the capability to work with 15 | nfs exports provided from a gpfs server. 16 | 17 | 18 | Problem description 19 | =================== 20 | 21 | Currently, the ibmnas driver does not have the capability to work with nfs 22 | exports provided from a gpfs server. 23 | 24 | * Lacking this capability will limit the end users from using remote gpfs 25 | NAS servers as a backend in OpenStack environment. 
26 | 27 | Use Cases 28 | ========= 29 | 30 | Proposed change 31 | =============== 32 | 33 | * Add/Reuse functions in ibmnas.py to support all minimum features listed 34 | (github.com/openstack/cinder/blob/master/doc/source/devref/drivers.rst) 35 | for NAS based GPFS server backends. 36 | 37 | 38 | Alternatives 39 | ------------ 40 | 41 | The existing gpfs driver can be extended to support NAS based gpfs storage 42 | deployments. But this implementation requires many other new funtions to be 43 | introduced, which are already existing and can be reused in ibmnas driver. 44 | Apart from this in future, we have planned to support all NFS/GPFS related 45 | IBM products via ibmnas driver. Hence extending ibmnas driver is more 46 | advantageous than extending gpfs driver. 47 | 48 | Data model impact 49 | ----------------- 50 | 51 | None 52 | 53 | REST API impact 54 | --------------- 55 | 56 | None 57 | 58 | Security impact 59 | --------------- 60 | 61 | No specific security issues needs to be considered. Insecure file permissions 62 | (OSSN-0014) is fixed in the driver and is addressed by 63 | https://review.openstack.org/#/c/101919/ 64 | 65 | Notifications impact 66 | -------------------- 67 | 68 | None 69 | 70 | Other end user impact 71 | --------------------- 72 | 73 | None 74 | 75 | Performance Impact 76 | ------------------ 77 | 78 | None 79 | 80 | Other deployer impact 81 | --------------------- 82 | 83 | This requires an additional option to be configured while deploying 84 | OpenStack with IBMNAS products (sonas, v7ku, gpfs-nas). 
85 | 86 | * New configuration option needs to be filled in cinder.conf 87 | ibmnas_platform_type = | | 88 | 89 | * This change needs to be explicitly enabled on IBMNAS driver CI certification 90 | 91 | Developer impact 92 | ---------------- 93 | 94 | None 95 | 96 | 97 | Implementation 98 | ============== 99 | 100 | Assignee(s) 101 | ----------- 102 | 103 | Primary assignee: 104 | sasikanth 105 | 106 | Other contributors: 107 | nilesh-bhosale 108 | 109 | Work Items 110 | ---------- 111 | 112 | * Add/Reuse functions in ibmnas.py to support NAS based GPFS storage 113 | deployments. 114 | 115 | 116 | Dependencies 117 | ============ 118 | 119 | None 120 | 121 | 122 | Testing 123 | ======= 124 | 125 | * Unit tests - Existing test_ibmnas.py will be improved to handle the new 126 | code changes/functions. 127 | * Tempest tests - No additional testcases needs to be written, this feature 128 | can be tested with the existing tempest. 129 | * Cinder driver certification tests - Driver certification tests will be 130 | executed and results will be submitted to the community (as the changes will 131 | altogether enable a new storage platform). 132 | * CI tests - We are working towards 3rd party CI environment and will 133 | continuously run tests across the respective hardware platform. 134 | 135 | 136 | Documentation Impact 137 | ==================== 138 | 139 | ibmnas driver documentation needs to updated with this new configuration 140 | option. 141 | 142 | ibmnas_platform_type = | | 143 | 144 | This option is used for selecting the appropriate backend storage. 
145 | Valid values are v7ku for using IBM Storwize V7000 Unified 146 | sonas for using IBM Scale Out NAS and 147 | gpfs-nas for using NAS based GPFS server deployment 148 | 149 | 150 | References 151 | ========== 152 | 153 | None 154 | -------------------------------------------------------------------------------- /specs/juno/support-reset-state-for-backup.rst: -------------------------------------------------------------------------------- 1 | .. 2 | This work is licensed under a Creative Commons Attribution 3.0 Unported 3 | License. 4 | 5 | http://creativecommons.org/licenses/by/3.0/legalcode 6 | 7 | ====================== 8 | Backup State Reset API 9 | ====================== 10 | 11 | Include the URL of your launchpad blueprint: 12 | 13 | https://blueprints.launchpad.net/cinder/+spec/support-reset-state-for-backup 14 | 15 | Provide an API to reset the state of a backup stuck in creating or 16 | restoring. 17 | 18 | Problem description 19 | =================== 20 | 21 | Since there are a volume reset-state function and a snapshot reset-state function, 22 | backup needs reset-state as well. 23 | When creating or restoring a backup, the backup may be left stuck in creating or 24 | restoring status due to the database or rabbitmq going down, etc. 25 | Currently we could only solve these problems by restarting cinder-backup. This 26 | bp is to provide another means for administrators to solve these problems by calling 27 | the backup reset state API. 28 | 29 | 1. Resetting status from creating/restoring to available 30 | 31 | 1) restoring --> available 32 | Directly change the backup status to 'error', because the backup data 33 | already exists in the storage backend. 34 | 2) creating --> available 35 | Use backup-create routine as an example to illustrate what benefit we can 36 | get from backup-reset function. Backup-create routine first backs up the volume 37 | and metadata, and then updates the status of volume and backup. 
If database 38 | just went down after update the volume's status to 'available', leaving the 39 | backup's status to be 'creating' without having methods to deal with 40 | through API. 41 | 42 | If we have reset-state API and resetting status from creating to available, we 43 | first verify whether the backup is ok on storage backend. 44 | If so, we change backup status from creating to available. 45 | If not, we throw an exception and change backup status from creating to error. 46 | 47 | 2. Resetting status from creating/restoring to error 48 | Directly change the backup status to 'error' without restart cinder-backup. 49 | 50 | Use Cases 51 | ========= 52 | 53 | Proposed change 54 | =============== 55 | 56 | A new API function and corresponding cinder command will be added to reset 57 | the status of backups. 58 | 59 | The initial proposal is to provide a method for administrator to handle the 60 | backup item stucked in status like creating or restoring. 61 | 62 | Alternatives 63 | ------------ 64 | 65 | Login in the cinder database, use the following update sql to change the 66 | backup item. 67 | 68 | :: 69 | 70 | update backups set status='some status' where id='xxx-xxx-xxx-xxx'; 71 | 72 | Data model impact 73 | ----------------- 74 | None 75 | 76 | REST API impact 77 | --------------- 78 | 79 | Add a new REST API to reset backup states: 80 | * POST /v2/{tenant_id}/backups/{id}/action 81 | 82 | JSON request schema definition:: 83 | 84 | 'backup-reset_status': { 85 | 'status': 'available' 86 | } 87 | 88 | Normal http response code: 89 | 202 90 | 91 | Expected error http response code: 92 | 400 93 | 94 | Security impact 95 | --------------- 96 | None 97 | 98 | Notifications impact 99 | -------------------- 100 | None 101 | 102 | Other end user impact 103 | --------------------- 104 | 105 | A new command, backup-reset-state, will be added to python-cinderclient. This 106 | command mirrors the underlying API function. 
107 | 108 | Resetting the status of a backup can be performed by: 109 | $ cinder backup-reset-state --state 110 | 111 | 112 | Performance Impact 113 | ------------------ 114 | None 115 | 116 | Other deployer impact 117 | --------------------- 118 | None 119 | 120 | Developer impact 121 | ---------------- 122 | None 123 | 124 | 125 | Implementation 126 | ============== 127 | 128 | Assignee(s) 129 | ----------- 130 | 131 | Primary assignee: 132 | ling-yun 133 | 134 | Work Items 135 | ---------- 136 | 137 | * Implement REST API 138 | * Implement cinder client functions 139 | * Implement cinder command 140 | 141 | Dependencies 142 | ============ 143 | None 144 | 145 | Testing 146 | ======= 147 | None 148 | 149 | 150 | Documentation Impact 151 | ==================== 152 | 153 | The cinder client documentation will need to be updated to reflect the new 154 | command. 155 | 156 | The cinder API documentation will need to be updated to reflect the REST API 157 | changes. 158 | 159 | 160 | References 161 | ========== 162 | 163 | None 164 | -------------------------------------------------------------------------------- /specs/juno/support-volume-backup-for-qcow2.rst: -------------------------------------------------------------------------------- 1 | .. 2 | This work is licensed under a Creative Commons Attribution 3.0 Unported 3 | License. 4 | 5 | http://creativecommons.org/licenses/by/3.0/legalcode 6 | 7 | ================================ 8 | Support volume backup for qcow2 9 | ================================ 10 | 11 | https://blueprints.launchpad.net/cinder/+spec/support-volume-backup-for-qcow2 12 | 13 | Currently, cinder-backup doesn't support qcow2 format disk. Add the support 14 | for it will make drivers which use qcow2 as volume, such as glusterfs etc, 15 | work together with cinder-backup, and can also make nfs driver use qcow2 as 16 | volume be possible. 
17 | 18 | Problem description 19 | =================== 20 | 21 | Currently, cinder-backup doesn't support qcow2 format disk because the backup 22 | code assumes the source volume is a raw volume. The destination (i.e. swift, 23 | rbd) should absolutely remain universal across all volume back-ends. 24 | 25 | Use Cases 26 | ========= 27 | 28 | Proposed change 29 | =============== 30 | 31 | * Add qemu-nbd support to cinder-backup. Qemu-nbd can mount qcow2 volume as 32 | a raw device to the host 33 | * The backup_volume method in base class of remotefs driver (cinder.volume. 34 | drivers.nfs.RemoteFsDriver:backup_volume) will mount qcow2 volume as nbd 35 | device before call backup_service's backup method 36 | 37 | Alternatives 38 | ------------ 39 | 40 | None 41 | 42 | Data model impact 43 | ----------------- 44 | 45 | None 46 | 47 | REST API impact 48 | --------------- 49 | 50 | None 51 | 52 | Security impact 53 | --------------- 54 | 55 | None 56 | 57 | Notifications impact 58 | -------------------- 59 | 60 | None 61 | 62 | Other end user impact 63 | --------------------- 64 | 65 | None 66 | 67 | Performance Impact 68 | ------------------ 69 | 70 | None 71 | 72 | Other deployer impact 73 | --------------------- 74 | 75 | Storage node which running cinder-volume will contains nbd kernel module. 76 | 77 | Developer impact 78 | ---------------- 79 | 80 | None 81 | 82 | 83 | Implementation 84 | ============== 85 | 86 | Assignee(s) 87 | ----------- 88 | 89 | Primary assignee: 90 | Trump.Zhang 91 | 92 | Work Items 93 | ---------- 94 | 95 | * Add qemu-nbd support to cinder-backup. Qemu-nbd can mount qcow2 volume as 96 | a raw device to the host 97 | * The backup_volume method in base class of remotefs driver (cinder.volume. 
98 | drivers.nfs.RemoteFsDriver:backup_volume) will mount qcow2 volume as nbd 99 | device before call backup_service's backup method 100 | 101 | 102 | Dependencies 103 | ============ 104 | 105 | None 106 | 107 | 108 | Testing 109 | ======= 110 | 111 | None 112 | 113 | 114 | Documentation Impact 115 | ==================== 116 | 117 | None 118 | 119 | 120 | References 121 | ========== 122 | 123 | None 124 | -------------------------------------------------------------------------------- /specs/juno/support-volume-num-weighter.rst: -------------------------------------------------------------------------------- 1 | .. 2 | This work is licensed under a Creative Commons Attribution 3.0 Unported 3 | License. 4 | 5 | http://creativecommons.org/licenses/by/3.0/legalcode 6 | 7 | =========================== 8 | Support Volume Num Weighter 9 | =========================== 10 | 11 | Include the URL of your launchpad blueprint: 12 | 13 | https://blueprints.launchpad.net/cinder/+spec/support-volume-num-weighter 14 | 15 | Provide a mean to help improve volume-backends' IO balance and volumes' IO 16 | performance. 17 | 18 | Problem description 19 | =================== 20 | Currently cinder support choosing volume backend according to free_capacity 21 | and allocated_capacity. 22 | Volume Num Weighter is that scheduler could choose volume backend based on 23 | volume number in volume backend, which could provide another mean to help 24 | improve volume-backends' IO balance and volumes' IO performance. 25 | 26 | Explain the benefit from volume number weighter by this use case. 27 | 28 | Assume we have volume-backend-A with 300G and volume-backend-B with 100G. 29 | Volume-backend-A's IO capabilities is the same volume-backend-B IO 30 | capabilities. 31 | Each volume's IO usage are almost the same. 32 | Use CapacityWeigher as weighter class. 33 | 34 | Concrete Use Case: 35 | If we create six 10G volumes, these volumes would placed in volume-backend A. 
36 | All six volume IO streams have been pushed onto volume-backend-A, which would 37 | cause volume-backend-A to do much IO scheduling work. At the same time, 38 | volume-backend-B has no volume and its IO capabilities have been wasted. 39 | 40 | If we have a volume number weighter, the scheduler could do proper initial placement 41 | for these volumes----three on volume-backend A, three on volume-backend-B. So 42 | that we can make full use of all volume-backends' IO capabilities to help 43 | improve volume-backends' IO balance and volumes' IO performance. 44 | 45 | Use Cases 46 | ========= 47 | 48 | Proposed change 49 | =============== 50 | 51 | Implement a volume number weighter: VolumeNumberWeighter. 52 | 1. The _weigh_object function returns the volume-backend's non-deleted volume number by 53 | using db api volume_get_all_by_host. 54 | 2. Add a new config item volume_num_weight_multiplier and its default value is 55 | -1, which means to spread volumes among volume backends according to 56 | the volume-backend's non-deleted volume number. 57 | 58 | Since VolumeNumberWeighter is mutually exclusive with 59 | CapacityWeigher/AllocatedCapacityWeigher and cinder's 60 | scheduler_default_weighers is CapacityWeigher, we could set 61 | scheduler_default_weighers=VolumeNumberWeighter in 62 | /etc/cinder/cinder.conf and restart cinder-scheduler to make 63 | VolumeNumberWeighter take effect. 64 | 65 | VolumeNumberWeighter, which provides a means to help improve 66 | volume-backends' IO balance and volumes' IO performance, 67 | could not replace CapacityWeigher/AllocatedCapacityWeigher, 68 | because CapacityWeigher/AllocatedCapacityWeigher could be used to provide 69 | balance of volume-backends' free storage space when users focus more on free 70 | space balance between volume-backends. 71 | 72 | 73 | 74 | Alternatives 75 | ------------ 76 | 77 | None. 
78 | 79 | Data model impact 80 | ----------------- 81 | None 82 | 83 | REST API impact 84 | --------------- 85 | None 86 | 87 | Security impact 88 | --------------- 89 | None 90 | 91 | Notifications impact 92 | -------------------- 93 | None 94 | 95 | Other end user impact 96 | --------------------- 97 | None 98 | 99 | Performance Impact 100 | ------------------ 101 | None 102 | 103 | Other deployer impact 104 | --------------------- 105 | None 106 | 107 | Developer impact 108 | ---------------- 109 | None 110 | 111 | 112 | Implementation 113 | ============== 114 | 115 | Assignee(s) 116 | ----------- 117 | 118 | Primary assignee: 119 | ling-yun 120 | 121 | Work Items 122 | ---------- 123 | 124 | * Implement Volume Number Weighter 125 | * Add weighter option of Volume Number Weighter to OPENSTACK CONFIGURATION 126 | REFERENCE 127 | 128 | Dependencies 129 | ============ 130 | None 131 | 132 | Testing 133 | ======= 134 | Set up volume-backend-A with 300G and volume-backend-B with 100G. 135 | Create six 10G volumes, the expected result is 3 volumes in 136 | volume-backend A and 3 volumes in volume-backend B. 137 | 138 | 139 | Documentation Impact 140 | ==================== 141 | 142 | Add weighter option of Volume Number Weighter to OPENSTACK CONFIGURATION 143 | REFERENCE. 144 | 145 | 146 | References 147 | ========== 148 | 149 | None 150 | -------------------------------------------------------------------------------- /specs/juno/united-policy.json-in-cinder.rst: -------------------------------------------------------------------------------- 1 | .. 2 | This work is licensed under a Creative Commons Attribution 3.0 Unported 3 | License. 4 | 5 | http://creativecommons.org/licenses/by/3.0/legalcode 6 | 7 | ============================ 8 | United Policy.json In Cinder 9 | ============================ 10 | 11 | https://blueprints.launchpad.net/cinder/+spec/united-policy-in-cinder 12 | 13 | Currently, there is two policy.json files in cinder. 
One for cinder code, 14 | one for unit test code. It's not convenient for the developer and easy to 15 | miss one. This blueprint is aim to united them. Then unit test code will 16 | use the policy.json in the code. 17 | 18 | Problem description 19 | =================== 20 | 21 | Currently, there is two policy.json files in cinder. One for cinder code, 22 | one for unit test code. It's not convenient for the developer and easy to 23 | miss one. 24 | 25 | Use Cases 26 | ========= 27 | 28 | Proposed change 29 | =============== 30 | 31 | * Delete the policy.json under the test 32 | 33 | * Modify the unittest to use the file /etc/cinder/policy.json 34 | 35 | Alternatives 36 | ------------ 37 | 38 | None 39 | 40 | Data model impact 41 | ----------------- 42 | 43 | None 44 | 45 | REST API impact 46 | --------------- 47 | 48 | None 49 | 50 | Security impact 51 | --------------- 52 | 53 | None 54 | 55 | Notifications impact 56 | -------------------- 57 | 58 | None 59 | 60 | Other end user impact 61 | --------------------- 62 | 63 | None 64 | 65 | Performance Impact 66 | ------------------ 67 | 68 | None 69 | 70 | Other deployer impact 71 | --------------------- 72 | 73 | None 74 | 75 | Developer impact 76 | ---------------- 77 | 78 | None 79 | 80 | 81 | Implementation 82 | ============== 83 | 84 | Assignee(s) 85 | ----------- 86 | 87 | Primary assignee: 88 | xiaoding 89 | 90 | Work Items 91 | ---------- 92 | 93 | * Delete policy.json in the test 94 | 95 | * Modify the unittest to use /etc/cinder/policy.json 96 | 97 | 98 | Dependencies 99 | ============ 100 | 101 | None 102 | 103 | 104 | Testing 105 | ======= 106 | 107 | None 108 | 109 | 110 | Documentation Impact 111 | ==================== 112 | 113 | None 114 | 115 | 116 | References 117 | ========== 118 | 119 | None 120 | -------------------------------------------------------------------------------- /specs/juno/vmdk-backup.rst: -------------------------------------------------------------------------------- 1 | .. 
2 | This work is licensed under a Creative Commons Attribution 3.0 Unported 3 | License. 4 | 5 | http://creativecommons.org/licenses/by/3.0/legalcode 6 | 7 | ============================================================ 8 | Support backup and restore of volumes created by VMDK driver 9 | ============================================================ 10 | 11 | https://blueprints.launchpad.net/cinder/+spec/vmdk-backup 12 | 13 | The volumes created by the VMDK driver are virtual disks stored in datastores 14 | managed by ESX or vCenter server. Currently, the ``backup-create`` and 15 | ``backup-restore`` operations are not supported for these volumes. This 16 | blueprint proposes adding support for these operations in VMDK driver. 17 | 18 | Problem description 19 | =================== 20 | 21 | The default implementation of ``backup-create``\\ ``backup-restore`` does the 22 | following steps: 23 | 24 | * Attach the volume as a block device or file. 25 | 26 | * Backup\\restore the file by calling backup service. 27 | 28 | * Detach the volume. 29 | 30 | It uses an instance of ``InitiatorConnector`` (determined by the back-end 31 | driver protocol) to do the actual\\detach. There is no ``InitiatorConnector`` 32 | for the ``vmdk`` protocol and hence the attach\\detach fails for volumes 33 | created by the VMDK driver. This blueprint proposes adding support for 34 | ``backup-create``\\ ``backup-restore`` for these volumes. 35 | 36 | Use Cases 37 | ========= 38 | 39 | Proposed change 40 | =============== 41 | 42 | The change involves overriding the default implementations of ``backup_volume`` 43 | and ``restore_backup`` methods in ``VMwareEsxVmdkDriver``. The steps in 44 | ``backup_volume`` are listed below: 45 | 46 | * Create the backing VM if it not found. 47 | 48 | * Download the stream-optimized version of the virtual disk corresponding to 49 | the volume to a temporary directory. 
50 | 51 | * Call ``backup_service.backup()`` method to backup the stream-optimized 52 | virtual disk file. 53 | 54 | * Delete the temporary file. 55 | 56 | Following are the steps in ``restore_backup``: 57 | 58 | * Call ``backup_service.restore()`` to download the stream-optimized virtual 59 | disk file to a temporary directory. 60 | 61 | * If the backing VM doesn't exist (in the case of restoring the backup to 62 | create a new volume), import the stream-optimized virtual disk file to create 63 | a new backing VM. 64 | 65 | * If the backing VM exists, import the stream-optimized virtual disk file to 66 | create a temporary VM and reconfigure the backing VM to replace its virtual 67 | disk with that of the temporary VM. 68 | 69 | * Delete the temporary file and temporary VM. 70 | 71 | Alternatives 72 | ------------ 73 | 74 | **HTTP read/write**: It is possible to create an HTTP connection to read/write 75 | from/to a virtual disk file in vCenter/ESX and an adapter can be written for 76 | this connection to support some of the file operations required by the backup 77 | drivers. This implementation works for both Swift and Ceph backup drivers. But 78 | the TSM backup driver raises ``InvalidBackup`` exception if the volume to be 79 | backed up is not a block device or regular file. 
80 | 81 | Data model impact 82 | ----------------- 83 | 84 | None 85 | 86 | REST API impact 87 | --------------- 88 | 89 | None 90 | 91 | Security impact 92 | --------------- 93 | 94 | None 95 | 96 | Notifications impact 97 | -------------------- 98 | 99 | None 100 | 101 | Other end user impact 102 | --------------------- 103 | 104 | None 105 | 106 | Performance Impact 107 | ------------------ 108 | 109 | None 110 | 111 | Other deployer impact 112 | --------------------- 113 | 114 | None 115 | 116 | Developer impact 117 | ---------------- 118 | 119 | None 120 | 121 | 122 | Implementation 123 | ============== 124 | 125 | Assignee(s) 126 | ----------- 127 | 128 | Primary assignee: 129 | vbala 130 | 131 | Other contributors: 132 | None 133 | 134 | Work Items 135 | ---------- 136 | 137 | * ``backup_volume`` method 138 | * ``restore_backup`` method 139 | 140 | Dependencies 141 | ============ 142 | 143 | None 144 | 145 | 146 | Testing 147 | ======= 148 | 149 | None 150 | 151 | Documentation Impact 152 | ==================== 153 | 154 | None 155 | 156 | References 157 | ========== 158 | 159 | None 160 | -------------------------------------------------------------------------------- /specs/kilo/backup-notification.rst: -------------------------------------------------------------------------------- 1 | .. 2 | This work is licensed under a Creative Commons Attribution 3.0 Unported 3 | License. 4 | 5 | http://creativecommons.org/licenses/by/3.0/legalcode 6 | 7 | ========================================== 8 | Backup Notification 9 | ========================================== 10 | 11 | https://blueprints.launchpad.net/cinder/+spec/backup-notification 12 | 13 | This blueprint proposes to add the notification support to the backup 14 | service in Cinder, so that cinder can report the usage status to Ceilometer 15 | in backup create, delete and restore. 
16 | 17 | Problem description 18 | =================== 19 | 20 | Cinder is supposed to send the notifications to ceilometer to report the 21 | resource usage status. This notification support has been implemented for 22 | the volume and the volume snapshot, but not the backup. 23 | 24 | Use Cases 25 | ========= 26 | 27 | Proposed change 28 | =============== 29 | 30 | Create backup notification: 31 | 32 | * Send a notification to inform the Ceilometer the backup create begins. 33 | 34 | * Send a notification to inform the Ceilometer the backup create ends. 35 | 36 | Delete backup notification: 37 | 38 | * Send a notification to inform the Ceilometer the backup delete begins. 39 | 40 | * Send a notification to inform the Ceilometer the backup delete ends. 41 | 42 | Restore backup notification: 43 | 44 | * Send a notification to inform the Ceilometer the backup restore begins. 45 | 46 | * Send a notification to inform the Ceilometer the backup restore ends. 47 | 48 | Progress notification: 49 | 50 | * It is possible that some drivers can send a create.progress notifications 51 | periodically. We make it configurable if the backup driver supports to 52 | send periodic notifications. 53 | 54 | The backup information sent to Ceilometer includes backup id, project id, 55 | user id, available zone, host, display name, creation time, status, volume 56 | id, size, service metadata, service and fail reason. 57 | 58 | For the progress notification, an additional data indicating the percentage 59 | of the backup progress will be sent as well. 60 | 61 | Alternatives 62 | ------------ 63 | 64 | None. 65 | 66 | Data model impact 67 | ----------------- 68 | 69 | None. 70 | 71 | REST API impact 72 | --------------- 73 | 74 | None. 75 | 76 | Security impact 77 | --------------- 78 | 79 | None. 80 | 81 | Notifications impact 82 | -------------------- 83 | 84 | This blueprint will add the notification support for the backup service. 
85 | 86 | Other end user impact 87 | --------------------- 88 | 89 | None. 90 | 91 | Performance Impact 92 | ------------------ 93 | 94 | None. 95 | 96 | Other deployer impact 97 | --------------------- 98 | 99 | * The configuration option backup_object_number_per_notification has 100 | been added to indicate how many chunks or objects have been sent 101 | to the Ceilometer. It applies to the object or chunk based backup 102 | service, e.g. Swift, Ceph. 103 | 104 | Developer impact 105 | ---------------- 106 | 107 | None. 108 | 109 | Implementation 110 | ============== 111 | 112 | Assignee(s) 113 | ----------- 114 | 115 | Primary assignee: 116 | Vincent Hou 117 | 118 | Other contributors: 119 | None 120 | 121 | Work Items 122 | ---------- 123 | 124 | * Add the notification for the backup usage when creating a backup 125 | * Add the notification for the backup usage when deleting a backup 126 | * Add the notification for the backup usage when restoring a backup 127 | * Add the progress notification for the backup usage when talking an object 128 | store as the backup service. 129 | 130 | Dependencies 131 | ============ 132 | 133 | None 134 | 135 | Testing 136 | ======= 137 | 138 | * Unit tests will be added for the backup notification calls. 139 | 140 | Documentation Impact 141 | ==================== 142 | 143 | * The configuration option to configure the progress notifications 144 | needs to be added for the backup driver, which supports to send 145 | this type of notification. 146 | 147 | References 148 | ========== 149 | 150 | * Backup notification blueprint and bug replication design session 151 | https://blueprints.launchpad.net/cinder/+spec/backup-notification 152 | https://bugs.launchpad.net/cinder/+bug/1326431 153 | 154 | -------------------------------------------------------------------------------- /specs/kilo/chiscsi-iscsi-helper.rst: -------------------------------------------------------------------------------- 1 | .. 
2 | This work is licensed under a Creative Commons Attribution 3.0 Unported 3 | License. 4 | 5 | http://creativecommons.org/licenses/by/3.0/legalcode 6 | 7 | ===================================== 8 | Add support for chiscsi iscsi helper 9 | ===================================== 10 | 11 | https://blueprints.launchpad.net/cinder/+spec/chiscsi-iscsi-helper 12 | 13 | The Chelsio iSCSI target(chiscsi) serves as a drop in replacement for the IET 14 | target, aiming to provide the same functionality as IET. This spec aims at 15 | adding support for said target implementation as a pure iscsi_helper. 16 | 17 | chiscsi supports offloading of iSCSI PDU's when required hardware (supported 18 | Chelsio Network cards) is available but will work on any regular NIC as well. 19 | Offloading or lack thereof requires no user intervention once target drivers 20 | are installed. Implementation is initiator agnostic as well, no changes needed 21 | on initiator side. 22 | 23 | Problem description 24 | =================== 25 | 26 | chiscsi target is not currently supported by openstack 27 | 28 | * For a Deployer trying to use offloaded iSCSI support on target side, no 29 | option is currently available. 30 | * Manual intervention is currently required to export volumes, as cinder does 31 | not understand chiscsi target implementation. 32 | 33 | Use Cases 34 | ========= 35 | 36 | Proposed change 37 | =============== 38 | 39 | Add one more iscsi_helper option to cover chiscsi, the driver for this will 40 | interact with the chiscsi target implementation to provide same functionality 41 | as iet. 42 | 43 | Use of offloading is dependent on required hardware being present but is 44 | completely optional. No intervention is required to enable offload and 45 | offloading will happen in a manner completely transparent to initiator side. 46 | 47 | No initiator side changes are required to make use of chiscsi, with or without 48 | offload support. No extra configuration options are required. 
49 | 50 | Alternatives 51 | ------------ 52 | 53 | None 54 | 55 | Data model impact 56 | ----------------- 57 | 58 | None 59 | 60 | REST API impact 61 | --------------- 62 | 63 | None 64 | 65 | Security impact 66 | --------------- 67 | 68 | None 69 | 70 | Notifications impact 71 | -------------------- 72 | 73 | None 74 | 75 | Other end user impact 76 | --------------------- 77 | 78 | None 79 | 80 | Performance Impact 81 | ------------------ 82 | 83 | If iSCSI offload is available, there is a significant performance boost to be 84 | gained. If offloading is not used, performance and resource usage would be 85 | roughly on par with IET or better. 86 | 87 | Other deployer impact 88 | --------------------- 89 | 90 | * No new config options are required besides an extra allowed value for 91 | 'iscsi_helper' that would need to be explicitly set to 'chiscsi'. 92 | * chiscsi target needs to be installed before it can be used. 93 | 94 | Developer impact 95 | ---------------- 96 | 97 | None 98 | 99 | Implementation 100 | ============== 101 | 102 | Assignee(s) 103 | ----------- 104 | 105 | Primary assignee: 106 | anish7 107 | 108 | Other contributors: 109 | kxie 110 | 111 | Work Items 112 | ---------- 113 | 114 | Using iet helper as base, create iscsi_helper for chiscsi, with equivalent 115 | commands for all required apis 116 | 117 | 118 | Dependencies 119 | ============ 120 | 121 | * Ability to use chiscsi target obviously depends on target driver being 122 | installed, and command utility available on path. 
No other dependencies 123 | 124 | Testing 125 | ======= 126 | 127 | Current test for IET target should work just fine for chiscsi 128 | 129 | Documentation Impact 130 | ==================== 131 | 132 | None except listing chiscsi as an available iscsi_helper 133 | 134 | References 135 | ========== 136 | 137 | * http://www.chelsio.com/iscsi-target-software/ 138 | -------------------------------------------------------------------------------- /specs/kilo/database-purge.rst: -------------------------------------------------------------------------------- 1 | .. 2 | This work is licensed under a Creative Commons Attribution 3.0 Unported 3 | License. 4 | 5 | http://creativecommons.org/licenses/by/3.0/legalcode 6 | 7 | ========================================== 8 | cinder db purge utility 9 | ========================================== 10 | 11 | https://blueprints.launchpad.net/cinder/+spec/database-purge 12 | 13 | This spec adds the ability to sanely and safely purge deleted rows from 14 | the cinder database for all relavent tables. Presently, we keep all deleted 15 | rows, or archive them to a 'shadow' table. I believe this is unmaintainable 16 | as we move towards more upgradable releases. Today, most users depend on 17 | manual DB queries to delete this data, but this opens up to human errors. 18 | 19 | The goal is to have this be an extension to the `cinder-manage db` command. 20 | Similar specs are being submitted to all the various projects that touch 21 | a database. 22 | 23 | Problem description 24 | =================== 25 | 26 | Very long lived Openstack installations will carry around database rows 27 | for years and years. To date, there is no "mechanism" to programmatically 28 | purge the deleted data. The archive rows feature doesn't solve this. 
29 | 30 | Use Cases 31 | ========= 32 | 33 | Operators should have the ability to purge deleted rows, possibily on a 34 | schedule (cronjob) or as needed (Before an upgrade, prior to maintenance) 35 | The intended use would be to specify a number of days prior to today for 36 | deletion, e.g. "cinder-manage db purge 60" would purge deleted rows that 37 | have the "deleted_at" column greater than 60 days ago 38 | 39 | Project Priority 40 | ----------------- 41 | 42 | Low 43 | 44 | Proposed change 45 | =============== 46 | 47 | The proposal is to add a "purge" method to DbCommands in 48 | cinder/cinder/cmd/manage.py 49 | This will take a number of days argument, and use that for a data_sub match 50 | Like: 51 | delete from instances where deleted != 0 and deleted_at > data_sub(NOW()...) 52 | 53 | Alternatives 54 | ------------ 55 | 56 | Today, this can be accomplished manually with SQL commands, or via script. 57 | There is also the archive_deleted_rows method. However, this won't satisfy 58 | certain data destruction policies that may exist at some companies. 59 | 60 | Data model impact 61 | ----------------- 62 | 63 | None, all tables presently include a "deleted_at" column. 64 | 65 | REST API impact 66 | --------------- 67 | 68 | None, this would be run from cinder-manage 69 | 70 | Security impact 71 | --------------- 72 | 73 | Low, This only touches already deleted rows. 74 | 75 | Notifications impact 76 | -------------------- 77 | 78 | None 79 | 80 | Other end user impact 81 | --------------------- 82 | 83 | None 84 | 85 | Performance Impact 86 | ------------------ 87 | 88 | This has the potential to improve performance for very large databases. 89 | Very long-lived installations can suffer from inefficient operations on 90 | large tables. 
91 | 92 | Other deployer impact 93 | --------------------- 94 | 95 | None 96 | 97 | Developer impact 98 | ---------------- 99 | 100 | None 101 | 102 | Implementation 103 | ============== 104 | 105 | Assignee(s) 106 | ----------- 107 | 108 | primary author and contact. 109 | Abel Lopez 110 | 111 | Primary assignee: 112 | 113 | 114 | Other contributors: 115 | 116 | 117 | Work Items 118 | ---------- 119 | 120 | Add purge functionality to manage.py db/api.py db/sqlalchemy/api.py 121 | Add tests to confirm functionality 122 | Add documentation of feature 123 | 124 | Dependencies 125 | ============ 126 | 127 | None 128 | 129 | Testing 130 | ======= 131 | 132 | The test will be written as such. Three rows will be inserted into a test db. 133 | Two will be "deleted=1", one will be "deleted=0" 134 | One of the deleted rows will have "deleted_at" be NOW(), the other will be 135 | "deleted_at" a few days ago, lets say 10. The test will call the new 136 | function with the argument of "7", to verify that only the row that was 137 | deleted at 10 days ago will be purged. The two other rows should remain. 138 | 139 | Documentation Impact 140 | ==================== 141 | 142 | will need to add documentation of this feature 143 | 144 | References 145 | ========== 146 | 147 | This was discussed on both the openstack-operators mailing list and the 148 | openstack-developers mailing lists with positive feedback from the group. 149 | 150 | http://lists.openstack.org/pipermail/openstack-dev/2014-October/049616.html 151 | -------------------------------------------------------------------------------- /specs/kilo/db-volume-filtering.rst: -------------------------------------------------------------------------------- 1 | .. 2 | This work is licensed under a Creative Commons Attribution 3.0 Unported 3 | License. 
4 | 5 | http://creativecommons.org/licenses/by/3.0/legalcode 6 | 7 | ========================================== 8 | Generic filter support for volume queries 9 | ========================================== 10 | 11 | https://blueprints.launchpad.net/cinder/+spec/db-volume-filtering 12 | 13 | The filtering support of the volume DB APIs is inconsistent. For example, 14 | filtering is supported when querying all volumes and when querying volumes 15 | by project. However, filtering is not supported when querying volumes by host 16 | or by group. 17 | 18 | 19 | Problem description 20 | =================== 21 | 22 | DB functions exist to get all volumes, to get all volumes in a particular 23 | project, to get all volumes in a particular group, and to get all volumes 24 | hosted on a particular host. See the following functions in the DB API: 25 | 26 | * volume_get_all 27 | * volume_get_all_by_project 28 | * volume_get_all_by_group 29 | * volume_get_all_by_host 30 | 31 | Only the queries that get all volumes and that get all volumes by project 32 | support additional filtering. 33 | 34 | The purpose of this blueprint is to make the filtering support consistent 35 | across these APIs. 36 | 37 | Use Cases 38 | ========= 39 | 40 | Proposed change 41 | =============== 42 | 43 | The current Volume filtering logic is already encapsulated in the common 44 | _generate_paginate_query function. The filtering needs to be moved into 45 | a common function (something like _process_volume_filters) that would 46 | update a model query object with the filter information. 
47 | 48 | Then, for example, the volume_get_all_by_host could utilize it as follows:: 49 | 50 | def volume_get_all_by_host(context, host, filters=None): 51 | """Retrieves all volumes hosted on a host.""" 52 | if host and isinstance(host, basestring): 53 | session = get_session() 54 | with session.begin(): 55 | host_attr = getattr(models.Volume, 'host') 56 | conditions = [host_attr == host, 57 | host_attr.op('LIKE')(host + '#%')] 58 | query = _volume_get_query(context).filter(or_(*conditions)) 59 | if filters: 60 | query = _process_volume_filters(query, filters) 61 | if not query: 62 | return None 63 | return query.all() 64 | elif not host: 65 | return [] 66 | 67 | Alternatives 68 | ------------ 69 | 70 | Instead of adding filter support to the other volume APIs, a caller could 71 | simply invoke the volume_get_all API with a filter that defines the host or 72 | the group information. The downside of this approach is that is requires the 73 | caller to understand how to form that query; this is especially problematic 74 | for the host API since the filter is actually an OR of: 75 | 76 | * A exact string match for the given host 77 | * A REGEX match where the host matches a value in the form of "#" 78 | 79 | Data model impact 80 | ----------------- 81 | 82 | None 83 | 84 | REST API impact 85 | --------------- 86 | 87 | None 88 | 89 | Security impact 90 | --------------- 91 | 92 | None 93 | 94 | Notifications impact 95 | -------------------- 96 | 97 | None 98 | 99 | Other end user impact 100 | --------------------- 101 | 102 | None 103 | 104 | Performance Impact 105 | ------------------ 106 | 107 | Complicated DB filters could affect query performance; however, this already 108 | exists for the volume_get_all and the volume_get_all_by_project APIs. 
109 | 110 | Other deployer impact 111 | --------------------- 112 | 113 | None 114 | 115 | Developer impact 116 | ---------------- 117 | 118 | None 119 | 120 | 121 | Implementation 122 | ============== 123 | 124 | Assignee(s) 125 | ----------- 126 | 127 | Primary assignee: 128 | Steven Kaufer (kaufer) 129 | 130 | Other contributors: 131 | None 132 | 133 | Work Items 134 | ---------- 135 | 136 | * Refactor Volume filter logic in the sqlalchemy DB API into a common function 137 | and invoke it from the existing _generate_paginate_query function 138 | * Update the functions to get volumes by host and by group to use the common 139 | filtering function 140 | 141 | 142 | Dependencies 143 | ============ 144 | 145 | None 146 | 147 | 148 | Testing 149 | ======= 150 | 151 | Since common filter processing will be used for all volume DB queries, the 152 | existing test coverage is sufficient. 153 | 154 | 155 | Documentation Impact 156 | ==================== 157 | 158 | None 159 | 160 | 161 | References 162 | ========== 163 | 164 | None 165 | -------------------------------------------------------------------------------- /specs/kilo/remotefs-cfg-improvements.rst: -------------------------------------------------------------------------------- 1 | .. 2 | This work is licensed under a Creative Commons Attribution 3.0 Unported 3 | License. 4 | 5 | http://creativecommons.org/licenses/by/3.0/legalcode 6 | 7 | ========================================== 8 | RemoteFS Config Improvements 9 | ========================================== 10 | 11 | https://blueprints.launchpad.net/cinder/+spec/remotefs-share-cfg-improvements 12 | 13 | RemoteFS drivers (NFS, GlusterFS, etc.) are currently configured by adding 14 | a list of shares to a text config file which is referenced by cinder.conf. 15 | This means that one driver instance manages a handful of storage locations 16 | for the driver. 
This work will a) have these drivers configured like most 17 | other Cinder drivers are, and b) leverage the Cinder scheduler for selection 18 | between different storage backends rather than having the driver act as a 19 | pseudo-scheduler. 20 | 21 | 22 | Problem description 23 | =================== 24 | 25 | The configuration system for NFS/GlusterFS/etc drivers: 26 | * is different from other drivers 27 | * is more complex than necessary 28 | * limits functionality such as migration 29 | 30 | Use Cases 31 | ========= 32 | 33 | Proposed change 34 | =============== 35 | 36 | Replace the _shares_config setting with settings that can be used to login 37 | to the storage platforms. This means that an nfs_shares_config file such as:: 38 | 39 | 192.168.1.10:/export1 -o sync 40 | 192.168.1.11:/export2 -o vers=nfs4 41 | 42 | would become, in cinder.conf:: 43 | 44 | [nfs1] 45 | address = 192.168.1.10 46 | export_path = /export1 47 | options = -o sync 48 | 49 | [nfs2] 50 | address = 192.168.1.11 51 | export_path = /export2 52 | options = -o vers=nfs4 53 | 54 | Each Cinder backend will then only manage one export rather than a handful of 55 | exports. This brings the RemoteFS drivers closer to how other Cinder 56 | drivers operate. 57 | 58 | Alternatives 59 | ------------ 60 | 61 | Leave things as they are today. (Not desirable.) 62 | 63 | Data model impact 64 | ----------------- 65 | 66 | None 67 | 68 | REST API impact 69 | --------------- 70 | 71 | None 72 | 73 | Security impact 74 | --------------- 75 | 76 | None 77 | 78 | Notifications impact 79 | -------------------- 80 | 81 | None 82 | 83 | Other end user impact 84 | --------------------- 85 | 86 | It will be possible to use Cinder volume migration to move volumes between 87 | all NFS exports when previously this was not always possible (since the 88 | different exports were managed by the same driver instance). 
89 | 90 | Performance Impact 91 | ------------------ 92 | 93 | None 94 | 95 | Other deployer impact 96 | --------------------- 97 | 98 | nfs_shares_config, glusterfs_shares_config, etc., will be deprecated 99 | (but still functional for Kilo). Setting the new options will cause 100 | these settings to be ignored. 101 | 102 | 103 | Developer impact 104 | ---------------- 105 | 106 | None 107 | 108 | Implementation 109 | ============== 110 | 111 | Assignee(s) 112 | ----------- 113 | 114 | Primary assignee: 115 | eharney 116 | 117 | Other contributors: 118 | Other interested parties? 119 | 120 | Work Items 121 | ---------- 122 | 123 | * Create new options for address, export, mount options 124 | * Mark options and code to be removed in L as deprecated in Kilo 125 | 126 | Dependencies 127 | ============ 128 | 129 | None 130 | 131 | 132 | Testing 133 | ======= 134 | 135 | The NFS driver and GlusterFS drivers will be gaining CI during the Kilo 136 | cycle which will cover this. 137 | 138 | Manual testing should cover both the current and new configuration paths. 139 | 140 | Documentation Impact 141 | ==================== 142 | 143 | New configuration options and possibly guide changes for configuring the NFS 144 | driver. 145 | 146 | References 147 | ========== 148 | 149 | None 150 | -------------------------------------------------------------------------------- /specs/kilo/support-iscsi-driver.rst: -------------------------------------------------------------------------------- 1 | .. 2 | This work is licensed under a Creative Commons Attribution 3.0 Unported 3 | License. 
4 | 5 | http://creativecommons.org/licenses/by/3.0/legalcode 6 | 7 | =============================================== 8 | Support iSER driver within the ISCSIDriver flow 9 | =============================================== 10 | 11 | https://blueprints.launchpad.net/cinder/+spec/support-iscsi-driver 12 | 13 | This purpose of this BP is to avoid code duplications of classes and 14 | parameters, and to prevent instability in the iSER driver flow, 15 | for the TGT and LIO cases. 16 | 17 | Problem description 18 | =================== 19 | 20 | #. Currently the iSER driver is supported over TGT only, without LIO support. 21 | #. There are a couple of iSER classes that inherit from iSCSI driver/target 22 | classes, but most of their functionality is the same as the iSCSI classes. 23 | This code duplication causes instability in the iSER driver code, when new 24 | features or changes are added to the iSCSI driver flow. 25 | 26 | Use Cases 27 | ========= 28 | 29 | Proposed change 30 | =============== 31 | 32 | These two problems can be solved by adding a small fix, which includes a new 33 | enable_iser parameter within iSCSI Tgt/LIO classes. 34 | 35 | All that is needed for RDMA support over iSER, in the Tgt and LIO cases, is 36 | to set just one extra parameter in the volume creation stage. 37 | 38 | A deprecation alert will be added to ISERTgtAdm, since This change will act as 39 | a replacement to the current iSER Tgt code. 40 | 41 | The Nova part of this spec is specified at: 42 | https://review.openstack.org/#/c/130721/ 43 | 44 | Alternatives 45 | ------------ 46 | 47 | Leaving ISERTgtAdm, LVMISERDriver, ISERDriver and iser_opts the way they are, 48 | or just deprecating a part of them (but it will miss the purpose of this code 49 | refactoring). 
50 | 51 | Data model impact 52 | ----------------- 53 | 54 | None 55 | 56 | REST API impact 57 | --------------- 58 | 59 | None 60 | 61 | Security impact 62 | --------------- 63 | 64 | None 65 | 66 | Notifications impact 67 | -------------------- 68 | 69 | None 70 | 71 | Other end user impact 72 | --------------------- 73 | 74 | None 75 | 76 | Performance Impact 77 | ------------------ 78 | 79 | None 80 | 81 | Other deployer impact 82 | --------------------- 83 | 84 | Adding a new "enable_iser" parameter, set to "False" by default. 85 | This parameter will be used in TGT or LIO volume creation for setting 86 | RDMA based portals. 87 | 88 | This single parameter will deprecate all iser_opts parameters, that are 89 | a duplication from iscsi parameters. 90 | 91 | Developer impact 92 | ---------------- 93 | 94 | This change will simplify the maintanance of the iSER driver flow, since no 95 | extra classes or duplicated parameters will be used. 96 | 97 | Implementation 98 | ============== 99 | 100 | Assignee(s) 101 | ----------- 102 | 103 | Aviram Bar-Haim 104 | 105 | Work Items 106 | ---------- 107 | 108 | #. Fix bug https://bugs.launchpad.net/cinder/+bug/1396265 and add the correct 109 | driver parameter, with a configurable value to VOLUME_CONF and 110 | VOLUME_CONF_WITH_CHAP_AUTH. 111 | #. Add a new "enable_iser" parameter that is set to false by default. 112 | #. Set the driver parameter at the VOLUME_CONFs template in TgtAdm for the 113 | TGT case. 114 | #. Add _set_iser(1) on the network portal object in rtslib for the LIO case, 115 | according to "enable_iser" value. 116 | #. Set ISCSIDriver's "driver_volume_type" to "iscsi" or "iser" value, according 117 | to the "enable_iser" value. 118 | 119 | Dependencies 120 | ============ 121 | 122 | None 123 | 124 | Testing 125 | ======= 126 | 127 | HW that supports RDMA is required in order to test volume attachment over 128 | iSER. 
129 | 130 | A new unit test will be added with the new enable_iser parameter over 131 | iSCSI volume driver. 132 | 133 | Documentation Impact 134 | ==================== 135 | 136 | After adding the new enable_iser parameter, An updated iSER configuration 137 | guidelines will be added to: 138 | 139 | * https://wiki.openstack.org/wiki/Mellanox-Cinder 140 | * http://docs.openstack.org/juno/config-reference/content/lvm-volume-driver.html 141 | * http://community.mellanox.com/docs/DOC-1462 142 | 143 | References 144 | ========== 145 | 146 | None 147 | -------------------------------------------------------------------------------- /specs/kilo/unit-test-cases-for-cinder-scripts.rst: -------------------------------------------------------------------------------- 1 | .. 2 | This work is licensed under a Creative Commons Attribution 3.0 Unported 3 | License. 4 | 5 | http://creativecommons.org/licenses/by/3.0/legalcode 6 | 7 | ========================================== 8 | Unit test cases for cinder scripts 9 | ========================================== 10 | 11 | https://blueprints.launchpad.net/cinder/+spec/unit-test-cases-for-cinder-scripts 12 | 13 | Currently, there are no unit tests to test 14 | bin/cinder-{all, api, backup, manage, rtstool, scheduler, volume}. 15 | Adding unit tests for these scripts can help prevent issues similar to 16 | https://review.openstack.org/#/c/79791/, as well as increase test coverage. 17 | 18 | Problem description 19 | =================== 20 | 21 | There are no unit tests to test 22 | bin/cinder-{all, api, backup, manage, rtstool, scheduler, volume}. Adding unit 23 | tests for these scripts can help prevent issues similar to 24 | https://review.openstack.org/#/c/79791/, where a non-existent module was 25 | imported. Furthermore, it increases the test coverage for each cinder script. 
26 | 27 | Use Cases 28 | ========= 29 | 30 | Proposed change 31 | =============== 32 | 33 | In order to create unit tests for 34 | bin/cinder-{all, api, backup, manage, rtstool, scheduler, volume}, we have to 35 | move them into cinder/cmd, and use pbr to setup the correct console scripts, 36 | which will call the respective main function of each script under cinder/cmd. 37 | It will allow us to import from cinder.cmd and individually test each command. 38 | 39 | nova already have their scripts under nova/cmd and uses pbr to setup the 40 | correct console scripts. It is also the same with glance, where it has 41 | unit tests similar to the one proposed, i.e. 42 | glance/tests/unit/api/test_cmd.py. 43 | 44 | Alternatives 45 | ------------ 46 | 47 | The existing setup can be left as-is and no modifications made. However, this 48 | alternative opens up the possibility of more issues similar to 49 | https://review.openstack.org/#/c/79791/ being introduced into the cinder code. 50 | 51 | Data model impact 52 | ----------------- 53 | 54 | None 55 | 56 | REST API impact 57 | --------------- 58 | 59 | None 60 | 61 | Security impact 62 | --------------- 63 | 64 | None 65 | 66 | Notifications impact 67 | -------------------- 68 | 69 | None 70 | 71 | Other end user impact 72 | --------------------- 73 | 74 | None 75 | 76 | Performance Impact 77 | ------------------ 78 | 79 | None 80 | 81 | Other deployer impact 82 | --------------------- 83 | 84 | None 85 | 86 | Developer impact 87 | ---------------- 88 | 89 | None 90 | 91 | 92 | Implementation 93 | ============== 94 | 95 | Assignee(s) 96 | ----------- 97 | 98 | Primary assignee: 99 | thangp 100 | 101 | Other contributors: 102 | eharney 103 | 104 | Work Items 105 | ---------- 106 | 107 | * Move bin/cinder-{all, api, backup, manage, rtstool, scheduler, volume} into 108 | cinder/cmd/cinder_{all, api, backup, manage, rtstool, scheduler, volume}.py. 109 | * Use pbr entry_points to manage the cinder scripts. 
110 | * Create positive and negative unit test cases for each cinder command under 111 | cinder/cmd, i.e. 112 | cinder_{all, api, backup, manage, rtstool, scheduler, volume}. 113 | 114 | 115 | Dependencies 116 | ============ 117 | 118 | None 119 | 120 | 121 | Testing 122 | ======= 123 | 124 | The goal is to create positive and negative unit tests cases for each cinder 125 | script that is currently under bin/. 126 | 127 | 128 | Documentation Impact 129 | ==================== 130 | 131 | Packagers should be aware of the following changes to setup.cfg. 132 | 133 | cinder uses pbr to handle packaging. The cinder scripts that is under the 134 | [files] section will be moved to the [entry_points] section of setup.cfg. 135 | More specifically, this proposal adds console_scripts to the [entry_points] 136 | section of setup.cfg as follows: 137 | 138 | .. code-block:: ini 139 | 140 | [entry_points] 141 | console_scripts = 142 | cinder-all = cinder.cmd.cinder_all:main 143 | cinder-api = cinder.cmd.api:main 144 | cinder-backup = cinder.cmd.backup:main 145 | cinder-manage = cinder.cmd.manage:main 146 | cinder-rtstool = cinder.cmd.rtstool:main 147 | cinder-scheduler = cinder.cmd.scheduler:main 148 | cinder-volume = cinder.cmd.volume:main 149 | 150 | This will cause each console script to be installed that executes the main 151 | functions found in cinder.cmd. 152 | 153 | References 154 | ========== 155 | 156 | * Original code proposed by eharney: https://review.openstack.org/#/c/52229/ 157 | * Original issue: https://review.openstack.org/#/c/79791/ 158 | -------------------------------------------------------------------------------- /specs/kilo/vmdk-oslo.vmware.rst: -------------------------------------------------------------------------------- 1 | .. 2 | This work is licensed under a Creative Commons Attribution 3.0 Unported 3 | License. 
4 | 5 | http://creativecommons.org/licenses/by/3.0/legalcode 6 | 7 | ============================================================ 8 | Integrate VMDK driver with oslo.vmware library 9 | ============================================================ 10 | 11 | https://blueprints.launchpad.net/cinder/+spec/vmdk-oslo.vmware 12 | 13 | The common code between various VMware drivers was moved to the oslo.vmware 14 | library during Icehouse release. The VMDK driver should be updated to use 15 | this library. 16 | 17 | Problem description 18 | =================== 19 | 20 | The oslo.vmware library (https://github.com/openstack/oslo.vmware) contains 21 | code for invoking VIM/SPBM APIs, session management, API retry and 22 | upload/download of virtual disks. The VMware drivers for nova, glance and 23 | ceilometer have already integrated with oslo.vmware. This spec proposes 24 | the integration of VMDK driver with oslo.vmware. 25 | 26 | Use Cases 27 | ========= 28 | 29 | Proposed change 30 | =============== 31 | 32 | * Changes are mostly replacing import statements for the following modules: 33 | 34 | * Replace api with oslo.vmware.api 35 | * Replace vim with oslo.vmware.vim 36 | * Replace pbm with oslo.vmware.pbm 37 | * Replace io_util with oslo.vmware.image_transfer 38 | * Replace vmware_images with oslo.vmware.image_transfer 39 | * Replace read_write_util with oslo.vmware.rw_handles 40 | 41 | * Remove duplicate exceptions in error_util and use oslo.vmware.exceptions 42 | 43 | Alternatives 44 | ------------ 45 | 46 | None 47 | 48 | Data model impact 49 | ----------------- 50 | 51 | None 52 | 53 | REST API impact 54 | --------------- 55 | 56 | None 57 | 58 | Security impact 59 | --------------- 60 | 61 | None 62 | 63 | Notifications impact 64 | -------------------- 65 | 66 | None 67 | 68 | Other end user impact 69 | --------------------- 70 | 71 | None 72 | 73 | Performance Impact 74 | ------------------ 75 | 76 | None 77 | 78 | Other deployer impact 79 | --------------------- 
80 | 81 | The oslo.vmware version mentioned in the requirements file needs to be 82 | installed. 83 | 84 | Developer impact 85 | ---------------- 86 | 87 | None 88 | 89 | 90 | Implementation 91 | ============== 92 | 93 | Assignee(s) 94 | ----------- 95 | 96 | Primary assignee: 97 | vbala 98 | 99 | Other contributors: 100 | None 101 | 102 | Work Items 103 | ---------- 104 | 105 | * Add dependency on oslo.vmware and replace import statements 106 | * Remove duplicate exceptions and use the ones defined in oslo.vmware 107 | * Delete unused modules including their unit tests 108 | 109 | Dependencies 110 | ============ 111 | 112 | None 113 | 114 | 115 | Testing 116 | ======= 117 | 118 | Unit tests for the duplicate modules will be removed. There won't be any new 119 | tests as the changes are purely code reorganization. 120 | 121 | Documentation Impact 122 | ==================== 123 | 124 | None 125 | 126 | References 127 | ========== 128 | 129 | None 130 | -------------------------------------------------------------------------------- /specs/liberty/abc-driver-update.rst: -------------------------------------------------------------------------------- 1 | .. 2 | This work is licensed under a Creative Commons Attribution 3.0 Unported 3 | License. 4 | 5 | http://creativecommons.org/licenses/by/3.0/legalcode 6 | 7 | ========================================== 8 | Update drivers to new base class structure 9 | ========================================== 10 | 11 | https://blueprints.launchpad.net/cinder/+spec/abc-driver-update 12 | 13 | The new abc structure was introduced by ``bp/abc-volume-drivers`` [1]. All 14 | drivers need to get updated in order to benefit from the new structure. 15 | 16 | 17 | Problem description 18 | =================== 19 | 20 | Instead of raising NotImplementedErrors during runtime, this feature 21 | allows discovering the driver's feature set during startup and makes 22 | it discoverable for CI/code check systems.
23 | 24 | Use Cases 25 | ========= 26 | 27 | The support matrix (see [2] for a draft implementation) can be extracted 28 | to see the graduation process of new functionality moving to a common 29 | function implemented by all drivers. 30 | 31 | Proposed change 32 | =============== 33 | 34 | All cinder volume drivers needs to get updated with the following approach:: 35 | 36 | class FooDriver(driver.RetypeVD, driver.TransferVD, driver.ExtendVD, 37 | driver.CloneableVD, driver.CloneableImageVD, 38 | driver.SnapshotVD, driver.BaseVD) 39 | 40 | A drivers must inherit from BaseVD and implement the basic functions. In order 41 | to mark that a driver does implement further feature sets it must inherit from 42 | the corresponding class. 43 | 44 | If all drivers implement a certain feature set the functions will be moved to 45 | BasicVD at the end. 46 | 47 | 48 | Alternatives 49 | ------------ 50 | 51 | No porting at all, which would make the [1] pointless. 52 | 53 | Data model impact 54 | ----------------- 55 | 56 | None. 57 | 58 | REST API impact 59 | --------------- 60 | 61 | None. 62 | 63 | Security impact 64 | --------------- 65 | 66 | None. 67 | 68 | Notifications impact 69 | -------------------- 70 | 71 | None. 72 | 73 | Other end user impact 74 | --------------------- 75 | 76 | None. 77 | 78 | Performance Impact 79 | ------------------ 80 | 81 | See [1] 82 | 83 | Other deployer impact 84 | --------------------- 85 | 86 | None. 87 | 88 | Developer impact 89 | ---------------- 90 | 91 | This change will change all implemented drivers slightly. The functionality 92 | itself shouldn't be changed at all but all driver need to be adopted to the 93 | new class model. 94 | 95 | 96 | Implementation 97 | ============== 98 | 99 | Assignee(s) 100 | ----------- 101 | 102 | Primary assignee: 103 | Marc Koderer (m-koderer) 104 | 105 | Other contributors: 106 | All driver maintainers 107 | 108 | Work Items 109 | ---------- 110 | 111 | Etherpad if necessary. 
112 | 113 | Dependencies 114 | ============ 115 | 116 | None. 117 | 118 | Testing 119 | ======= 120 | 121 | Individual driver unit tests needs to get adapted. 122 | 123 | 124 | Documentation Impact 125 | ==================== 126 | 127 | None. 128 | 129 | 130 | References 131 | ========== 132 | 133 | [1]: https://review.openstack.org/#/c/114168/ 134 | [2]: https://review.openstack.org/#/c/160346/ 135 | -------------------------------------------------------------------------------- /specs/liberty/db-archiving.rst: -------------------------------------------------------------------------------- 1 | .. 2 | This work is licensed under a Creative Commons Attribution 3.0 Unported 3 | License. 4 | 5 | http://creativecommons.org/licenses/by/3.0/legalcode 6 | 7 | =================== 8 | Cinder DB Archiving 9 | =================== 10 | 11 | https://blueprints.launchpad.net/cinder/+spec/db-archiving 12 | 13 | We actually don't delete rows from db, we mark it only as deleted 14 | (we have special column). So the DB is growing and growing, 15 | so this causes problem with performance. 16 | 17 | 18 | Problem description 19 | =================== 20 | 21 | A lot of unused data in the DB causes different kind of problems with 22 | performance and maintaining: 23 | 24 | * Need to filter 'marked as deleted' rows in every query 25 | 26 | * DB contains a lot of unused data in each table and operators 27 | need to filter it if they use database directly in an emergency case 28 | 29 | * Storage usage utilization (low priority) 30 | 31 | Use Cases 32 | ========= 33 | 34 | Proposed change 35 | =============== 36 | 37 | Create shadow table for each table and copy "deleted" rows from main to shadow 38 | table. 
39 | 40 | We need to provide several method for archiving data: 41 | 42 | * archive not more than N "deleted" rows in each table 43 | 44 | * archive "deleted" rows older than specified data 45 | 46 | * archive all "deleted" data 47 | 48 | Shadowing could be started as a periodic task or admin management util. 49 | 50 | Alternatives 51 | ------------ 52 | 53 | Create event-based solution to make opportunity operators to subscribe on 54 | "deleting" events and store deleted data somewhere. 55 | 56 | Data model impact 57 | ----------------- 58 | 59 | * Create shadow tables 60 | 61 | * Implement migrations to store current "deleted" rows in shadow table 62 | 63 | * Shadow tables could have blob field to store some "deleted" data and to not 64 | impose restrictions on database schema changes. 65 | 66 | REST API impact 67 | --------------- 68 | 69 | None 70 | 71 | Security impact 72 | --------------- 73 | 74 | None 75 | 76 | Notifications impact 77 | -------------------- 78 | 79 | None 80 | 81 | Other end user impact 82 | --------------------- 83 | 84 | None 85 | 86 | Performance Impact 87 | ------------------ 88 | 89 | No performance hit from filtering out a potentially massive number of deleted 90 | records on every query. 91 | 92 | 93 | Other deployer impact 94 | --------------------- 95 | 96 | * Operator or deployer could use periodic task or Cinder management tool to 97 | archive "deleted" data. 
98 | 99 | 100 | Developer impact 101 | ---------------- 102 | 103 | Developers should care about migrations for shadow tables as well, as for 104 | original tables: 105 | 106 | * table creation or deletion requires creating or deleting corresponding 107 | shadow tables 108 | 109 | * when a table is modified, the shadow tables have to get modified 110 | 111 | * when one or more columns are moved to a new table, columns from shadow table 112 | should also moved to a new shadow table with data migration 113 | 114 | * downgrades should be implemented for shadow tables too: new tables 115 | have to get removed and the migrated columns will have to get reverted 116 | 117 | 118 | Implementation 119 | ============== 120 | 121 | Assignee(s) 122 | ----------- 123 | 124 | Primary assignee: 125 | Ivan Kolodyazhny (e0ne) 126 | 127 | Other contributors: 128 | Boris Pavlovich (boris-42) 129 | 130 | Work Items 131 | ---------- 132 | 133 | None 134 | 135 | 136 | Dependencies 137 | ============ 138 | 139 | None 140 | 141 | 142 | Testing 143 | ======= 144 | 145 | Unit tests for both API and Tempest will be implemented, 146 | 147 | 148 | Documentation Impact 149 | ==================== 150 | 151 | Cloud Administration Guide will be updated to introduce new cinder-manage 152 | command: 153 | 154 | * http://docs.openstack.org/admin-guide/blockstorage-manage-volumes.html 155 | 156 | 157 | References 158 | ========== 159 | 160 | * Nova's spec for db archiving: https://review.openstack.org/#/c/18493/ 161 | 162 | * Discussion in openstack-dev mailing list: 163 | 164 | http://lists.openstack.org/pipermail/openstack-dev/2014-March/029952.html 165 | 166 | -------------------------------------------------------------------------------- /specs/liberty/incremental-backup-improvements-for-l.rst: -------------------------------------------------------------------------------- 1 | .. 2 | This work is licensed under a Creative Commons Attribution 3.0 Unported 3 | License. 
4 | 5 | http://creativecommons.org/licenses/by/3.0/legalcode 6 | 7 | ===================================== 8 | Incremental backup improvements for L 9 | ===================================== 10 | 11 | https://blueprints.launchpad.net/cinder/+spec/cinder-incremental-backup-improvements-for-l 12 | 13 | This specification proposes to improve the current incremental backup by adding 14 | is_incremental and has_dependent_backups flags to indicate the type of backup 15 | and enriching the notification system via adding parent_id to report. 16 | 17 | 18 | Problem description 19 | =================== 20 | 21 | In Kilo release we supported incremental backup, but there are still some 22 | points that need to be improved. 23 | 1. From the API perspective, there is no place to show the backup is 24 | incremental or not. 25 | 2. User also doesn't know if the incremental backup can be deleted or not, 26 | It's important that Cinder doesn't allow this backup to be deleted since 27 | 'Incremental backups exist for this backup'. Currently, they must have a try to 28 | know it. So if there is a flag to indicate the backup can't be deleted or not, 29 | it will bring more convenience to user and reduce API call. 30 | 3. Enriching the notification system via reporting to Ceilometer, 31 | add parent_id to report 32 | 33 | Use Cases 34 | ========= 35 | 36 | It's useful for 3rd party billing system to distinguish the full backup and 37 | incremental backup, as using different size of storage space, they could have 38 | different fee for full and incremental backups. 39 | 40 | Proposed change 41 | =============== 42 | 43 | 1. When show single backup detail, cinder-api needs to judge if this backup is 44 | a full backup or not checking backup['parent_id']. 45 | 2. If it's an incremental backup, judge if this backup has dependent backups 46 | like we do in process of delete backup. 47 | 3. Then add 'is_incremental=True' and 'has_dependent_backups=True/False' to 48 | response body. 49 | 4. 
Add parent_id to notification system. 50 | 51 | Alternatives 52 | ------------ 53 | None. 54 | 55 | 56 | Data model impact 57 | ----------------- 58 | None. 59 | 60 | 61 | REST API impact 62 | --------------- 63 | The response body of show incremental backup detail is changed like this: 64 | 65 | :: 66 | { 67 | "backup": { 68 | ......, 69 | "is_incremental": True/False, 70 | "has_dependent_backups": True/False 71 | 72 | } 73 | 74 | } 75 | 76 | If there is full backup, the is_incremental flag will be False. 77 | And has_dependent_backups will be True if the full backup has dependent 78 | backups. 79 | 80 | Security impact 81 | --------------- 82 | None 83 | 84 | Notifications impact 85 | -------------------- 86 | Add parent_id to backup notification. 87 | 88 | 89 | Other end user impact 90 | --------------------- 91 | End user can get more info about incremental backup. Enhance user experience. 92 | 93 | 94 | Performance Impact 95 | ------------------ 96 | Because we add an additional judgment for dependent backups. We can eliminate 97 | performance impact by adding index to backup table and counting the number of 98 | dependent backups to make judgment in SQL. 99 | 100 | 101 | IPv6 Impact 102 | ----------- 103 | None. 104 | 105 | 106 | Other deployer impact 107 | --------------------- 108 | None. 109 | 110 | 111 | Developer impact 112 | ---------------- 113 | None. 114 | 115 | 116 | Community Impact 117 | ---------------- 118 | None. 119 | 120 | 121 | Implementation 122 | ============== 123 | 124 | Assignee(s) 125 | ----------- 126 | wanghao 127 | 128 | 129 | Work Items 130 | ---------- 131 | * Add querying and judging code in cinder-api. 132 | * Add parent_id to notification system. 133 | * Add unit tests. 134 | 135 | 136 | Dependencies 137 | ============ 138 | None 139 | 140 | 141 | Testing 142 | ======= 143 | Unit tests are needed to ensure response is working correctly. 144 | 145 | 146 | Documentation Impact 147 | ==================== 148 | 1. 
Cloud admin documentation will be updated to introduce the changes: 149 | http://docs.openstack.org/admin-guide/blockstorage_volume_backups.html 150 | 151 | 2. API ref will also be updated for backups: 152 | http://developer.openstack.org/api-ref-blockstorage-v2.html 153 | 154 | 155 | References 156 | ========== 157 | None 158 | -------------------------------------------------------------------------------- /specs/liberty/non-eventlet-wsgi-app.rst: -------------------------------------------------------------------------------- 1 | .. 2 | This work is licensed under a Creative Commons Attribution 3.0 Unported 3 | License. 4 | 5 | http://creativecommons.org/licenses/by/3.0/legalcode 6 | 7 | ============================================== 8 | Cinder API WSGI application under Apache/Nginx 9 | ============================================== 10 | 11 | https://blueprints.launchpad.net/cinder/+spec/non-eventlet-wsgi-app. 12 | 13 | Cinder API uses eventlet as a web server and for WSGI application management. 14 | Eventlet provides its own WSGI application to provide web server functionality. 15 | 16 | 17 | Problem description 18 | =================== 19 | 20 | * Cinder API is deployed differently from a common web application. Apache/Nginx 21 | are the generally used web servers for REST API applications. 22 | 23 | * Cinder API is run as a separate service. It means that cloud operators need 24 | to configure some software to monitor that Cinder API is running. 25 | 26 | * Apache/Nginx works better under real heavy load than eventlet. 27 | 28 | 29 | 30 | Use Cases 31 | ========= 32 | 33 | * Deploy Cinder API with Apache/Nginx like any other web application. 34 | 35 | * Deploy Cinder API with Apache/Nginx to have load balancing and web application 36 | monitoring out of the box. E.g. Nginx/uWSGI can restart the backend service if 37 | it stops for any reason.
38 | 39 | 40 | Proposed change 41 | =============== 42 | 43 | Provide WSGI application based on used web-framework instead of eventlet. Leave 44 | eventlet-based WSGI application as a default option and make it configurable. 45 | 46 | Alternatives 47 | ------------ 48 | 49 | Leave as is and use eventlet for REST API web serving. Use something like 50 | haproxy for API requests load balancing and some watchdog to restart Cinder API 51 | service after shutdown. 52 | 53 | Data model impact 54 | ----------------- 55 | 56 | None. 57 | 58 | REST API impact 59 | --------------- 60 | 61 | None. 62 | 63 | Security impact 64 | --------------- 65 | 66 | None 67 | 68 | Notifications impact 69 | -------------------- 70 | 71 | None 72 | 73 | Other end user impact 74 | --------------------- 75 | 76 | None 77 | 78 | Performance Impact 79 | ------------------ 80 | 81 | Potential performance impact could be present if we will have a lot of requests 82 | to Cinder API. Performance impact will be tested with Rally. 83 | 84 | Other deployer impact 85 | --------------------- 86 | 87 | Deployers should configure Apache/Nginx/etc and WSGI module to handle requests 88 | to Cinder API. By default, Cinder API will use eventlet and no deployer impact 89 | will be. 90 | 91 | No new configuration options for Cinder will be introduced. 92 | 93 | New deployment mode should be supported by Chef cookbooks and Puppet manifests. 94 | 95 | Developer impact 96 | ---------------- 97 | 98 | None 99 | 100 | 101 | Implementation 102 | ============== 103 | 104 | Assignee(s) 105 | ----------- 106 | 107 | Primary assignee: 108 | Ivan Kolodyazhny 109 | 110 | Other contributors: 111 | Anton Arefiev 112 | 113 | Work Items 114 | ---------- 115 | 116 | * Implement WSGI application based on webob framework. 117 | 118 | * Test performance impact with Rally. 119 | 120 | * Write documentation how to run Cinder API with Apache/Nginx. 
121 | 122 | * Implement configuration option in Devstack to support new deployment mode. 123 | 124 | * Make sure usage of eventlet doesn't break WSGI in Nginx/Apache. 125 | 126 | * Start cross-project initiative to implement this in oslo. 127 | 128 | 129 | Dependencies 130 | ============ 131 | 132 | None 133 | 134 | 135 | Testing 136 | ======= 137 | 138 | Functional tests for new deployment mode will be implemented. We need to test 139 | this feature on every commit on infra with CI. 140 | 141 | 142 | Documentation Impact 143 | ==================== 144 | 145 | Administrators Guide will be updated. 146 | 147 | 148 | References 149 | ========== 150 | 151 | 152 | * https://review.openstack.org/#/c/154642/ 153 | 154 | * https://review.openstack.org/#/c/164035/ 155 | 156 | * https://review.openstack.org/#/c/196088/ 157 | 158 | * http://lists.openstack.org/pipermail/openstack-dev/2015-February/057359.html 159 | -------------------------------------------------------------------------------- /specs/liberty/standard-capabilities.rst: -------------------------------------------------------------------------------- 1 | .. 2 | This work is licensed under a Creative Commons Attribution 3.0 Unported 3 | License. 4 | 5 | http://creativecommons.org/licenses/by/3.0/legalcode 6 | 7 | ===================== 8 | Standard Capabilities 9 | ===================== 10 | 11 | https://blueprints.launchpad.net/cinder/+spec/standard-capabilities 12 | 13 | Problem description 14 | =================== 15 | 16 | For the Liberty release there is a proposal [1] to allow storage backends to 17 | push capabilities of their pools [1]. Eventually we would want some common 18 | capabilities to graduate into becoming a ``well defined`` capability. The 19 | point of this spec is to agree on the initial ``well defined`` capabilities. 
20 | 21 | Use Cases 22 | ========= 23 | 24 | Having the ``well defined`` capabilities will allow the deployer to see what 25 | common capabilities are shared beyond their deployed backends in Cinder. 26 | 27 | Proposed change 28 | =============== 29 | 30 | The initial ``well defined`` capabilities are: 31 | 32 | * QOS 33 | * Compression 34 | * Replication 35 | * Thin provisioning 36 | 37 | Keep in mind this is just an agreement that these are common features that 38 | a backend could support. As shown in the proposal of how this will work [1], 39 | backends will still be able to push up the specific keys they look for in 40 | volume types for these capabilities. 41 | 42 | Alternatives 43 | ------------ 44 | 45 | None 46 | 47 | Data model impact 48 | ----------------- 49 | 50 | None. This information comes directly from the volume drivers, reported to the 51 | scheduler, to the Cinder API. 52 | 53 | REST API impact 54 | --------------- 55 | 56 | None 57 | 58 | Security impact 59 | --------------- 60 | 61 | None 62 | 63 | Notifications impact 64 | -------------------- 65 | 66 | None 67 | 68 | Other end user impact 69 | --------------------- 70 | 71 | None 72 | 73 | Performance Impact 74 | ------------------ 75 | 76 | None 77 | 78 | Other deployer impact 79 | --------------------- 80 | 81 | None 82 | 83 | Developer impact 84 | ---------------- 85 | 86 | Volume driver maintainers will need report capabilities from their driver to 87 | the scheduler. They can get this information directly from the backend and pass 88 | it right up to the scheduler if it already follows the format specified 89 | earlier. If not, it's up to the driver to parse the response from the backend 90 | in a format the scheduler will understand. If capabilities are not being 91 | reported, the default **False** on features will be done. 
92 | 93 | Implementation 94 | ============== 95 | 96 | Assignee(s) 97 | ----------- 98 | 99 | Primary assignee: 100 | thingee 101 | 102 | Work Items 103 | ---------- 104 | 105 | * Standardize on the Capabilities here. Right here, right now. 106 | 107 | Dependencies 108 | ============ 109 | 110 | None. 111 | 112 | Testing 113 | ======= 114 | 115 | None 116 | 117 | Documentation Impact 118 | ==================== 119 | 120 | The developer docs for driver maintainers will need to be updated to include 121 | the list of common capabilities the maintainer needs to have their driver push 122 | that they support. By default capabilities are marked as False, as in not being 123 | supported by the driver. 124 | 125 | References 126 | ========== 127 | 128 | [1] - https://review.openstack.org/#/c/183947/ 129 | -------------------------------------------------------------------------------- /specs/liberty/support-force-delete-backup.rst: -------------------------------------------------------------------------------- 1 | .. 2 | This work is licensed under a Creative Commons Attribution 3.0 Unported 3 | License. 4 | 5 | http://creativecommons.org/licenses/by/3.0/legalcode 6 | 7 | ======================= 8 | Backup Force Delete API 9 | ======================= 10 | 11 | https://blueprints.launchpad.net/cinder/+spec/support-force-delete-backup 12 | 13 | Provide an API to force delete a backup being stucked in creating or 14 | restoring, etc.. 15 | 16 | Problem description 17 | =================== 18 | 19 | Currently there are volume force-delete and snapshot force-delete functions, 20 | but there is not a force-delete function for backups. Force-delete for backups 21 | would be beneficial when backup-create fails and the backup status is stuck in 22 | 'creating'. 
This situation occurs when database just went down after backup 23 | volume and metadata and update the volume's status to 'available', leaving the 24 | backup's status to be 'creating' without having methods to deal with through 25 | API, because backup-delete api could only delete backup item in status of 26 | 'available' and 'error'. 27 | 28 | Use Cases 29 | ========= 30 | 31 | If backup create successfully in object storage, but become stuck in update 32 | backup's status because database just went down. Then use force-delete API, 33 | we could directly delete the backup item(include all the stuff in storage 34 | backend and db entry info) without manually change the backup's status in 35 | db to error or restart cinder-backup and call backup-delete function, 36 | which is very useful for administrators. 37 | 38 | Proposed change 39 | =============== 40 | 41 | A new API function and corresponding cinder command will be added to force 42 | delete backups. 43 | 44 | The proposal is to provide a method for administrator to quickly delete the 45 | backup item that is not in the status of 'available' or 'error'. 46 | 47 | * It's an admin-only operation. 48 | 49 | Alternatives 50 | ------------ 51 | 52 | First, login in the cinder database, use the following update sql to change 53 | the backup item status to 'available' or 'error'. 54 | 55 | update backups set status='available'(or 'error') where id='xxx-xxx-xxx-xxx'; 56 | 57 | Second, call backup delete api to delete the backup item. 58 | 59 | Data model impact 60 | ----------------- 61 | None 62 | 63 | REST API impact 64 | --------------- 65 | 66 | Add a new REST API to delete backup in v2: 67 | 68 | .. code-block:: console 69 | 70 | POST /v2/{tenant_id}/backups/{id}/action 71 | 72 | .. 
code-block:: python 73 | 74 | { 75 | "os-force_delete": {} 76 | } 77 | 78 | Normal http response code: 79 | 202 80 | 81 | Expected error http response code: 82 | 404 83 | 84 | Security impact 85 | --------------- 86 | None 87 | 88 | Notifications impact 89 | -------------------- 90 | Delete notification should include whether force was used or not 91 | 92 | Other end user impact 93 | --------------------- 94 | 95 | A new command, backup-force-delete, will be added to python-cinderclient. This 96 | command mirrors the underlying API function. 97 | 98 | Force delete a backup item can be performed by: 99 | $ cinder backup-force-delete 100 | 101 | 102 | Performance Impact 103 | ------------------ 104 | None 105 | 106 | Other deployer impact 107 | --------------------- 108 | None 109 | 110 | Developer impact 111 | ---------------- 112 | None 113 | 114 | 115 | Implementation 116 | ============== 117 | 118 | Assignee(s) 119 | ----------- 120 | 121 | Primary assignee: 122 | ling-yun 123 | 124 | Work Items 125 | ---------- 126 | 127 | * Implement REST API 128 | * Implement cinder client functions 129 | * Implement cinder command 130 | 131 | Dependencies 132 | ============ 133 | None 134 | 135 | Testing 136 | ======= 137 | Need to test the force delete with an in-progress backup and ensure that it 138 | deletes successfully and cleans up correctly. 139 | 140 | 141 | Documentation Impact 142 | ==================== 143 | 144 | The cinder client documentation will need to be updated to reflect the new 145 | command. 146 | 147 | http://docs.openstack.org/admin-guide/blockstorage-manage-volumes.html 148 | 149 | The cinder API documentation will need to be updated to reflect the REST API 150 | changes. 151 | 152 | 153 | References 154 | ========== 155 | 156 | None 157 | -------------------------------------------------------------------------------- /specs/liberty/valid-states-api.rst: -------------------------------------------------------------------------------- 1 | .. 
2 | This work is licensed under a Creative Commons Attribution 3.0 Unported 3 | License. 4 | 5 | http://creativecommons.org/licenses/by/3.0/legalcode 6 | 7 | ================ 8 | Valid States API 9 | ================ 10 | 11 | Include the URL of your launchpad blueprint: 12 | 13 | https://blueprints.launchpad.net/cinder/+spec/valid-states-api 14 | 15 | Provide an API to obtain the set of valid states that are permissible to be 16 | used in the function to reset the state of a volume and snapshot. 17 | 18 | Problem description 19 | =================== 20 | 21 | The purpose of this feature is to facilitate exposing the reset-state API in 22 | horizon in a meaningful way by restricting the set of permissible states that 23 | the administrator can specify for a volume. There is no API for this, and it 24 | is undesirable to hardcode this information into horizon. 25 | 26 | Use Cases 27 | ========= 28 | 29 | Proposed change 30 | =============== 31 | 32 | A new API function and corresponding cinder command will be added to determine 33 | the set of valid states for volumes or snapshots. 34 | 35 | The initial proposal is to create a single function, get_valid_states, to 36 | obtain the valid states for any type of resource (volume, snapshot). 37 | 38 | Alternatives 39 | ------------ 40 | 41 | For consistency with the rest of cinder, get_valid_states may be renamed and/or 42 | split into multiple functions, one per resource type; this decision will be 43 | left as an implementation detail and will be finalized as part of the normal 44 | code review process. 
45 | 46 | Data model impact 47 | ----------------- 48 | None 49 | 50 | REST API impact 51 | --------------- 52 | 53 | Add a new REST API to retrieve valid states: 54 | * GET /v2/{tenant_id}/states 55 | 56 | JSON response schema definition:: 57 | 58 | 'valid_states': { 59 | 'type': 'array', 60 | 'items' : { 61 | 'type': 'string' 62 | } 63 | } 64 | 65 | Security impact 66 | --------------- 67 | None 68 | 69 | Notifications impact 70 | -------------------- 71 | None 72 | 73 | Other end user impact 74 | --------------------- 75 | 76 | A new command, get-valid-states, will be added to python-cinderclient. This 77 | command mirrors the underlying API function. 78 | 79 | Obtaining the list of valid states for a volume or snapshot can be performed 80 | by: 81 | $ cinder get-valid-states 82 | 83 | 84 | Performance Impact 85 | ------------------ 86 | None 87 | 88 | Other deployer impact 89 | --------------------- 90 | None 91 | 92 | Developer impact 93 | ---------------- 94 | None 95 | 96 | 97 | Implementation 98 | ============== 99 | 100 | Assignee(s) 101 | ----------- 102 | 103 | Primary assignee: 104 | thingee 105 | 106 | Work Items 107 | ---------- 108 | 109 | * Implement REST API 110 | * Implement cinder client functions 111 | * Implement cinder command 112 | 113 | Dependencies 114 | ============ 115 | 116 | Horizon blueprints that will depend on this one: 117 | 118 | * https://blueprints.launchpad.net/horizon/+spec/cinder-reset-volume-state 119 | 120 | * https://blueprints.launchpad.net/horizon/+spec/cinder-reset-snapshot-state 121 | 122 | Testing 123 | ======= 124 | None 125 | 126 | 127 | Documentation Impact 128 | ==================== 129 | 130 | The cinder client documentation will need to be updated to reflect the new 131 | command. 132 | 133 | The cinder API documentation will need to be updated to reflect the REST API 134 | changes. 
135 | 136 | 137 | References 138 | ========== 139 | 140 | None 141 | -------------------------------------------------------------------------------- /specs/liberty/vhost-support.rst: -------------------------------------------------------------------------------- 1 | .. 2 | This work is licensed under a Creative Commons Attribution 3.0 Unported 3 | License. 4 | 5 | http://creativecommons.org/licenses/by/3.0/legalcode 6 | 7 | ========================================== 8 | Add vHost Executor 9 | ========================================== 10 | 11 | https://blueprints.launchpad.net/cinder/+spec/vhost-support 12 | 13 | The vHost driver was added in the 3.6 Linux kernel. The Linux-IO Target vHost 14 | fabric module implements I/O processing based on the Linux virtio mechanism. It 15 | provides virtually bare-metal local storage performance for KVM guests. 16 | Currently Linux guest VMs are supported. 17 | 18 | Problem description 19 | =================== 20 | 21 | The vHost driver is not a self-contained virtio device, as it depends on 22 | userspace to handle the control plane while the data plane is done in kernel. 23 | This means the data plane does not go through emulations, which can slow down 24 | I/O performance. Cinder today does not provide an option for taking advantage 25 | of the Linux vHost driver. 26 | 27 | Use Cases 28 | ========= 29 | 30 | Proposed change 31 | =============== 32 | 33 | Add an additional brick executor that knows how to work with vHost. The 34 | executor will have a first pass implementation of creating, deleting, listing 35 | vHost endpoints through LIO. 36 | 37 | Creating the endpoint requires a block device to be available on the machine 38 | that is creating the vHost target. The vHost executor would pass the block 39 | device path to to rtstools, and rtstools will create vHost endpoint with a lun 40 | to the block device. 
41 | 42 | Alternatives 43 | ------------ 44 | 45 | n/a 46 | 47 | Data model impact 48 | ----------------- 49 | 50 | n/a 51 | 52 | REST API impact 53 | --------------- 54 | 55 | n/a 56 | 57 | Security impact 58 | --------------- 59 | 60 | n/a 61 | 62 | Notifications impact 63 | -------------------- 64 | 65 | n/a 66 | 67 | Other end user impact 68 | --------------------- 69 | 70 | n/a 71 | 72 | Performance Impact 73 | ------------------ 74 | 75 | Cinder itself being the control plane will not experience any different 76 | performance. The data plane should experience a greater deal of performance 77 | [1]. 78 | 79 | Other deployer impact 80 | --------------------- 81 | 82 | n/a 83 | 84 | Developer impact 85 | ---------------- 86 | 87 | n/a 88 | 89 | 90 | Implementation 91 | ============== 92 | 93 | Assignee(s) 94 | ----------- 95 | 96 | Primary assignee: 97 | thingee 98 | 99 | Work Items 100 | ---------- 101 | 102 | * Add vHost executor to brick 103 | 104 | Dependencies 105 | ============ 106 | 107 | n/a 108 | 109 | Testing 110 | ======= 111 | 112 | There will be appropriate unit tests available making sure target creation, 113 | deletion, listing works. 114 | 115 | Documentation Impact 116 | ==================== 117 | 118 | n/a 119 | 120 | References 121 | ========== 122 | 123 | [1] - http://linux-iscsi.org/wiki/VHost#Linux_performance 124 | -------------------------------------------------------------------------------- /specs/liberty/volume-types-public-update.rst: -------------------------------------------------------------------------------- 1 | .. 2 | This work is licensed under a Creative Commons Attribution 3.0 Unported 3 | License. 
4 | 5 | http://creativecommons.org/licenses/by/3.0/legalcode 6 | 7 | =========================================== 8 | Ability to update volume type public status 9 | =========================================== 10 | 11 | https://blueprints.launchpad.net/cinder/+spec/volume-types-public-update 12 | 13 | This proposal is to add the ability to update volume type is_public status. 14 | 15 | Problem description 16 | =================== 17 | 18 | Currently, the v2 volume type update api doesn't support updating volume type's 19 | public status. All volume types created are public by default if not specified, 20 | It is not possible to update a existing volume_type is_public status. 21 | It is necessary to add updating public status for volume type. If a volume 22 | type updated from public to private, the volumes created with this type will 23 | not be affected, but the user without access will not be able to create volume 24 | with this type anymore. 25 | 26 | Use Cases 27 | ========= 28 | 29 | Suppose the admin created a volume type. And he/she wants to make the volume 30 | type not public and add access to specified projects. 31 | 32 | Proposed change 33 | =============== 34 | 35 | * Modify volume_type update API adding is_public property support. 36 | 37 | Alternatives 38 | ------------ 39 | 40 | 41 | Data model impact 42 | ----------------- 43 | 44 | 45 | REST API impact 46 | --------------- 47 | 48 | Volume type update change 49 | 50 | * Update volume type API 51 | * V2//types/volume_type_id 52 | * Method: PUT 53 | * JSON schema definition for V2:: 54 | 55 | { 56 | "volume_type": 57 | { 58 | "name": "test_type", 59 | "description": "Test volume type", 60 | "is_public": "False" # new 61 | } 62 | } 63 | 64 | * In the existing update volume type API, add a new parameter "is_public" to 65 | allow updating public status for volume type. 
66 | 67 | Security impact 68 | --------------- 69 | 70 | 71 | Notifications impact 72 | -------------------- 73 | 74 | 75 | Other end user impact 76 | --------------------- 77 | 78 | python-cinderclient needs to be changed to support the modified API. 79 | 80 | * Update volume type 81 | cinder type-update --name --description 82 | --is-public 83 | 84 | Performance Impact 85 | ------------------ 86 | 87 | 88 | Other deployer impact 89 | --------------------- 90 | 91 | 92 | Developer impact 93 | ---------------- 94 | 95 | 96 | Implementation 97 | ============== 98 | 99 | Assignee(s) 100 | ----------- 101 | 102 | Primary assignee: 103 | liyingjun 104 | 105 | Other contributors: 106 | 107 | Work Items 108 | ---------- 109 | 110 | 1. API change: 111 | * Modify Update Volume Type API. 112 | 113 | Dependencies 114 | ============ 115 | 116 | Testing 117 | ======= 118 | 119 | New unit tests will be added to test the changed code. 120 | 121 | Documentation Impact 122 | ==================== 123 | 124 | Documentation changes are needed. 125 | 126 | References 127 | ========== 128 | -------------------------------------------------------------------------------- /specs/mitaka/add_pagination_to_other_resources.rst: -------------------------------------------------------------------------------- 1 | .. 2 | This work is licensed under a Creative Commons Attribution 3.0 Unported 3 | License. 4 | 5 | http://creativecommons.org/licenses/by/3.0/legalcode 6 | 7 | ========================================== 8 | Add Pagination To Other Resources 9 | ========================================== 10 | 11 | https://blueprints.launchpad.net/cinder/+spec/add-pagination-to-other-resource 12 | 13 | This spec aims to continue the work that we did in Liberty, adding pagination 14 | support to other cinder resources. 15 | 16 | 17 | Problem description 18 | =================== 19 | 20 | In Liberty release, we have added pagination to backups and snapshots 21 | according bp[1]. 
There is still some work that hasn't been done yet. 22 | In Mitaka, we intend to add pagination support to CG, Volume Type and 23 | Qos specs. 24 | 25 | Use Cases 26 | ========= 27 | 28 | In large scale cloud systems, end users and management systems on top of 29 | Cinder can perform quick queries by using the pagination, filter and sort 30 | functions to improve query performance. 31 | 32 | Proposed change 33 | =============== 34 | 35 | * Consistency Group: Refactor current implementation, using DB pagination 36 | querying, and add support to filter and sort in querying request. 37 | * Volume Type: Add pagination and sort support in querying request. 38 | * Qos Specs: Add pagination, filter and sort in querying request. 39 | * Add sql pagination querying support as we did in backup and snapshot. 40 | 41 | Alternatives 42 | ------------ 43 | 44 | None 45 | 46 | Data model impact 47 | ----------------- 48 | 49 | None 50 | 51 | 52 | REST API impact 53 | --------------- 54 | 55 | According to the API-wg guideline about pagination, filter and sort[2]:: 56 | 57 | GET /v2/{project_id}/{resource}?limit=xxx&marker=xxx&sort=xxx&{filter}=xxx 58 | RESP BODY: {"resource_links": [{xxx}], 59 | "resource": [{xxx}, {xxx}, ..., {xxx}] 60 | } 61 | 62 | 63 | Security impact 64 | --------------- 65 | 66 | None 67 | 68 | Notifications impact 69 | -------------------- 70 | 71 | None 72 | 73 | Other end user impact 74 | --------------------- 75 | 76 | None 77 | 78 | Performance Impact 79 | ------------------ 80 | 81 | None 82 | 83 | Other deployer impact 84 | --------------------- 85 | 86 | None 87 | 88 | 89 | Developer impact 90 | ---------------- 91 | 92 | None 93 | 94 | 95 | Implementation 96 | ============== 97 | 98 | Assignee(s) 99 | ----------- 100 | 101 | Primary assignee: 102 | wanghao 103 | 104 | Other contributors: 105 | None 106 | 107 | Work Items 108 | ---------- 109 | 110 | * Add pagination support to three resources. 111 | * Implement code in db pagination query. 
112 | * Implement code in list querying api. 113 | * Test code. 114 | * Update cinderclient to support this functionality for those resources. 115 | * Add change to API doc. 116 | 117 | 118 | Dependencies 119 | ============ 120 | 121 | None 122 | 123 | 124 | Testing 125 | ======= 126 | 127 | Both unit and Tempest tests are needed to be created to cover the code change. 128 | 129 | 130 | Documentation Impact 131 | ==================== 132 | 133 | The cinder API documention will need to be updated to reflect the REST 134 | API changes. 135 | 136 | 137 | References 138 | ========== 139 | [1]https://blueprints.launchpad.net/cinder/+spec/extend-limit-implementations 140 | [2]https://github.com/openstack/api-wg/blob/master/guidelines/pagination_filter_sort.rst 141 | -------------------------------------------------------------------------------- /specs/mitaka/support-volume-glance-metadata-query.rst: -------------------------------------------------------------------------------- 1 | .. 2 | This work is licensed under a Creative Commons Attribution 3.0 Unported 3 | License. 4 | 5 | http://creativecommons.org/licenses/by/3.0/legalcode 6 | 7 | ================================================ 8 | Support query volume detail with glance metadata 9 | ================================================ 10 | 11 | https://blueprints.launchpad.net/cinder/+spec/support-volume-glance-metadata-query 12 | 13 | Provide a function to support query volume detail filter by glance metadata. 14 | 15 | Problem description 16 | =================== 17 | 18 | The purpose of this feature is to make user to query volume detail more 19 | conveniently. User can query a specific bootable volume quickly filtering by 20 | image_name or other glance metadata. 21 | 22 | Use Cases 23 | ========= 24 | 25 | At large scale deployment, there could be many bootable volumes in a tenant. 
26 | So if users want to query some bootable volume's details filtered by the image name 27 | or other info that comes from glance metadata, they could use this feature to query 28 | it more conveniently. 29 | There is no need to tiringly list all volumes and search for what you want. 30 | 31 | Proposed change 32 | =============== 33 | 34 | * Add DB query filter using volume_glance_metadata in the api of sqlalchemy. 35 | 36 | * Users can use glance metadata to filter volume details in the cinder api. 37 | The query url is like this:: 38 | 39 | "volumes/detail?glance_metadata={"image_name":"xxx"}" 40 | 41 | Alternatives 42 | ------------ 43 | 44 | None 45 | 46 | Data model impact 47 | ----------------- 48 | 49 | None 50 | 51 | REST API impact 52 | --------------- 53 | 54 | Add query filter support using glance metadata: 55 | * GET /v2/{project_id}/volumes/detail?glance_metadata={"image_name":"xxx"} 56 | 57 | Security impact 58 | --------------- 59 | 60 | None 61 | 62 | Notifications impact 63 | -------------------- 64 | 65 | None. 66 | 67 | Other end user impact 68 | --------------------- 69 | 70 | None 71 | 72 | Performance Impact 73 | ------------------ 74 | 75 | Searching a lot of glance metadata may take longer than other queries. 76 | It may be necessary to add a new index to the key and value columns in the 77 | volume_glance_metadata table to improve search performance. 78 | 79 | Other deployer impact 80 | --------------------- 81 | 82 | None 83 | 84 | Developer impact 85 | ---------------- 86 | 87 | None 88 | 89 | 90 | Implementation 91 | ============== 92 | 93 | Assignee(s) 94 | ----------- 95 | 96 | Primary assignee: 97 | wanghao 98 | 99 | 100 | Work Items 101 | ---------- 102 | 103 | * Implement code in db query filter. 104 | * Update cinderclient to support this function. 105 | * Add the change to the API doc. 
106 | 107 | 108 | Dependencies 109 | ============ 110 | 111 | None 112 | 113 | 114 | Testing 115 | ======= 116 | 117 | Both unit and Tempest tests need to be created to cover the code change that 118 | mentioned in "Proposed change". 119 | 120 | 121 | Documentation Impact 122 | ==================== 123 | 124 | 1. The cinder API documentation will need to be updated to reflect the REST 125 | API changes. 126 | 127 | References 128 | ========== 129 | 130 | None 131 | -------------------------------------------------------------------------------- /specs/newton/delete-multiple-metadata-keys.rst: -------------------------------------------------------------------------------- 1 | .. 2 | This work is licensed under a Creative Commons Attribution 3.0 Unported 3 | License. 4 | 5 | http://creativecommons.org/licenses/by/3.0/legalcode 6 | 7 | 8 | ==================================== 9 | Delete multiple volume metadata keys 10 | ==================================== 11 | 12 | https://blueprints.launchpad.net/cinder/+spec/delete-multiple-metadata-keys 13 | 14 | Problem description 15 | ==================== 16 | The current implementation of Cinder API functionality supports deleting of 17 | multiple volume metadata as multiple requests for deleting only one metadata 18 | key with a request. The more keys are necessary to remove, the more API 19 | requests and DB queries are needed. It would be more efficient to delete 20 | multiple metadata keys with a single request at a time. 21 | 22 | Use Cases 23 | ========= 24 | 25 | Deleting multiple volume metadata keys with a single request. 26 | 27 | Proposed change 28 | ================ 29 | To delete multiple metadata items without affecting the remaining ones, 30 | just update the metadata items with the updated complete list of ones 31 | (without items to delete) in the body of the request. On success, the 32 | server responds with a 200 status code. 33 | 34 | .. 
note:: A PUT request should use etags to avoid the lost update problem. 35 | 36 | Alternatives 37 | ------------ 38 | Have a request that deletes all keys rather than a specified list as an 39 | option. 40 | 41 | Data model impact 42 | ----------------- 43 | None 44 | 45 | REST API impact 46 | --------------- 47 | The existing API for update metadata will be used in this case. The API 48 | call will fail in the case some of the items that are specified to delete 49 | does not exist. The API call will send the set of keys that are supposed 50 | to be updated and ignore missing keys. 51 | 52 | Security impact 53 | --------------- 54 | None 55 | 56 | Notifications impact 57 | -------------------- 58 | One delete notification will be emitted per metadata item deleted as today. 59 | 60 | Other end user impact 61 | --------------------- 62 | A user will have new API. 63 | 64 | Performance Impact 65 | ------------------ 66 | The deleting of a volume metadata keys with a single request allows to 67 | improve performance by reducing DB calls. It's very important in case with 68 | many keys. Better performance due to fewer request round trips. 69 | 70 | Other deployer impact 71 | --------------------- 72 | None 73 | 74 | Developer impact 75 | ---------------- 76 | None 77 | 78 | Implementation 79 | ============== 80 | 81 | Assignee(s) 82 | ----------- 83 | Primary assignee: 84 | Yuriy Nesenenko(ynesenenko@mirantis.com) 85 | 86 | Work Items 87 | ---------- 88 | * Extend python-cinderclient to support new API. 89 | * Add unit and tempest tests. 90 | 91 | Dependencies 92 | ============ 93 | 94 | Depends on Cinder API microversion. 95 | Depends on API WG patch https://review.openstack.org/281511/ 96 | Depends on API WG patch https://review.openstack.org/301846/ 97 | 98 | Testing 99 | ======= 100 | 101 | Unit and functional tests are needed to ensure response is working correctly. 
102 | 103 | Documentation Impact 104 | ==================== 105 | 106 | Documents concerning the API will need to reflect these changes. 107 | 108 | References 109 | ========== 110 | 111 | None 112 | -------------------------------------------------------------------------------- /specs/newton/delete-parameters.rst: -------------------------------------------------------------------------------- 1 | .. 2 | This work is licensed under a Creative Commons Attribution 3.0 Unported 3 | License. 4 | 5 | http://creativecommons.org/licenses/by/3.0/legalcode 6 | 7 | =================================================== 8 | Parameter combinations for delete (vol, snap, etc.) 9 | =================================================== 10 | 11 | https://blueprints.launchpad.net/cinder/+spec/volume-delete-parameters 12 | 13 | This spec outlines how to improve our volume delete functionality 14 | with regards to optional parameters which request non-default 15 | behaviors. 16 | 17 | Problem description 18 | =================== 19 | 20 | It is not possible to combine both volume force delete and cascade delete. 21 | 22 | It is also difficult to add more parameters to volume delete, since 23 | they have to interact in a reasonable way with things like os-force-delete, 24 | which is part of volume delete from a user's point of view, but not the 25 | same API action. 26 | 27 | Use Cases 28 | ========= 29 | 30 | This makes it easier to delete a volume which may be in an odd state. 31 | 32 | It also simplifies our API by passing optional parameters to delete 33 | rather than separate action calls. This allows us to combine the parameters 34 | in meaningful ways (force + cascade), as well as extend the same combinations 35 | to snapshot-delete, cg-delete, etc., without having to make X delete API 36 | actions for each parameter. (eg. os-force-delete, os-force-delete-snapshot, 37 | os-force-delete-cg, etc.) 
38 | 39 | Also consider if we wish to add the option later to delete an 40 | attached volume with a "force-detach" parameter, etc. 41 | 42 | This also should reduce the number of cases where an admin/user needs to use 43 | reset-state operations. 44 | 45 | Proposed change 46 | =============== 47 | 48 | Deprecate use of os-force-delete and make "force" a parameter passed to 49 | volume delete like "cascade" is. 50 | 51 | The ability to default "force" to be an admin-only operation via config 52 | will be maintained. 53 | 54 | Alternatives 55 | ------------ 56 | 57 | Change nothing. 58 | 59 | Data model impact 60 | ----------------- 61 | 62 | None 63 | 64 | REST API impact 65 | --------------- 66 | 67 | * New boolean "force" parameter for volume delete, 68 | which defaults to False. 69 | 70 | This will behave the same as os-force-delete if not 71 | combined with other arguments. 72 | 73 | If combined with cascade, a cascade delete which 74 | ignores the volume and snapshot states will be performed. 75 | 76 | Security impact 77 | --------------- 78 | 79 | None 80 | 81 | Notifications impact 82 | -------------------- 83 | 84 | None 85 | 86 | Other end user impact 87 | --------------------- 88 | 89 | $ cinder delete --force --cascade 90 | 91 | will be accepted. This gives a "delete this volume regardless 92 | of the state of things" operation which does not exist today. 
93 | 94 | Performance Impact 95 | ------------------ 96 | 97 | None 98 | 99 | Other deployer impact 100 | --------------------- 101 | 102 | None 103 | 104 | Developer impact 105 | ---------------- 106 | 107 | None 108 | 109 | Implementation 110 | ============== 111 | 112 | Assignee(s) 113 | ----------- 114 | 115 | Primary assignee: 116 | eharney 117 | 118 | Work Items 119 | ---------- 120 | 121 | * Add "force" as a parameter to volume delete API 122 | * Add logic to handle combination of force and cascade 123 | * (eventually) remove os-force-delete with a new API microversion 124 | * Look at what to do, if anything, in this same regard for 125 | "unmanage", e.g., "unmanage --cascade". 126 | 127 | 128 | Dependencies 129 | ============ 130 | 131 | None 132 | 133 | 134 | Testing 135 | ======= 136 | 137 | New tempest test for volume delete which uses the parameterized 138 | version rather than os-force-delete. 139 | 140 | Tempest test for cascade + force volume delete. 141 | 142 | 143 | Documentation Impact 144 | ==================== 145 | 146 | New arguments for cinderclient volume delete. 147 | 148 | 149 | References 150 | ========== 151 | 152 | * Cascade delete 153 | https://review.openstack.org/#/c/201748/ 154 | 155 | -------------------------------------------------------------------------------- /specs/newton/retype-encrypted-volumes.rst: -------------------------------------------------------------------------------- 1 | .. 2 | This work is licensed under a Creative Commons Attribution 3.0 Unported 3 | License. 4 | 5 | http://creativecommons.org/licenses/by/3.0/legalcode 6 | 7 | ============================================= 8 | Retype volumes with different encryptions 9 | ============================================= 10 | 11 | https://blueprints.launchpad.net/cinder/+spec/retype-encrypted-volume 12 | 13 | Enable the function that changing volume type of a volume [1] to another 14 | type with different encryptions. 
15 | 16 | Problem description 17 | =================== 18 | 19 | Currently Cinder prevents retyping volumes to a volume type with different 20 | encryptions. 21 | 22 | Use Cases 23 | ========= 24 | 25 | * Customers use unencrypted volumes, but later they would like 26 | to change the volumes to encrypted. 27 | * Customers use encrypted volumes and later want to change to unencrypted. 28 | * Customers want to change encryptions of a volume. 29 | 30 | Proposed change 31 | =============== 32 | 33 | Allow retype between encrypted and unencrypted volumes. 34 | Same as current retype mechanism, it allows to retype volumes in available 35 | and in-use volumes. 36 | 37 | If a volume is in available status, the detailed process will be: 38 | 39 | * Create a new volume according to new volume_type. 40 | * Map the two volumes to the volume host. 41 | * Open the device with dm-crypt if volumes are encrypted. This is done 42 | through os-brick/encryptors [2]. 43 | * Copy data from original volume to new volume. 44 | * Close dm-crypt and detach the volumes. 45 | * Delete original volume in backend storage. 46 | 47 | If a volume is in-use status, nothing needs to change except the bug fix [3]. 48 | 49 | Alternatives 50 | ------------ 51 | 52 | None 53 | 54 | Data model impact 55 | ----------------- 56 | 57 | None 58 | 59 | REST API impact 60 | --------------- 61 | 62 | With the feature, it allows users to retype a volume to different 63 | encryptions. 64 | 65 | Security impact 66 | --------------- 67 | 68 | Cinder needs to access encryption keys and decrypt the data. 69 | 70 | Notifications impact 71 | -------------------- 72 | 73 | A flag will be added to current retype notification to show whether 74 | it needs encryption change. 75 | 76 | Other end user impact 77 | --------------------- 78 | During retyping volumes with different encryptions, Cinder needs to get key. 79 | But Barbican can be configued only to give key materials to tenants, not admin. 
80 | This may lead that admin can't retype volumes successfully. In such cases, 81 | Cinder will catch the exception, log the error. The volume to retype will be 82 | set to original state. 83 | As os-brick/encryptors doesn't work on RBD, Sheepdog volumes, the function to 84 | retype such volumes to different encryptions will fail, and volumes will be 85 | set to original state. 86 | 87 | Performance Impact 88 | ------------------ 89 | 90 | It adds the step of encrypting/decrypting data during retype process, 91 | and the impact is dependent on the performance of encryption. 92 | 93 | Other deployer impact 94 | --------------------- 95 | 96 | The feature is dependent on Castellan [4]. Meanwhile, Barbican [5] is currently 97 | the only key manager backend supported by Castellan. Both the two packages 98 | are needed. 99 | 100 | Developer impact 101 | ---------------- 102 | 103 | None 104 | 105 | 106 | Implementation 107 | ============== 108 | 109 | Assignee(s) 110 | ----------- 111 | 112 | Primary assignee: 113 | LisaLi 114 | 115 | 116 | Work Items 117 | ---------- 118 | 119 | * Remove current limitation which disallows the retype. 120 | * Attach/detach encrypted volume through dm-crypt. 121 | 122 | 123 | Dependencies 124 | ============ 125 | 126 | None 127 | 128 | 129 | Testing 130 | ======= 131 | 132 | Unit tests need to be created to cover the code change that 133 | mentioned in "Proposed change". 134 | New tempest test cases will be added after current retype test [6]. 135 | 136 | 137 | Documentation Impact 138 | ==================== 139 | 140 | The cinder API documentation will need to be updated to describe the change. 
141 | 142 | References 143 | ========== 144 | 145 | * [1]: http://docs.openstack.org/cli-reference/cinder.html#cinder-retype 146 | * [2]: https://review.openstack.org/#/c/247372/ 147 | * [3]: https://review.openstack.org/#/c/252809/ 148 | * [4]: https://github.com/openstack/castellan 149 | * [5]: https://github.com/openstack/barbican 150 | * [6]: https://review.openstack.org/#/c/195443/ 151 | -------------------------------------------------------------------------------- /specs/newton/support-backup-import-on-another-storage-database.rst: -------------------------------------------------------------------------------- 1 | .. 2 | This work is licensed under a Creative Commons Attribution 3.0 Unported 3 | License. 4 | 5 | http://creativecommons.org/licenses/by/3.0/legalcode 6 | 7 | ==================================================================== 8 | Support backup import on another Storage database 9 | ==================================================================== 10 | 11 | URL of launchpad blueprint: 12 | 13 | https://blueprints.launchpad.net/cinder/+spec/support-backup-import-on-another-storage-database 14 | 15 | * This backup service can use a backup extra metadata in order 16 | to import a backup on a different Block Storage. 17 | 18 | Problem description 19 | =================== 20 | 21 | * Currently a volume backup can only be restored on the same Block Storage 22 | service. 23 | This is because restoring a volume from a backup requires metadata available 24 | on the database used by the Block Storage service. 25 | 26 | * In order to import backup metadata on another Block Storage database 27 | (i.e disaster recovery site), one has to save the metadata of a volume 28 | backup available on the source database, and then replicate it together 29 | with the data to the other Block Storage site. 
30 | 31 | * Combination of the local backup service that exports and stores 32 | this metadata, together with replication to another Block Storage site, 33 | allows you to completely restore the backup even in the event of 34 | a catastrophic database failure. 35 | 36 | * In addition, having a backup metadata together with a volume backup, 37 | also provides volume portability. 38 | Specifically, backing up a volume and exporting its metadata will allow you 39 | to restore the volume on a completely different Block Storage database, 40 | or even on a different cloud service. 41 | 42 | Use Cases 43 | ========= 44 | 45 | * When a user wants to save backup metadata together with volume backup for 46 | volume portability purpose, or for replication purpose to disaster 47 | recovery site, and be able to restore a volume from a backup on the other 48 | Block Storage site. 49 | 50 | Proposed change 51 | =============== 52 | 53 | * In order to support backup import on a different Block Storage database, 54 | we need to extend chunked backup driver: 55 | 56 | * Enabling to save backup metadata together with the data. 57 | * Add a cinder client api command, that will parse backup metadata from 58 | backup_metadata file, and import that metadata to the other 59 | Block Storage database. 60 | * User will be able to restore a volume backup on the other 61 | Block Storage, using cinder backup restore api. 62 | 63 | Alternatives 64 | ------------ 65 | 66 | Support only storage on local storage. 67 | Use slow manual backup and restore methods. 68 | 69 | Data model impact 70 | ----------------- 71 | 72 | None. 73 | 74 | REST API impact 75 | --------------- 76 | 77 | None. 78 | 79 | Security impact 80 | --------------- 81 | 82 | None. 83 | 84 | Notifications impact 85 | -------------------- 86 | 87 | None. 88 | 89 | Other end user impact 90 | --------------------- 91 | 92 | None. 93 | 94 | Performance Impact 95 | ------------------ 96 | 97 | None. 
98 | 99 | Other deployer impact 100 | --------------------- 101 | 102 | None. 103 | 104 | Developer impact 105 | ---------------- 106 | 107 | None. 108 | 109 | Implementation 110 | ============== 111 | 112 | Assignee(s) 113 | ----------- 114 | 115 | Primary assignee: 116 | ronen-mesonzhnik 117 | 118 | Other contributors: 119 | None 120 | 121 | Work Items 122 | ---------- 123 | 124 | - Implement get_extra_metadata that will return backup's corresponding 125 | database information as encoded string metadata. 126 | - Implement a cinder client api command that can take a backup metadata 127 | file, and do the import from it. 128 | 129 | Dependencies 130 | ============ 131 | 132 | None. 133 | 134 | Testing 135 | ======= 136 | 137 | None. 138 | 139 | Documentation Impact 140 | ==================== 141 | 142 | * In addition to the existing command 'cinder backup-import ', 143 | there will be a command that can accept a file: 144 | 'backup-import-record-from-backup-metadata-file ' 145 | 146 | References 147 | ========== 148 | 149 | * Link to Export and import backup metadata documentation: 150 | http://docs.openstack.org/admin-guide-cloud/blockstorage_volume_backups_export_import.html 151 | -------------------------------------------------------------------------------- /specs/pike/add-volume-type-filter-to-get-pools.rst: -------------------------------------------------------------------------------- 1 | .. 2 | This work is licensed under a Creative Commons Attribution 3.0 Unported 3 | License. 4 | 5 | http://creativecommons.org/licenses/by/3.0/legalcode 6 | 7 | ============================================== 8 | Support retrieve pools filtered by volume-type 9 | ============================================== 10 | 11 | https://blueprints.launchpad.net/cinder/+spec/add-volume-type-filter-to-get-pool 12 | 13 | Add feature that administrators can get back-end storage pools filtered by 14 | volume-type, cinder will return the pools filtered by volume-type's 15 | extra-specs. 
16 | 17 | Problem description 18 | =================== 19 | 20 | Now cinder's ``get-pools`` API doesn't support filtering pools by volume type, 21 | and this is inconvenient when administrators want to know the specified pool's 22 | state which also can meet volume type's requirements, the administrators also 23 | can get all pools and filter them on their own, but it's more complicated and 24 | inefficient. This change intends to cover this situation and bring more 25 | convenience to administrators. 26 | 27 | Use Cases 28 | ========= 29 | 30 | In production environment, administrators often need to have an overall 31 | pool available statistics filtered by volume type, this will help them to make 32 | an adjustment before resources run out. 33 | 34 | Proposed change 35 | =============== 36 | 37 | As we will introduce generalized resource filter in cinder, from the view of 38 | outside cinder, the only thing we should do is to advertise that we can 39 | support this filter now: 40 | 41 | From the view of inside cinder. We will mark ``volume-type`` recognizable, and 42 | support for this filter in logic: 43 | 44 | * once the volume-type (name or id) is detected, the volume type object will be 45 | retrieved before scheduler api is called, and will be passed as a filter 46 | item. 47 | 48 | * the ``scheduler.get_pools`` called, which will call the 49 | ``host_manager.get_pools`` in result. 50 | 51 | * the ``host_manager.get_pools`` will collect the pools information as normal, 52 | and before it returns, the result will be filtered by 53 | ``host.get_filtered_hosts``. 54 | 55 | * the filter properties of ``get_filtered_hosts`` only consists of volume-type 56 | properties. 57 | 58 | * As already proposed by generalized resource filtering [1]_, the changes on 59 | cinder-client for this feature are not needed. 
60 | 61 | 62 | Alternatives 63 | ------------ 64 | 65 | Administrators also can retrieve and filter on their own, but it's more 66 | complicated and inefficient. This change can reduce the request amount and 67 | filter unnecessary data transmitted from server to client. 68 | 69 | Data model impact 70 | ----------------- 71 | 72 | None 73 | 74 | REST API impact 75 | --------------- 76 | 77 | Get-Pool API will accept new query string parameter volume-type. 78 | Administrators can pass name or ID to retrieve pools filtered. 79 | 80 | * ``GET /v3/{tenant_id}/scheduler-stats/get_pools?volume-type=lvm-default`` 81 | 82 | Security impact 83 | --------------- 84 | 85 | None 86 | 87 | Notifications impact 88 | -------------------- 89 | 90 | None. 91 | 92 | Other end user impact 93 | --------------------- 94 | 95 | Within generalized resource filtering, we would ultimately have a 96 | ``get-pools`` command like this below:: 97 | 98 | cinder get-pools --filters volume-type='volume type' 99 | 100 | Performance Impact 101 | ------------------ 102 | 103 | None 104 | 105 | Other deployer impact 106 | --------------------- 107 | 108 | None 109 | 110 | Developer impact 111 | ---------------- 112 | 113 | None 114 | 115 | Implementation 116 | ============== 117 | 118 | Assignee(s) 119 | ----------- 120 | 121 | Primary assignee: 122 | TommyLike(tommylikehu@gmail.com) 123 | 124 | Work Items 125 | ---------- 126 | 127 | * Add Get-Pools's filter. 128 | * Add filter logic when retrieving pools. 129 | * Add related tests. 130 | 131 | 132 | Dependencies 133 | ============ 134 | 135 | Depended on generalized resource filtering [1]_ 136 | 137 | Testing 138 | ======= 139 | 140 | 1. Unit test to test whether volume-type filter can be correctly applied. 141 | 2. Tempest test whether volume-type filter work correctly from API 142 | perspective. 143 | 144 | Documentation Impact 145 | ==================== 146 | 147 | 1. 
The cinder API documentation will need to be updated to reflect the 148 | REST API changes. 149 | 150 | References 151 | ========== 152 | 153 | .. [1] https://review.openstack.org/#/c/441516/ 154 | -------------------------------------------------------------------------------- /specs/pike/backup-init.rst: -------------------------------------------------------------------------------- 1 | .. 2 | This work is licensed under a Creative Commons Attribution 3.0 Unported 3 | License. 4 | 5 | http://creativecommons.org/licenses/by/3.0/legalcode 6 | 7 | ============================ 8 | Backup driver initialization 9 | ============================ 10 | 11 | https://blueprints.launchpad.net/cinder/+spec/backup-init 12 | 13 | We don't initialize Cinder Backup driver during service startup. It means that 14 | we've got cinder-backup service up and running even if it can't connect to the 15 | backup storage. 16 | 17 | Problem description 18 | =================== 19 | 20 | Cinder backup manager does not verify that backup driver is initialized 21 | successfully. If cinder-backup is started successfully we can create a volume 22 | backup. Such backups always will be in 'error' status and tenant user won't be 23 | able to delete them. 24 | 25 | Use Cases 26 | ========= 27 | 28 | Cinder backup service should be marked as 'down' if it can't connect to the 29 | backup storage. 30 | 31 | Proposed change 32 | =============== 33 | 34 | We should introduce for cinder backups the same mechanism as we've got for 35 | volume manager and drivers: 36 | 37 | * Introduce 'init_host' method in backup manager which will be called on 38 | service startup and verify that backup driver is initialized: verify driver's 39 | configuration is correct, depends on driver, we could check connection to 40 | storage, list of available backups. etc. 41 | 42 | * If backup driver initialization fails, manager will mark backup service 43 | as 'down'. 
44 | 45 | In case of initialization failure, cinder will try to do it periodically 46 | depends on 'periodic_interval' config option value. 47 | 48 | Backup service should be initialized in 'service_down_time' time 49 | interval or will be marked as 'down'. 50 | 51 | Alternatives 52 | ------------ 53 | 54 | Check for backup storage is available on backup create call. If storage is not 55 | available, remove 'host' field from backup object. We could try to re-schedule 56 | backup creation on the other host. 57 | 58 | Data model impact 59 | ----------------- 60 | 61 | None 62 | 63 | REST API impact 64 | --------------- 65 | 66 | None 67 | 68 | Security impact 69 | --------------- 70 | 71 | None 72 | 73 | Notifications impact 74 | -------------------- 75 | 76 | New notifications for backup initialization failure and success will be added. 77 | 78 | Other end user impact 79 | --------------------- 80 | 81 | User will be able to delete backup in error state if it was not created on 82 | backend. 83 | 84 | Performance Impact 85 | ------------------ 86 | 87 | None 88 | 89 | Other deployer impact 90 | --------------------- 91 | 92 | New 'backup_periodic_initialization' and 'backup_initialization_timeout' 93 | config option will be added. Deployer have to enable 94 | 'backup_periodic_initialization' if needed. 95 | 96 | Developer impact 97 | ---------------- 98 | 99 | Backup driver developers should implement new APIs. 100 | 101 | 102 | Implementation 103 | ============== 104 | 105 | Assignee(s) 106 | ----------- 107 | 108 | Primary assignee: 109 | Ivan Kolodyazhny 110 | 111 | Other contributors: 112 | Backup drivers maintainers. 113 | 114 | Work Items 115 | ---------- 116 | 117 | * Implement 'do_setup' method in a base backup driver which won't do anything 118 | 119 | * Implement 'do_setup' in each backup driver. 120 | 121 | * Call driver's 'do_setup' during backup-manager 'init_host' call. 
122 | 123 | 124 | 125 | Dependencies 126 | ============ 127 | 128 | None 129 | 130 | 131 | Testing 132 | ======= 133 | 134 | Both unit and Tempest tests should be implemented to cover new feature. 135 | 136 | 137 | Documentation Impact 138 | ==================== 139 | 140 | None 141 | 142 | 143 | References 144 | ========== 145 | 146 | * https://bugs.launchpad.net/cinder/+bug/1598709 147 | -------------------------------------------------------------------------------- /specs/pike/capacity_based_qos.rst: -------------------------------------------------------------------------------- 1 | .. 2 | This work is licensed under a Creative Commons Attribution 3.0 Unported 3 | License. 4 | 5 | http://creativecommons.org/licenses/by/3.0/legalcode 6 | 7 | ========================================== 8 | Capacity-based QoS 9 | ========================================== 10 | 11 | Include the URL of your launchpad blueprint: 12 | 13 | https://blueprints.launchpad.net/cinder/+spec/capacity-based-qos 14 | 15 | QoS values in Cinder currently are able to be set to static 16 | values. This work proposes a way to derive QoS limit values 17 | based on volume capacities rather than static values. 18 | 19 | 20 | Problem description 21 | =================== 22 | 23 | This proposes a mechanism to provision IOPS on a per-volume basis with 24 | the IOPS values adjusted based on the volume's size. (IOPS per GB) 25 | 26 | 27 | Use Cases 28 | ========= 29 | 30 | A deployer wishes to cap "usage" of this system to limits based 31 | on space usage as well as throughput, in order to bill customers 32 | and not exceed limits of the backend. 
33 | 34 | Associating IOPS and size allows you to provide tiers such as 35 | 36 | Gold: 1000 GB at 10000 IOPS per GB 37 | Silver: 1000 GB at 5000 IOPS per GB 38 | Bronze: 500 GB at 5000 IOPS per GB 39 | 40 | 41 | Proposed change 42 | =============== 43 | 44 | Allow creation of qos_keys: 45 | read_iops_sec_per_gb 46 | write_iops_sec_per_gb 47 | total_iops_sec_per_gb 48 | 49 | These function the same as our current _iops_sec keys, 50 | except they are scaled by the volume size. 51 | 52 | 53 | Alternatives 54 | ------------ 55 | 56 | None 57 | 58 | Data model impact 59 | ----------------- 60 | 61 | None 62 | 63 | REST API impact 64 | --------------- 65 | 66 | None 67 | 68 | Security impact 69 | --------------- 70 | 71 | None 72 | 73 | Notifications impact 74 | -------------------- 75 | 76 | None 77 | 78 | Other end user impact 79 | --------------------- 80 | 81 | None 82 | 83 | Performance Impact 84 | ------------------ 85 | 86 | None 87 | 88 | Other deployer impact 89 | --------------------- 90 | 91 | New optional qos spec values. 92 | 93 | Off by default, opt-in. 94 | 95 | Developer impact 96 | ---------------- 97 | 98 | None 99 | 100 | 101 | Implementation 102 | ============== 103 | 104 | 105 | Assignee(s) 106 | ----------- 107 | 108 | Primary assignee: 109 | eharney 110 | 111 | 112 | Work Items 113 | ---------- 114 | 115 | * https://review.openstack.org/#/c/447127/ 116 | 117 | 118 | Dependencies 119 | ============ 120 | 121 | 122 | Testing 123 | ======= 124 | 125 | 126 | Documentation Impact 127 | ==================== 128 | 129 | Document new fields available in qos types. 130 | 131 | 132 | References 133 | ========== 134 | 135 | Code: https://review.openstack.org/#/c/447127/ 136 | -------------------------------------------------------------------------------- /specs/pike/metadata-for-backup-resource.rst: -------------------------------------------------------------------------------- 1 | .. 
2 | This work is licensed under a Creative Commons Attribution 3.0 Unported 3 | License. 4 | 5 | http://creativecommons.org/licenses/by/3.0/legalcode 6 | 7 | =========================== 8 | Support metadata for backup 9 | =========================== 10 | 11 | https://blueprints.launchpad.net/cinder/+spec/metadata-for-backup 12 | 13 | Add a new "metadata" property for backup resource. 14 | 15 | Problem description 16 | =================== 17 | 18 | Backup resource lost the ability for getting/setting metadata property. 19 | 20 | 21 | Use Cases 22 | ========= 23 | 24 | The metadata here for backup is the descriptive metadata. It's used for 25 | discovery and identification. Users could add key-value pairs for the backups 26 | to describe them. And users can also filter backups with specified metadata. 27 | 28 | 29 | Proposed change 30 | =============== 31 | 32 | 1. The "metadata" property will be added to backup object. 33 | 34 | 2. A new DB table "backup_metadata" will be created. 35 | :: 36 | 37 | ----------------------------------------------------------------------- 38 | | created_at | updated_at | deleted_at | id | backup_id | key | value | 39 | ----------------------------------------------------------------------- 40 | | | | | | | | | 41 | ----------------------------------------------------------------------- 42 | 43 | The primary key is "id". 44 | 45 | 3. The backup create/update API will be updated to support "metadata". 46 | :: 47 | 48 | POST /v3/{project_id}/backups 49 | PUT /v3/{project_id}/backups/{backup_id} 50 | 51 | the request body can contain "metadata". 52 | { 53 | "metadata":{ 54 | "key1": "value1", 55 | "key2": "value2" 56 | } 57 | } 58 | 59 | 4. A set of new APIs will be created. It's used for backup metadata's CRUD. 
60 | :: 61 | 62 | GET /v3/{project_id}/backups/{backup_id}/metadata 63 | show a backup's metadata 64 | 65 | POST /v3/{project_id}/backups/{backup_id}/metadata 66 | create or replaces metadata for a backup 67 | 68 | PUT /v3/{project_id}/backups/{backup_id}/metadata 69 | replace all the backup's metadata 70 | 71 | GET /v3/{project_id}/backups/{backup_id}/metadata/{key} 72 | show a backup's metadata for a specific key 73 | 74 | DELETE /v3/{project_id}/backups/{backup_id}/metadata/{key} 75 | delete a specified metadata 76 | 77 | PUT /v3/{project_id}/backups/{backup_id}/metadata/{key} 78 | update a specified metadata 79 | 80 | Alternatives 81 | ------------ 82 | 83 | Leave as it is. 84 | 85 | Data model impact 86 | ----------------- 87 | 88 | Backup model will be updated with new property "metadata". 89 | 90 | REST API impact 91 | --------------- 92 | 93 | * The backup create/update API's request body will be updated. 94 | * A set of new APIs related to backup metadata will be created. 95 | 96 | Security impact 97 | --------------- 98 | 99 | None 100 | 101 | Notifications impact 102 | -------------------- 103 | 104 | The new APIs will send new notifications as well. 105 | 106 | Other end user impact 107 | --------------------- 108 | 109 | None 110 | 111 | Performance Impact 112 | ------------------ 113 | 114 | A new "backup_metadata" table will be created so that the DB conjunction action 115 | may let the search performance reduce a little. 116 | 117 | Other deployer impact 118 | --------------------- 119 | 120 | None 121 | 122 | Developer impact 123 | ---------------- 124 | 125 | None 126 | 127 | 128 | Implementation 129 | ============== 130 | 131 | Assignee(s) 132 | ----------- 133 | 134 | Primary assignee: 135 | wangxiyuan(wxy) 136 | 137 | Work Items 138 | ---------- 139 | 140 | * Add metadata property to backup object and bump its version. 141 | * Create a new DB table "backup_metadata" and add db upgrade script. 142 | * Update backup create/update API. 
143 | * Add a tuple of new APIs for backup metadata. 144 | 145 | 146 | Dependencies 147 | ============ 148 | 149 | None 150 | 151 | 152 | Testing 153 | ======= 154 | 155 | * Unit tests 156 | 157 | 158 | Documentation Impact 159 | ==================== 160 | 161 | * Api-ref need update. 162 | 163 | 164 | References 165 | ========== 166 | 167 | None 168 | -------------------------------------------------------------------------------- /specs/pike/support-get-volume-metadata-summary.rst: -------------------------------------------------------------------------------- 1 | .. 2 | This work is licensed under a Creative Commons Attribution 3.0 Unported 3 | License. 4 | 5 | http://creativecommons.org/licenses/by/3.0/legalcode 6 | 7 | =================================== 8 | Support get volume metadata summary 9 | =================================== 10 | 11 | https://blueprints.launchpad.net/cinder/+spec/metadata-for-volume-summary 12 | 13 | 14 | Support get volumes' metadata through volume summary API. 15 | 16 | Problem description 17 | =================== 18 | 19 | Currently, Cinder supports filter volumes with metadata. But in some case, 20 | users don't know what metadata all the volumes contain or what metadata is 21 | valid to filter volumes. Then users need to show the volumes one by one to get 22 | the correct metadata, this is really a heavy and unfriendly way. Imaging that 23 | if there are hundreds of volumes, admin users will take a very long time to 24 | query all metadata. 25 | 26 | Use Cases 27 | ========= 28 | 29 | 1. For users, they can get all the metadata easily just through one API 30 | request. 31 | 2. For dashboard, such as Horizon, it can use this metadata information to show 32 | end users a dropdown list. 33 | 34 | Proposed change 35 | =============== 36 | 37 | 1. DB layer change: 38 | All the volumes' metadata can be got by the sql query. 39 | 40 | 2. API layer change: 41 | Add a new micro version. 
Add "metadata" to volume-summary API response 42 | body. The body will be like:: 43 | 44 | "metadata": {"key1": ["value1"],"key2": ["value2", "value3"]} 45 | 46 | Alternatives 47 | ------------ 48 | 49 | Leave as it is. Let the operators get volumes metadata by themselves through 50 | some peripheral ways. Such as, create a script to call volume-list-detail API 51 | and then analyse the result one by one. 52 | 53 | 54 | Data model impact 55 | ----------------- 56 | 57 | None 58 | 59 | REST API impact 60 | --------------- 61 | 62 | A new microversion will be created. 63 | 64 | Cinder-client impact 65 | -------------------- 66 | 67 | Now both OpenStackClient and CinderClient don't support volume-summary 68 | command. We can add them as well. 69 | 70 | Security impact 71 | --------------- 72 | 73 | None 74 | 75 | Notifications impact 76 | -------------------- 77 | 78 | None 79 | 80 | Other end user impact 81 | --------------------- 82 | 83 | None 84 | 85 | Performance Impact 86 | ------------------ 87 | 88 | There is a little performance influence about volume-summary API since a new 89 | sql query action will be added. 90 | 91 | Other deployer impact 92 | --------------------- 93 | 94 | None 95 | 96 | Developer impact 97 | ---------------- 98 | 99 | None 100 | 101 | Implementation 102 | ============== 103 | 104 | Assignee(s) 105 | ----------- 106 | 107 | Primary assignee: 108 | wangxiyuan(wangxiyuan@huawei.com) 109 | 110 | Work Items 111 | ---------- 112 | 113 | * Add a new microversion. 114 | * Add volumes' metadata to the volume-summary API's response body. 115 | * Add client side support. 116 | 117 | Dependencies 118 | ============ 119 | 120 | None 121 | 122 | Testing 123 | ======= 124 | 125 | * Add unit tests. 126 | 127 | Documentation Impact 128 | ==================== 129 | 130 | Update API documentation. 
131 | 132 | References 133 | ========== 134 | 135 | None 136 | -------------------------------------------------------------------------------- /specs/queens/rbd-encryption.rst: -------------------------------------------------------------------------------- 1 | .. 2 | This work is licensed under a Creative Commons Attribution 3.0 Unported 3 | License. 4 | 5 | http://creativecommons.org/licenses/by/3.0/legalcode 6 | 7 | ========================================== 8 | RBD Volume Encryption 9 | ========================================== 10 | 11 | https://blueprints.launchpad.net/nova/+spec/libvirt-qemu-native-luks 12 | 13 | This feature adds support to the Cinder RBD volume driver 14 | to support Cinder's volume encryption. 15 | 16 | This requires a few changes in Cinder and Nova due to the fact that 17 | RBD volumes are attached by qemu directly and not as block devices 18 | on the host. 19 | 20 | This fills a feature gap for the RBD driver in Cinder. 21 | 22 | 23 | Problem description 24 | =================== 25 | 26 | The RBD driver does not support volume encryption. 27 | 28 | Use Cases 29 | ========= 30 | 31 | Volume encryption is a common requirement for deployments, 32 | particularly where a deployer needs to meet particular security 33 | standards. 34 | 35 | Proposed change 36 | =============== 37 | 38 | Enable volume encryption for RBD via qemu's LUKS block layer. 39 | 40 | This means that Nova has to support libvirt operations to manage 41 | this qemu feature. This is done here: 42 | 43 | * https://review.openstack.org/#/c/523958/ 44 | 45 | We also need Cinder to format volumes upon creation with a LUKS 46 | header. This is currently done by os-brick for iSCSI drivers, 47 | but can't be done in the same way for RBD since there is no 48 | block device on the compute host, and dm-crypt is not used. 
49 | 50 | (Note: this will also be true when using qemu's iSCSI initiator 51 | with Nova) 52 | 53 | Alternatives 54 | ------------ 55 | 56 | None 57 | 58 | Data model impact 59 | ----------------- 60 | 61 | None 62 | 63 | REST API impact 64 | --------------- 65 | 66 | None 67 | 68 | Security impact 69 | --------------- 70 | 71 | This is a security-focused feature, but it uses the already existing 72 | infrastructure of Cinder volume encryption. 73 | 74 | The way encryption works when using RBD is slightly different from 75 | other Cinder drivers. Decryption/encryption is handled inside of 76 | qemu rather than at the device-mapper layer on the host via dm-crypt. 77 | 78 | This means fewer operations having to be run as root, and less exposure 79 | of decrypted data to the rest of the system via block devices. 80 | 81 | But, the feature in general has the same security implications as 82 | cinder volume encryption does for other drivers. 83 | 84 | Notifications impact 85 | -------------------- 86 | 87 | None 88 | 89 | Other end user impact 90 | --------------------- 91 | 92 | None 93 | 94 | Performance Impact 95 | ------------------ 96 | 97 | Using encryption could result in slightly higher CPU usage on compute 98 | nodes. Should be comparable to using encryption with any other Cinder 99 | driver. 
100 | 101 | Other deployer impact 102 | --------------------- 103 | 104 | None 105 | 106 | Developer impact 107 | ---------------- 108 | 109 | None 110 | 111 | 112 | Implementation 113 | ============== 114 | 115 | Assignee(s) 116 | ----------- 117 | 118 | Primary assignee: 119 | eharney 120 | 121 | Other contributors: 122 | lyarwood 123 | 124 | Work Items 125 | ---------- 126 | 127 | * https://review.openstack.org/534811/ 128 | * https://review.openstack.org/523958/ 129 | 130 | Dependencies 131 | ============ 132 | 133 | * Nova changes here: 134 | - https://blueprints.launchpad.net/nova/+spec/libvirt-qemu-native-luks 135 | 136 | * QEMU 2.6 137 | * libvirt 2.2.0 138 | 139 | Testing 140 | ======= 141 | 142 | This feature will be covered by the standard tempest tests used for all 143 | volume drivers. 144 | 145 | Gate configuration issues are being sorted out here: 146 | https://review.openstack.org/#/c/536350/ 147 | 148 | 149 | Documentation Impact 150 | ==================== 151 | 152 | * Document that volume encryption now works for the RBD volume driver 153 | * Current limitation: attached volume migration is not supported 154 | 155 | References 156 | ========== 157 | 158 | * https://review.openstack.org/#/q/topic:bp/libvirt-qemu-native-luks 159 | 160 | * https://blueprints.launchpad.net/nova/+spec/libvirt-qemu-native-luks 161 | 162 | * http://lists.openstack.org/pipermail/openstack-dev/2018-January/126440.html 163 | -------------------------------------------------------------------------------- /specs/queens/report-backend-state-in-service-list.rst: -------------------------------------------------------------------------------- 1 | .. 2 | This work is licensed under a Creative Commons Attribution 3.0 Unported 3 | License. 
4 | 5 | http://creativecommons.org/licenses/by/3.0/legalcode 6 | 7 | ==================================== 8 | Report backend state in service list 9 | ==================================== 10 | 11 | https://blueprints.launchpad.net/cinder/+spec/report-backend-state-in-service-list 12 | 13 | Storage driver reports state of backend storage device and let admin operator 14 | know it via service list for maintenance purpose. 15 | 16 | Problem description 17 | =================== 18 | 19 | Currently, Cinder couldn't report backend state to service, operators only 20 | know that cinder-volume process is up, but isn't aware of whether the backend 21 | storage device is ok. Users still can create volume and go to fail over and 22 | over again. To make maintenance easier, operator could query storage device 23 | state via service list and fix the problem more quickly. If device state is 24 | *down*, that means volume creation will fail. 25 | 26 | 27 | Use Cases 28 | ========= 29 | 30 | In large scale cloud system, there could be many backends existing in the 31 | system. If volume, snapshot or other resources creation goes to failure, 32 | operators or cloud management system could query the service first and get 33 | the backend device state in every service. If device state is *down*, specify 34 | that storage device has got some problems. Give operators/management system 35 | more information to locate bug more quickly. 36 | 37 | Proposed change 38 | =============== 39 | 40 | * Each driver reports the backend state in "get_volume_stats" by adding 41 | key/value: "backend_state: up/down"[1]. 42 | * When calling 'service list', get this information from scheduler for every 43 | backend. 44 | * Add 'backend_state: up/down' in response body of service list API if context 45 | is admin. 46 | * Before all drivers support this feature, if the result of get_volume_stats 47 | doesn't include the backend state, Cinder will set backend_state to 'up' by 48 | default. 
49 | 50 | 51 | Alternatives 52 | ------------ 53 | 54 | Add cinder manage command to query backend device state from driver directly. 55 | 56 | 57 | Data model impact 58 | ----------------- 59 | 60 | None 61 | 62 | REST API impact 63 | --------------- 64 | 65 | Add backend_state: up/down into response body of service list and also need 66 | a microversions for this feature: 67 | 68 | .. code-block:: console 69 | 70 | GET /v3/{project_id}/os-services 71 | 72 | RESP BODY: 73 | 74 | .. code-block:: python 75 | 76 | {"services": [{"host": "host@backend1", 77 | ..., 78 | "backend_status": "up", 79 | }, 80 | {"host": "host@backend2", 81 | ..., 82 | "backend_status": "down", 83 | }] 84 | } 85 | 86 | Security impact 87 | --------------- 88 | 89 | None 90 | 91 | Notifications impact 92 | -------------------- 93 | 94 | None. 95 | 96 | Other end user impact 97 | --------------------- 98 | 99 | None 100 | 101 | Performance Impact 102 | ------------------ 103 | 104 | None 105 | 106 | Other deployer impact 107 | --------------------- 108 | 109 | None 110 | 111 | 112 | Developer impact 113 | ---------------- 114 | 115 | Driver maintainer needs to add backend state when reporting 116 | volume stats. 117 | 118 | 119 | Implementation 120 | ============== 121 | 122 | Assignee(s) 123 | ----------- 124 | 125 | Primary assignee: 126 | wanghao 127 | 128 | 129 | Work Items 130 | ---------- 131 | 132 | * Implement code in Cinder API and scheduler. 133 | * Update cinderclient to support this function. 134 | * Add change to API doc. 135 | 136 | 137 | Dependencies 138 | ============ 139 | 140 | None 141 | 142 | 143 | Testing 144 | ======= 145 | 146 | Both unit and Tempest tests need to be created to cover the code change that 147 | mentioned in "Proposed change". 148 | 149 | 150 | Documentation Impact 151 | ==================== 152 | 153 | 1. The cinder API documentation will need to be updated to reflect the REST 154 | API changes. 
155 | 156 | References 157 | ========== 158 | 159 | [1]https://docs.openstack.org/cinder/latest/contributor/drivers.html 160 | -------------------------------------------------------------------------------- /specs/rocky/.placeholder: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/cinder-specs/ae9076bdf8651d351e093d5e58b595ed34de4ff9/specs/rocky/.placeholder -------------------------------------------------------------------------------- /specs/rocky/transfer-snapshots-with-volumes.rst: -------------------------------------------------------------------------------- 1 | .. 2 | This work is licensed under a Creative Commons Attribution 3.0 Unported 3 | License. 4 | 5 | http://creativecommons.org/licenses/by/3.0/legalcode 6 | 7 | ========================================== 8 | Transfer snapshots with volumes 9 | ========================================== 10 | 11 | https://blueprints.launchpad.net/cinder/+spec/transfer-snps-with-vols 12 | 13 | This spec aims to extend transfer function that can transfer snapshots 14 | with volumes at same time. 15 | 16 | Problem description 17 | =================== 18 | 19 | Currently, Cinder can transfer volumes to another project's owner, but 20 | if this volume has snapshots before transferring, after this operation, 21 | new owner can't delete this volume successfully since it has snapshots. 22 | 23 | Use Cases 24 | ========= 25 | 26 | When transferring volume with snapshots, a user will now have a choice whether 27 | or not to transfer those snapshots along with a volume. By default, snapshots 28 | are transferred, but selecting this option will allow snapshots not to be 29 | transferred to another project. 30 | Note those snapshots can be deleted by the remote user if necessary. 
31 | 32 | Proposed change 33 | =============== 34 | 35 | In normal deleting, Cinder will disallow the request since the volume has 36 | snapshots, but unfortunately, Cinder has cascade deleting operation now, 37 | the request will be passed down to driver, some driver will raise 38 | exception since snapshot is still existing. So it also should be changed in 39 | cascade deletion process. 40 | 41 | * Add an optional argument "--no-snapshots" in transfer API and CLI. If user 42 | didn't specify it, cinder will transfer snapshots that a volume has by 43 | default. 44 | * Add a new field in transfer DB model to record this option. 45 | * Update snapshot's information in DB like user id, project id, etc. 46 | * Check if the volume still has some snapshots in other project when cascade 47 | deleting. 48 | 49 | 50 | Alternatives 51 | ------------ 52 | 53 | Another option is cinder transfer the snapshots if user specifies an option 54 | argument '--with-snapshots'. 55 | 56 | This option can be minimal with the change, in order to make the client code 57 | more simple. 58 | 59 | Data model impact 60 | ----------------- 61 | 62 | Add a new field "with_snapshots(boolean)" in transfer model. 63 | 64 | 65 | REST API impact 66 | --------------- 67 | 68 | * New microversion in Cinder API. 69 | 70 | * Add a new V3 API and an optional argument "no_snapshots":: 71 | 72 | POST /v3/{project_id}/volume-transfers 73 | RESP BODY: {"transfer": { 74 | ... 75 | no_snapshots: [True/False], 76 | } 77 | } 78 | 79 | 80 | Security impact 81 | --------------- 82 | 83 | If users didn't transfer snapshots with volume, there could be kind of 84 | security impact that remote users may be able to act upon the untransferred 85 | snapshots. For example, by leveraging COW, that remote user can change the 86 | snapshot size in backend. 87 | 88 | Notifications impact 89 | -------------------- 90 | 91 | Add 'with_snapshots' information to transfer notificaiton. 
92 | 93 | Other end user impact 94 | --------------------- 95 | 96 | None 97 | 98 | Performance Impact 99 | ------------------ 100 | 101 | There could be a db performance issue if a lot of snapshots associated with a 102 | given volume since cinder need to change those snapshots' project id, user id, 103 | etc. 104 | 105 | Other deployer impact 106 | --------------------- 107 | 108 | None 109 | 110 | 111 | Developer impact 112 | ---------------- 113 | 114 | Drivers that implement some form of volume ownership tracking on 115 | the backend will need to be fixed to track this change too. 116 | 117 | 118 | Implementation 119 | ============== 120 | 121 | Assignee(s) 122 | ----------- 123 | 124 | Primary assignee: 125 | wanghao 126 | 127 | Other contributors: 128 | None 129 | 130 | Work Items 131 | ---------- 132 | 133 | * Implement feature in Cinder tree. 134 | * Update cinderclient to support this functionality. 135 | * Add change to API doc. 136 | 137 | 138 | Dependencies 139 | ============ 140 | 141 | None 142 | 143 | 144 | Testing 145 | ======= 146 | 147 | Both unit and Tempest tests are needed to be created to cover the code change. 148 | 149 | 150 | Documentation Impact 151 | ==================== 152 | 153 | The cinder API documention will need to be updated to reflect the REST 154 | API changes. 155 | 156 | Devref entry on the volume transfer driver entry point should be created. 157 | 158 | 159 | References 160 | ========== 161 | None 162 | -------------------------------------------------------------------------------- /specs/stein/add-user-id-attribute-to-backup-response.rst: -------------------------------------------------------------------------------- 1 | .. 2 | This work is licensed under a Creative Commons Attribution 3.0 Unported 3 | License. 
4 | 5 | http://creativecommons.org/licenses/by/3.0/legalcode 6 | 7 | ======================================== 8 | Add user_id attribute to backup response 9 | ======================================== 10 | https://blueprints.launchpad.net/cinder/+spec/add-user-id-attribute-to-backup-response 11 | 12 | This blueprint proposes to add ``user_id`` attribute to 13 | the response body of list backup with detail and show backup detail APIs. 14 | 15 | Problem description 16 | =================== 17 | 18 | Currently, there is a ``user_id`` field in the ``backups`` table. These 19 | fields are very useful for admin to manage the backup file, but this 20 | is not returned in response body. So it is difficult to manage the resources 21 | under the project. If there are multiple users under one project, it is 22 | impossible to distinguish which user the backup file belongs to. 23 | 24 | Use Cases 25 | ========= 26 | 27 | In large scale environment, lots of backups resources were created in system, 28 | that we can only see the project to which the backup file belongs, but we 29 | cannot know to which user the backup belongs. 30 | 31 | Administrators would like the ability to identify the users that have created 32 | backups. 33 | 34 | Proposed change 35 | =============== 36 | 37 | This spec proposes to add ``user_id`` attribute to the 38 | response body of list backup with detail and show backup detail APIs. 39 | 40 | Add a new microverion API to add ``user_id`` attribute 41 | to the response body of list backup with detail and show backup detail APIs: 42 | 43 | - List backups with detail GET /v3/{project_id}/backups/detail 44 | 45 | - Show backup detail GET /v3/{project_id}/backups/{backup_id} 46 | 47 | Alternatives 48 | ------------ 49 | 50 | The admin/user could get ``user_id`` from the context as a log print, but 51 | it's difficult to find it out easily, especially when the user wants to find 52 | a very old backup file. 
53 | 54 | REST API impact 55 | --------------- 56 | 57 | Add a new microversion in Cinder API. 58 | 59 | List backups with detail:: 60 | 61 | GET /v3/{project_id}/backups/detail 62 | Response BODY: 63 | { 64 | "backups": [{ 65 | ... 66 | "user_id": "515ba0dd59f84f25a6a084a45d8d93b2" 67 | }] 68 | } 69 | 70 | Show backup detail:: 71 | 72 | GET /v3/{project_id}/backups/{backup_id} 73 | Response BODY: 74 | { 75 | "backups": [{ 76 | ... 77 | "user_id": "515ba0dd59f84f25a6a084a45d8d93b2" 78 | }] 79 | } 80 | 81 | Calling this method shows a ``user_id`` for volume backup. 82 | It is intended for admins to use, which is used to display the user to which 83 | the backup file belongs, and controlled by ``BACKUP_ATTRIBUTES_POLICY``. 84 | 85 | Data model impact 86 | ----------------- 87 | 88 | None 89 | 90 | Security impact 91 | --------------- 92 | 93 | None 94 | 95 | Notifications impact 96 | -------------------- 97 | 98 | None 99 | 100 | Other end user impact 101 | --------------------- 102 | 103 | None 104 | 105 | Performance Impact 106 | ------------------ 107 | 108 | None 109 | 110 | Other deployer impact 111 | --------------------- 112 | 113 | None 114 | 115 | Developer impact 116 | ---------------- 117 | 118 | None 119 | 120 | Implementation 121 | ============== 122 | 123 | Assignee(s) 124 | ----------- 125 | 126 | Primary assignee: 127 | Brin Zhang 128 | 129 | Work Items 130 | ---------- 131 | 132 | * Add a new microversion. 133 | * Add ``user_id`` to the response body of list backup 134 | with detail and show backup detail APIs. 135 | * Add the related unit tests. 136 | * Update related list backup with detail and show detail api doc. 
137 | 138 | Dependencies 139 | ============ 140 | 141 | None 142 | 143 | Testing 144 | ======= 145 | 146 | * Unit-tests, tempest and other related test should be implemented 147 | 148 | Documentation Impact 149 | ==================== 150 | 151 | None 152 | 153 | References 154 | ========== 155 | 156 | None 157 | -------------------------------------------------------------------------------- /specs/stein/delete-from-db.rst: -------------------------------------------------------------------------------- 1 | .. 2 | This work is licensed under a Creative Commons Attribution 3.0 Unported 3 | License. 4 | 5 | http://creativecommons.org/licenses/by/3.0/legalcode 6 | 7 | ================================================== 8 | Object deletion from DB without driver interaction 9 | ================================================== 10 | 11 | https://blueprints.launchpad.net/cinder/+spec/cinder-delete-from-db 12 | 13 | 14 | Problem description 15 | =================== 16 | 17 | It's impossible to delete a volume, a backup or a snapshot when the service is 18 | not available. There's no way to bypass driver interaction. 19 | 20 | Use Cases 21 | ========= 22 | 23 | Sometimes, OpenStack admins are pulling the plugs off some storage backends to 24 | use a new one. They realize later that they need to cleanup the various 25 | objects. 26 | 27 | 28 | Proposed change 29 | =============== 30 | 31 | With cinder-manage, we will have a --db-only switch under the volume delete 32 | command. The snapshots are deleted in cascade. We will also implement backup 33 | subcommand that acts like the volume subcommand. 34 | 35 | Alternatives 36 | ------------ 37 | 38 | The only known workaround is to manually update multiple tables and set the 39 | status to 'deleted' to various objects. 
40 | 41 | Data model impact 42 | ----------------- 43 | 44 | None 45 | 46 | REST API impact 47 | --------------- 48 | 49 | None 50 | 51 | Security impact 52 | --------------- 53 | 54 | None 55 | 56 | Active/Active HA impact 57 | ----------------------- 58 | 59 | None 60 | 61 | Notifications impact 62 | -------------------- 63 | 64 | None 65 | 66 | Other end user impact 67 | --------------------- 68 | 69 | These commands will be available to operators: 70 | 71 | .. code-block:: console 72 | 73 | cinder-manage volume delete [--db-only] 74 | cinder-manage backup delete [--db-only] 75 | 76 | Performance Impact 77 | ------------------ 78 | 79 | None 80 | 81 | Other deployer impact 82 | --------------------- 83 | 84 | None 85 | 86 | Developer impact 87 | ---------------- 88 | 89 | None 90 | 91 | Implementation 92 | ============== 93 | 94 | Assignee(s) 95 | ----------- 96 | 97 | Primary assignee: David Vallee Delisle 98 | 99 | Work Items 100 | ---------- 101 | 102 | The following changes will be done under cinder-manage command: 103 | * Add a --db-only switch to the volume delete command. 104 | * Add a backup delete subcommand with --db-only support. 105 | * The snapshots are deleted in cascade when a volume is deleted. 106 | 107 | The following changes will be done in the rpcapis: 108 | * Add a db_only argument in the delete function 109 | 110 | The following changes will be done in the manages: 111 | * Add a db_only argument in the delete function 112 | 113 | Dependencies 114 | ============ 115 | 116 | None 117 | 118 | Testing 119 | ======= 120 | 121 | Create tests to validate the delete volume and the delete backup are really 122 | deleting. 123 | 124 | Documentation Impact 125 | ==================== 126 | 127 | Man page for cinder-manage and any associated documentation will be updated. 
128 | 129 | References 130 | ========== 131 | 132 | None 133 | -------------------------------------------------------------------------------- /specs/stein/driver-reinitialization-after-fail.rst: -------------------------------------------------------------------------------- 1 | .. 2 | This work is licensed under a Creative Commons Attribution 3.0 Unported 3 | License. 4 | 5 | http://creativecommons.org/licenses/by/3.0/legalcode 6 | 7 | ================================================== 8 | Driver reinitialization after failure 9 | ================================================== 10 | 11 | https://blueprints.launchpad.net/cinder/+spec/driver-initialization-after-fail 12 | 13 | This spec proposes support for reintialization of volume drivers after it fails 14 | during startup. 15 | 16 | Problem description 17 | =================== 18 | 19 | During Cinder initialization, for many reasons, the storage backend might not 20 | be ready and responding. In this case, the driver will not be loaded even if 21 | the array becomes available right after. 22 | 23 | As there is no retry in Cinder volume service, even later the backend storage 24 | is ready, Cinder volume service can't recover by itself. It needs users 25 | to restart the volume service manually. 26 | 27 | Use Cases 28 | ========= 29 | 30 | When a Cinder volume service starts, sometimes its corresponding storage 31 | services are not ready. But later the storage services become ready. As a 32 | result the volume service can't work properly and can't recover by itself. 33 | But the administrators probably prefer Cinder to automatically recover from 34 | the temporary failures without manual intervention of restarting the service. 35 | 36 | Proposed change 37 | =============== 38 | 39 | The proposal is to 40 | 41 | - Allow reinitialization of a volume driver when it failed to initialize. 42 | 43 | - Provide a configuration to set the maximum retry numbers. 
44 | 45 | - The interval of retry will exponentially backoff. Every interval is the 46 | exponentiation of retry count. The first interval is 1s, second interval 47 | is 2s, third interval is 4s, and so on. 48 | 49 | - Retry will be handled in init_host. 50 | 51 | For this, the following additional config option would be needed: 52 | 53 | - 'reinit_driver_count' (default: 3) 54 | Set the maximum times to reintialize the driver if volume initialization fails. 55 | Default number is 3. 56 | 57 | Alternatives 58 | ------------ 59 | 60 | - We also can differentiate whether it should retry with an exception. Like 61 | import error, config error, it may not retry. But the benefit is not 62 | very impressive, and implementing the differentiation needs work in every 63 | driver. As drivers don't differentiate such errors from backend storage 64 | errors. 65 | 66 | Data model impact 67 | ----------------- 68 | 69 | None 70 | 71 | REST API impact 72 | --------------- 73 | 74 | None. 75 | 76 | Cinder-client impact 77 | -------------------- 78 | 79 | None. 80 | 81 | Security impact 82 | --------------- 83 | 84 | None. 85 | 86 | Notifications impact 87 | -------------------- 88 | 89 | None. 90 | 91 | Other end user impact 92 | --------------------- 93 | 94 | Users don't need to restart volume service when the initialization of 95 | drivers fail on recoverable errors. 96 | 97 | Performance Impact 98 | ------------------ 99 | 100 | None. 101 | 102 | Other deployer impact 103 | --------------------- 104 | 105 | None. 106 | 107 | Developer impact 108 | ---------------- 109 | 110 | None 111 | 112 | Implementation 113 | ============== 114 | 115 | Assignee(s) 116 | ----------- 117 | 118 | Primary assignee: 119 | Lisa Li (xiaoyan.li@intel.com) 120 | 121 | Work Items 122 | ---------- 123 | 124 | * Add the option 'reinit_driver_count'. 125 | * Retry to initialize volume drivers when it fails. 126 | * Add related unit test cases. 
127 | 128 | Dependencies 129 | ============ 130 | 131 | None 132 | 133 | Testing 134 | ======= 135 | 136 | * Add unit tests to cover this change. 137 | 138 | Documentation Impact 139 | ==================== 140 | 141 | * Add administrator documentation to advertise the option of 'reinit_driver_count' 142 | for driver reinitialization and explain how this should be used. 143 | 144 | References 145 | ========== 146 | 147 | * None 148 | -------------------------------------------------------------------------------- /specs/stein/improve-volume-transfer-records.rst: -------------------------------------------------------------------------------- 1 | .. 2 | This work is licensed under a Creative Commons Attribution 3.0 Unported 3 | License. 4 | 5 | http://creativecommons.org/licenses/by/3.0/legalcode 6 | 7 | =============================== 8 | Improve volume transfer records 9 | =============================== 10 | https://blueprints.launchpad.net/cinder/+spec/improve-volume-transfer-records 11 | 12 | This blueprint proposes to improve volume transfer records by adding 13 | ``source_project_id`` and ``destination_project_id``, ``accepted`` fields to 14 | ``transfer`` table and related api responses, makes it easier for users to 15 | trace the volume transfer history. 16 | 17 | Problem description 18 | =================== 19 | 20 | Currently, the volume transfer record does not include the destination 21 | project_id after transferring and the source project_id before transferring. 22 | These fields are very useful for admins and operators to trace the transfer 23 | history. 24 | 25 | And also once the transfer is deleted, the user can't determine if this 26 | transfer had been accepted or not before it was deleted. 27 | 28 | It is bad for admins and users to trace volume historical track between 29 | project and audit the volume records. 
30 | 31 | Use Cases 32 | =================== 33 | * In order to trace the volume transfer history, the admin wants to know who 34 | was the volume owner before transferring. 35 | * The admin wants to know whether a deleted transfer had been accepted or not. 36 | 37 | Proposed change 38 | =============== 39 | This spec proposes to do 40 | 41 | 1. Add three new fields to ``transfer`` table: 42 | 43 | * ``source_project_id``, this field records the source project_id 44 | before volume transferring. 45 | 46 | * ``destination_project_id``, this field records the destination project_id 47 | after volume transferring. 48 | 49 | * ``accepted``, this field records if this transfer was accepted or not. 50 | 51 | 2. Add a new microverion API to add above fields to the response of follow 52 | API: 53 | 54 | - Create a volume transfer POST /v3/{project_id}/volume-transfers 55 | 56 | - Show volume transfer detail GET /v3/{project_id}/volume-transfers 57 | 58 | - List volume transfer and detail GET 59 | /v3/{project_id}/volume-transfers/detail 60 | 61 | And the response of "List volume transfer (non-detail)" API will not 62 | include these fields. 63 | 64 | Alternatives 65 | ------------ 66 | 67 | The admins could find part of volume transferring info from log, but it's 68 | difficult to find it out easily, especially when the user wants to audit a 69 | very old volume transfer. 70 | 71 | 72 | REST API impact 73 | --------------- 74 | 75 | A new microversion will be created to add these new added fields to transfer 76 | related API responses. 77 | 78 | Data model impact 79 | ----------------- 80 | 81 | None 82 | 83 | Security impact 84 | --------------- 85 | 86 | None 87 | 88 | Notifications impact 89 | -------------------- 90 | 91 | Notifications will be changed to add these new added fields. 
92 | 93 | Other end user impact 94 | --------------------- 95 | 96 | None 97 | 98 | Performance Impact 99 | ------------------ 100 | 101 | None 102 | 103 | Other deployer impact 104 | --------------------- 105 | 106 | None 107 | 108 | Developer impact 109 | ---------------- 110 | 111 | None 112 | 113 | Implementation 114 | ============== 115 | 116 | Assignee(s) 117 | ----------- 118 | 119 | Primary assignee: 120 | Yikun Jiang 121 | 122 | Work Items 123 | ---------- 124 | * Add ``source_project_id`` and ``destination_project_id``, ``accepted`` 125 | fields to ``transfer`` table 126 | * Add ``source_project_id`` and ``destination_project_id``, ``accepted`` 127 | fields to related API. 128 | * Implement changes for python-cinderclient to support list transfer with 129 | ``--detail``. 130 | * Update related transfer api doc. 131 | 132 | Dependencies 133 | ============ 134 | 135 | None 136 | 137 | Testing 138 | ======= 139 | 140 | * Unit-tests, tempest and other related test should be implemented 141 | 142 | Documentation Impact 143 | ==================== 144 | 145 | None 146 | 147 | References 148 | ========== 149 | 150 | None 151 | -------------------------------------------------------------------------------- /specs/train/.placeholder: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/cinder-specs/ae9076bdf8651d351e093d5e58b595ed34de4ff9/specs/train/.placeholder -------------------------------------------------------------------------------- /specs/train/untyped-volumes-to-default-volume-type.rst: -------------------------------------------------------------------------------- 1 | .. 2 | This work is licensed under a Creative Commons Attribution 3.0 Unported 3 | License. 
4 | 5 | http://creativecommons.org/licenses/by/3.0/legalcode 6 | 7 | ====================================== 8 | Untyped volumes to default volume type 9 | ====================================== 10 | 11 | https://blueprints.launchpad.net/cinder/+spec/untyped-volumes-default-volume-type 12 | 13 | This blueprint proposes to use a default volume type instead of allowing users 14 | to create untyped volumes. 15 | 16 | Problem description 17 | =================== 18 | 19 | Currently a user is able to create a volume without any volume type, since 20 | a volume's characteristics are defined by a volume type, creating untyped 21 | volumes shouldn't be allowed. 22 | 23 | Use Cases 24 | ========= 25 | 26 | Most users use volume types, and our code is simpler and less buggy if we 27 | just always attach volume types to volumes, so we should just force all 28 | deployments to use volume types. 29 | 30 | Proposed change 31 | =============== 32 | 33 | This spec proposes the following changes : 34 | 35 | * Create a default volume type during cinder database migration or on cinder 36 | services start time. The default volume type will have no extra specs 37 | assigned to it and will be named ``__DEFAULT__`` 38 | * If a volume type named ``__DEFAULT__`` already exists, the deployer 39 | needs to rename or remove it before upgrading 40 | * Add an online migration to convert all untyped volumes and snapshots to 41 | ``__DEFAULT__`` volume type 42 | * Set a default value ``__DEFAULT__`` for ``default_volume_type`` config 43 | option so the default value is picked when it is unset in cinder.conf 44 | * Don't allow deletion of the ``__DEFAULT__`` volume type via type-delete 45 | * Updating of volume type (``__DEFAULT__``) will be handled by MANAGE_POLICY 46 | which defaults to admin-only 47 | 48 | Alternatives 49 | ------------ 50 | 51 | 1. Mandate specifying volume type while creating volumes. 52 | 2. 
Do this as a behind-the-scenes DB migration rather than relying on manual 53 | intervention with upgrade checkers. 54 | 3. Do this as a best-effort-if-it's-safe operation in the volume manager, 55 | which would migrate most deployments, and skip those that are ruled out for whatever reason. 56 | 57 | REST API impact 58 | --------------- 59 | 60 | None 61 | 62 | Data model impact 63 | ----------------- 64 | 65 | None 66 | 67 | Security impact 68 | --------------- 69 | 70 | None 71 | 72 | Notifications impact 73 | -------------------- 74 | 75 | None 76 | 77 | Other end user impact 78 | --------------------- 79 | 80 | * All volumes created without specifying the volume-type parameter 81 | will be associated with the default volume type. 82 | * Untyped Volumes and Snapshots will be assigned ``__DEFAULT__`` 83 | volume type 84 | * Users will no longer be able to create a volume with None volume type. 85 | 86 | Performance Impact 87 | ------------------ 88 | 89 | None 90 | 91 | Other deployer impact 92 | --------------------- 93 | 94 | None 95 | 96 | Developer impact 97 | ---------------- 98 | 99 | None 100 | 101 | Implementation 102 | ============== 103 | 104 | Assignee(s) 105 | ----------- 106 | 107 | Primary assignee: 108 | Rajat Dhasmana 109 | 110 | Other assignee: 111 | Eric Harney 112 | 113 | Work Items 114 | ---------- 115 | By restricting untyped volumes, we need to do the following changes: 116 | 117 | * Add an upgrade check to verify that current deployment doesn't contain 118 | any volume type named ``__DEFAULT__`` else the upgrade will fail 119 | 120 | * Create ``__DEFAULT__`` volume type at the time of cinder db migration or 121 | service start time 122 | 123 | * Add upgrade check to verify no type named ``__DEFAULT__`` exists before 124 | upgrading 125 | 126 | * Add an online migration to convert all untyped volumes and snapshots to 127 | ``__DEFAULT__`` volume type 128 | 129 | * Related code changes to associate default_volume_type to volumes if no 130 | 
volume type is specified by user 131 | 132 | Dependencies 133 | ============ 134 | 135 | None 136 | 137 | Testing 138 | ======= 139 | 140 | Unit-tests, tempest and other related tests will be implemented. 141 | 142 | Documentation Impact 143 | ==================== 144 | 145 | Need to update volume type related docs. 146 | 147 | References 148 | ========== 149 | 150 | None 151 | -------------------------------------------------------------------------------- /specs/train/volume-rekey.rst: -------------------------------------------------------------------------------- 1 | .. 2 | This work is licensed under a Creative Commons Attribution 3.0 Unported 3 | License. 4 | 5 | http://creativecommons.org/licenses/by/3.0/legalcode 6 | 7 | ========================================== 8 | Volume Rekey 9 | ========================================== 10 | 11 | https://blueprints.launchpad.net/cinder/+spec/volume-rekey 12 | 13 | Cinder supports volume encryption with keys stored in Barbican. 14 | 15 | This spec tracks some improvements we can make in how we handle 16 | encryption keys. 17 | 18 | 19 | Problem description 20 | =================== 21 | 22 | When cloning volumes, cinder clones the encryption key as well, 23 | so multiple volumes are unlockable with the same encryption key. 24 | 25 | We can make this encryption scheme more robust by changing the 26 | encryption key upon volume clone, so that cloned volumes are not 27 | unlockable with the key of the source volume. 28 | 29 | This is possible since we encrypt volumes using LUKS. Changing the 30 | key does not require re-encrypting the volume. 31 | 32 | Use Cases 33 | ========= 34 | 35 | Security hardening for volume encryption. 36 | 37 | Proposed change 38 | =============== 39 | 40 | When a volume is cloned, attach it as part of the clone process, 41 | and change the encryption key using LUKS tools. 42 | 43 | The rest of the clone process continues as normal afterward. 
44 | 45 | My current implementation does this by calling a new 46 | rekey_volume() method in the create_volume flow, which uses 47 | "cryptsetup luksChangeKey". This should work for any iSCSI/FC 48 | drivers, which already must perform a similar attachment when 49 | creating a volume from an image. 50 | 51 | Some work (planned for after Train) is still needed to make this 52 | work for RBD, because there does not seem to be a qemu-img tool 53 | that can change encryption keys, and cryptsetup requires a local 54 | block device. This leaves us with two options for RBD: 55 | a) use krbd mapping to get a block device 56 | b) use rbd-nbd to get a block device 57 | 58 | NBD is not widely supported in relevant OSes, so krbd looks like 59 | the choice there. 60 | 61 | Alternatives 62 | ------------ 63 | 64 | None 65 | 66 | Data model impact 67 | ----------------- 68 | 69 | None 70 | 71 | 72 | REST API impact 73 | --------------- 74 | 75 | None 76 | 77 | Security impact 78 | --------------- 79 | 80 | * Volume encryption is better hardened against threats due to 81 | compromise of a single encryption key. 82 | 83 | 84 | Active/Active HA impact 85 | ----------------------- 86 | 87 | None 88 | 89 | Notifications impact 90 | -------------------- 91 | 92 | None 93 | 94 | Other end user impact 95 | --------------------- 96 | 97 | None 98 | 99 | Performance Impact 100 | ------------------ 101 | 102 | Slightly more time to clone encrypted volumes. 103 | 104 | Other deployer impact 105 | --------------------- 106 | 107 | None 108 | 109 | Developer impact 110 | ---------------- 111 | 112 | None 113 | 114 | Implementation 115 | ============== 116 | 117 | Assignee(s) 118 | ----------- 119 | 120 | Primary assignee: 121 | eharney 122 | 123 | Work Items 124 | ---------- 125 | 126 | * Implement this for iSCSI/FC drivers 127 | * Test with the LVM driver 128 | 129 | In a later release... 
130 | * Implement this for RBD 131 | - Requires some additional effort 132 | * Consider additional cases where this concept would be useful 133 | - Volume transfer 134 | - Backup restoration (?) 135 | 136 | 137 | Dependencies 138 | ============ 139 | 140 | None 141 | 142 | 143 | Testing 144 | ======= 145 | 146 | Will be on by default and therefore tested by tempest tests 147 | that clone encrypted volumes. 148 | 149 | Documentation Impact 150 | ==================== 151 | 152 | None 153 | 154 | References 155 | ========== 156 | 157 | None 158 | -------------------------------------------------------------------------------- /specs/untargeted/generic-backup-implementation.rst: -------------------------------------------------------------------------------- 1 | .. 2 | This work is licensed under a Creative Commons Attribution 3.0 Unported 3 | License. 4 | 5 | http://creativecommons.org/licenses/by/3.0/legalcode 6 | 7 | ============================= 8 | Generic backup implementation 9 | ============================= 10 | 11 | https://blueprints.launchpad.net/cinder/+spec/generic-backup-implementation 12 | 13 | Generic backup implementation will allow us to use any supported volumes 14 | backend as backup storage. So we won't need to implement specific backup driver 15 | for supported backends. 16 | 17 | 18 | Problem description 19 | =================== 20 | 21 | We've got different backup and volume drivers now. That's why we have to 22 | re-implement the part of volume driver as backup driver if we want to use 23 | the same storage for backups. 24 | 25 | Use Cases 26 | ========= 27 | 28 | 1. Be able to use any cinder-supported storage backend for backups. 29 | 30 | Proposed change 31 | =============== 32 | 33 | Implmement generic backup implementation in a base volume driver like we did 34 | for volume migrations. After this vendors could easily implement 35 | storage-assistant backups for their drivers. 
36 | 37 | We will have base backup drivers class for storages like Swift, Google Cloud 38 | Storage, etc which will implement only backup-related features. 39 | 40 | It will allow a volume backed up to storage A to be restored to a volume 41 | on any storage B. 42 | 43 | Cinder should not allow to use the same storage both for volume and backups. 44 | In such case, backup driver initialization should fail. If storage supports 45 | different pools Cinder will allow to create backup on the same storage but in 46 | the different pool. 47 | 48 | We don't need to have 'backup and volumes in the same storage' feature in 49 | Cinder even configurable because we can use snapshots or clone volume for it. 50 | Backups should be in different storage or at least in another pool. 51 | 52 | As a generic implementation, cinder will use the same mechanism as generic 53 | volume migraion: 54 | 55 | * create volume on a destination storage 56 | * attach both source and destination volumes to a cinder node 57 | * use 'dd' tool to copy volume data 58 | * detach both volumes from a cinder node 59 | 60 | In case if volume is 'in-use' we'll create temporary snapshot and do backup 61 | from it. 62 | 63 | We can't use 'clone volume' feature between different storages. 64 | 65 | Vendor-specific changes 66 | ----------------------- 67 | Vendors or drivers maintainers could implement vendor-specific backup 68 | implementation to use storage API for faster backup process. 69 | 70 | Alternatives 71 | ------------ 72 | 73 | Follow the current approach with separate volumes and backups drivers. 
74 | 75 | Data model impact 76 | ----------------- 77 | 78 | None 79 | 80 | REST API impact 81 | --------------- 82 | 83 | None 84 | 85 | Security impact 86 | --------------- 87 | 88 | None 89 | 90 | Notifications impact 91 | -------------------- 92 | 93 | None 94 | 95 | Other end user impact 96 | --------------------- 97 | 98 | None 99 | 100 | Performance Impact 101 | ------------------ 102 | 103 | Backups are likely to be faster to block backends than to swift. It means 104 | backup to block storage could be faster than a backup to object storage. 105 | 106 | 107 | Other deployer impact 108 | --------------------- 109 | 110 | Operator should be able to configure backups storage and cinder backup driver. 111 | 112 | Developer impact 113 | ---------------- 114 | 115 | * Volume drivers could implement vendor-specific backup implementation 116 | 117 | 118 | Implementation 119 | ============== 120 | 121 | Assignee(s) 122 | ----------- 123 | 124 | 125 | Primary assignee: 126 | Ivan Kolodyazhny 127 | 128 | Other contributors: 129 | Volume Driver maintainers 130 | 131 | Work Items 132 | ---------- 133 | 134 | TDB 135 | 136 | 137 | Dependencies 138 | ============ 139 | 140 | None 141 | 142 | 143 | Testing 144 | ======= 145 | 146 | * Unit tests 147 | * Tempest tests should be implemented in a new feature group 148 | 149 | 150 | Documentation Impact 151 | ==================== 152 | 153 | Operators documentation should be updated according to spec implementation. 
154 | 155 | 156 | References 157 | ========== 158 | 159 | * http://eavesdrop.openstack.org/meetings/cinder/2016/cinder.2016-08-10-15.59.html 160 | -------------------------------------------------------------------------------- /specs/ussuri/.placeholder: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/cinder-specs/ae9076bdf8651d351e093d5e58b595ed34de4ff9/specs/ussuri/.placeholder -------------------------------------------------------------------------------- /specs/ussuri/add_backup_id_to_volume.rst: -------------------------------------------------------------------------------- 1 | .. 2 | This work is licensed under a Creative Commons Attribution 3.0 Unported 3 | License. 4 | 5 | http://creativecommons.org/licenses/by/3.0/legalcode 6 | 7 | ================================== 8 | Add backup id to volume's metadata 9 | ================================== 10 | 11 | https://blueprints.launchpad.net/cinder/+spec/add-volume-backup-id 12 | 13 | Problem description 14 | =================== 15 | Currently, the end user can see the source (source by snapshot or image etc.) 16 | from the new volume created. But when the end user has restored a backup 17 | volume, in the volume show response, we cannot see its source. This can 18 | cause a lot of confusion for end users. 19 | 20 | Use Cases 21 | ========= 22 | As an end user, I would like to know the restored volume comes from which 23 | backup resource. 24 | 25 | Proposed change 26 | =============== 27 | * Add the property ``src_backup_id`` to the volume's metadata, 28 | to record from which backup the new volume was created from. 29 | When restoring from a chain of incremental backups, ``src_backup_id`` 30 | is set to the last incremental backup used for the restore. 
31 | 32 | Once added to the volume metadata, the ``src_backup_id`` will appear on 33 | any API response that displays volume metadata: 34 | 35 | * the volume-show response (GET /v3/{project_id}/volumes/{volume_id}) 36 | * the volume-list-detail response (GET /v3/{project_id}/volumes/detail) 37 | * the volume-metadata-show response 38 | (GET /v3/{project_id}/volumes/{volume_id}/metadata) 39 | * the volume-metadata-show-key response, only if the key is ``src_backup_id`` 40 | (GET /v3/{project_id}/volumes/{volume_id}/metadata/{key}) 41 | 42 | Vendor-specific changes 43 | ----------------------- 44 | None 45 | 46 | Alternatives 47 | ------------ 48 | None 49 | 50 | Data model impact 51 | ----------------- 52 | None 53 | 54 | REST API impact 55 | --------------- 56 | None. 57 | The volume-show, volume-list-detail, and volume-metadata-show 58 | responses are currently defined to contain a ``metadata`` element that 59 | is either JSON null or a JSON object consisting of a list of key/value pairs. 60 | The ``src_backup_id`` will appear in this list for appropriate volumes, 61 | but this respects the current API and does not require a new microversion. 
62 | 63 | Security impact 64 | --------------- 65 | None 66 | 67 | Notifications impact 68 | -------------------- 69 | None 70 | 71 | Other end user impact 72 | --------------------- 73 | None 74 | 75 | Performance Impact 76 | ------------------ 77 | None 78 | 79 | Other deployer impact 80 | --------------------- 81 | None 82 | 83 | Developer impact 84 | ---------------- 85 | None 86 | 87 | Implementation 88 | ============== 89 | Assignee(s) 90 | ----------- 91 | Xuan Yandong 92 | 93 | Work Items 94 | ---------- 95 | * Add ``src_backup_id`` to the ``volume`` metadata 96 | 97 | Dependencies 98 | ============ 99 | None 100 | 101 | Testing 102 | ======= 103 | * Add related unit tests 104 | * Add related functional test 105 | * Add tempest tests 106 | 107 | Documentation Impact 108 | ==================== 109 | Release note should point out that since this is stored in the volume 110 | metadata, it can be modified or removed by end users, so operators should 111 | not rely upon it being present for administrative or auditing purposes. 112 | 113 | Add a similar note somewhere in the admin docs, probably a page about 114 | volume metadata written by Cinder (which may not currently exist). 115 | In addition to reminding admins that end users can overwrite volume 116 | metadata, should explain how to read the ``src_backup_id`` (particularly 117 | the part about what id is used when restoring from incremental backups). 
118 | 119 | References 120 | ========== 121 | None 122 | -------------------------------------------------------------------------------- /specs/victoria/.placeholder: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/cinder-specs/ae9076bdf8651d351e093d5e58b595ed34de4ff9/specs/victoria/.placeholder -------------------------------------------------------------------------------- /specs/xena/reset-state-robustification.rst: -------------------------------------------------------------------------------- 1 | .. 2 | This work is licensed under a Creative Commons Attribution 3.0 Unported 3 | License. 4 | 5 | http://creativecommons.org/licenses/by/3.0/legalcode 6 | 7 | =========================== 8 | Reset State Robustification 9 | =========================== 10 | 11 | https://blueprints.launchpad.net/cinder/+spec/reset-state-robustification 12 | 13 | Problem description 14 | =================== 15 | 16 | I have seen a number of users get volumes into "invalid" states by 17 | not understanding how resetting the state of a volume interacts 18 | with resetting the state of attachments. 19 | 20 | For example, on a volume with an attachment, run 21 | "cinder reset-state --state available " 22 | 23 | What now? 24 | 25 | Use Cases 26 | ========= 27 | 28 | Help prevent users who aren't Cinder experts from shooting themselves 29 | in the foot with reset-state. 30 | 31 | Proposed change 32 | =============== 33 | 34 | If a user requests to reset the state of a volume to something that 35 | Cinder knows is not a valid state of the universe, reject the request 36 | with a reason. 37 | 38 | If volume1 has an attachment active. 39 | $ cinder reset-state --state available volume1 40 | ERROR: Cannot reset-state to available because attachments still exist. 41 | 42 | $ cinder reset-state --state available volume1 --attach-status detached 43 | (command succeeds) 44 | 45 | Cases to block: 46 | 1. 
volume reset to available w/ attachments 47 | 2. snapshot reset to in-use with volume in available 48 | 49 | 50 | Sometimes a knowledgeable operator may need to reset the state anyway 51 | and then manually make the current state valid. cinder-manage is the 52 | place where forcing a change will be allowed instead of '--force' 53 | flag in the API. 54 | 55 | Alternatives 56 | ------------ 57 | 58 | - Hope users don't do these things. 59 | - Handle the "reset to available while attached case" by forcefully 60 | detaching the volumes instead of rejecting the request. 61 | 62 | Data model impact 63 | ----------------- 64 | 65 | None 66 | 67 | REST API impact 68 | --------------- 69 | 70 | os-reset-status actions on volumes, snaps, backups, groups 71 | will now return a 400 in some cases where they would previously 72 | have succeeded. This does not require a microversion bump. 73 | 74 | Security impact 75 | --------------- 76 | 77 | None 78 | 79 | Active/Active HA impact 80 | ----------------------- 81 | 82 | These checks in reset-state could conceivably race against updates in 83 | a cluster. Will determine what that means when we get further into 84 | implementation. 85 | 86 | 87 | Notifications impact 88 | -------------------- 89 | 90 | None 91 | 92 | Other end user impact 93 | --------------------- 94 | 95 | None 96 | 97 | Performance Impact 98 | ------------------ 99 | 100 | None 101 | 102 | Other deployer impact 103 | --------------------- 104 | 105 | Improves safety for deployers trying to clean up from issues in 106 | their cloud. 
107 | 108 | Developer impact 109 | ---------------- 110 | 111 | None 112 | 113 | 114 | Implementation 115 | ============== 116 | 117 | Assignee(s) 118 | ----------- 119 | 120 | Primary assignee: 121 | TusharTgite 122 | 123 | Other contributors: 124 | eharney 125 | 126 | Work Items 127 | ---------- 128 | 129 | * Implement a check for the 130 | reset-state to available while attached case 131 | * Add code to cinder-manage to handle the case 132 | where an operator needs to override the API 133 | and reset the state anyway. 134 | * Research other sensible cases we could prevent for 135 | volumes, snaps, groups, etc. 136 | * Tempest test for a couple of the big cases 137 | 138 | 139 | Dependencies 140 | ============ 141 | 142 | None 143 | 144 | Testing 145 | ======= 146 | 147 | * New tempest tests 148 | 149 | 150 | Documentation Impact 151 | ==================== 152 | 153 | * Should document common cases where this fails. 154 | 155 | References 156 | ========== 157 | 158 | * Original proposal: https://review.opendev.org/c/openstack/cinder-specs/+/682456 159 | -------------------------------------------------------------------------------- /specs/xena/snapshot-attached-volumes.rst: -------------------------------------------------------------------------------- 1 | .. 2 | This work is licensed under a Creative Commons Attribution 3.0 Unported 3 | License. 4 | 5 | http://creativecommons.org/licenses/by/3.0/legalcode 6 | 7 | ========================================== 8 | Snapshotting attached volumes w/o force 9 | ========================================== 10 | 11 | Include the URL of your launchpad blueprint: 12 | 13 | https://blueprints.launchpad.net/cinder/+spec/fix-snapshot-create-force 14 | 15 | Cinder requires passing the "force" flag to a snapshot create 16 | call to create a snapshot from a volume while it is attached 17 | to an instance. 
This is unnecessary, as snapshotting attached 18 | volumes results in crash-consistent snapshots, which are useful, 19 | sufficient, and one of the most common cases of how snapshots 20 | are used in the real world. 21 | 22 | Problem description 23 | =================== 24 | 25 | Most users and other software that create Cinder snapshots actually 26 | want crash-consistent snapshots of attached volumes, so making this 27 | an exception case is not productive. Code is written to always 28 | use "force", and users learn that it is needed to create snapshots. 29 | 30 | In most virtualization platforms for many years, creating 31 | crash-consistent snapshots of online disks is not an exception case, 32 | it is a normal operation. It should be in Cinder too. 33 | 34 | 35 | Use Cases 36 | ========= 37 | 38 | * easier for end users 39 | * less surprising snapshot API for developers 40 | 41 | Proposed change 42 | =============== 43 | 44 | Introduce a new microversion that no longer uses a "force" flag 45 | to control when snapshots can be created for a volume. 46 | 47 | This means that snapshot creation succeeds for volumes that are in the 48 | "available" or "in-use" state. The "force" parameter is no longer needed, 49 | but "force=true" is accepted to reduce code changes required for users who are 50 | currently passing this flag from their code. 51 | 52 | Snapshot creation with "force=false" will be rejected as invalid after 53 | this new microversion. 54 | 55 | Alternatives 56 | ------------ 57 | 58 | None 59 | 60 | Data model impact 61 | ----------------- 62 | 63 | None 64 | 65 | REST API impact 66 | --------------- 67 | 68 | * Introduce a new microversion for this change 69 | * Snapshot creation will succeed for in-use volumes w/o force flag added. 70 | * Passing force=True will succeed for in-use volumes as it does currently, 71 | but this parameter is no longer needed for this case. 
72 | * Passing force=False for snapshot creation is not allowed after the new 73 | microversion. This is presumably rarely used and removing it reduces 74 | ambiguity about what "force=False" would mean when in-use volumes can 75 | be snapshotted by default. 76 | 77 | Security impact 78 | --------------- 79 | 80 | None 81 | 82 | Active/Active HA impact 83 | ----------------------- 84 | 85 | None 86 | 87 | Notifications impact 88 | -------------------- 89 | 90 | None 91 | 92 | Other end user impact 93 | --------------------- 94 | 95 | Minimal cinderclient changes 96 | 97 | Performance Impact 98 | ------------------ 99 | 100 | * Fewer snapshot create calls that return HTTP 400 resulting in the user 101 | issuing a second snapshot create call w/ "force" added 102 | 103 | Other deployer impact 104 | --------------------- 105 | 106 | Developer impact 107 | ---------------- 108 | 109 | Implementation 110 | ============== 111 | 112 | Assignee(s) 113 | ----------- 114 | 115 | Primary assignee: 116 | eharney 117 | 118 | Work Items 119 | ---------- 120 | 121 | * Fix snapshot-create 122 | * New API microversion 123 | * Tempest Tests 124 | 125 | * Look at backup-create (similar problems) 126 | 127 | Dependencies 128 | ============ 129 | 130 | Testing 131 | ======= 132 | 133 | * New tempest tests in cinder-tempest-plugin 134 | 135 | Documentation Impact 136 | ==================== 137 | 138 | * Minimal 139 | 140 | 141 | References 142 | ========== 143 | 144 | -------------------------------------------------------------------------------- /test-requirements.txt: -------------------------------------------------------------------------------- 1 | # The order of packages is significant, because pip processes them in the order 2 | # of appearance. Changing the order has an impact on the overall integration 3 | # process, which may cause wedges in the gate later. 
4 | 5 | openstackdocstheme>=2.2.1 # Apache-2.0 6 | sphinx>=2.0.0,!=2.1.0 # BSD 7 | flake8 8 | yasfb>=0.5.1 9 | doc8>=0.8.1 # Apache-2.0 10 | whereto>=0.3.0 # Apache-2.0 11 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | minversion = 3.18.0 3 | envlist = docs,pep8 4 | skipsdist = True 5 | ignore_basepython_conflict = True 6 | 7 | [testenv] 8 | basepython = python3 9 | usedevelop = True 10 | setenv = 11 | VIRTUAL_ENV={envdir} 12 | deps = -r{toxinidir}/test-requirements.txt 13 | 14 | [testenv:docs] 15 | allowlist_externals = rm 16 | commands = 17 | rm -fr doc/build 18 | sphinx-build -W -b html doc/source doc/build/html 19 | doc8 --ignore D001 doc/source 20 | whereto doc/source/_extra/.htaccess doc/test/redirect-tests.txt 21 | 22 | [testenv:pep8] 23 | commands = 24 | flake8 25 | doc8 --ignore D001 specs/ 26 | 27 | [testenv:venv] 28 | commands = {posargs} 29 | 30 | [flake8] 31 | # E123, E125 skipped as they are invalid PEP-8. 32 | 33 | show-source = True 34 | ignore = E123,E125 35 | exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build 36 | --------------------------------------------------------------------------------