├── LICENSE ├── README ├── docs └── source │ ├── conf.py │ └── index.rst ├── libzfs_core ├── __init__.py ├── _constants.py ├── _error_translation.py ├── _libzfs_core.py ├── _nvlist.py ├── bindings │ ├── __init__.py │ ├── libnvpair.py │ └── libzfs_core.py ├── ctypes.py ├── exceptions.py └── test │ ├── __init__.py │ ├── test_libzfs_core.py │ └── test_nvlist.py └── setup.py /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2015 ClusterHQ 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README: -------------------------------------------------------------------------------- 1 | This package provides a wrapper for libzfs_core C library. 2 | 3 | libzfs_core is intended to be a stable interface for programmatic 4 | administration of ZFS. 5 | This wrapper provides one-to-one wrappers for libzfs_core API functions, 6 | but the signatures and types are more natural to Python. 7 | nvlists are wrapped as dictionaries or lists depending on their usage. 
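For example, a minimal hypothetical sketch of what this looks like in Python (it assumes a
pool named "tank" already exists; the dataset and snapshot names are made up):

    import libzfs_core as lzc
    from libzfs_core import exceptions

    try:
        # ZFS properties are passed as a plain Python dict instead of an nvlist.
        lzc.lzc_create(b'tank/fs', ds_type='zfs', props={})
        lzc.lzc_snapshot([b'tank/fs@first'])
    except exceptions.FilesystemExists:
        # Failures are raised as Python exceptions rather than returned as errno codes.
        pass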
8 | Some parameters have default values depending on typical use for 9 | increased convenience. 10 | Enumerations and bit flags become strings and lists of strings in Python. 11 | Errors are reported as exceptions rather than integer errno-style 12 | error codes. The wrapper takes care to provide one-to-many mapping 13 | of the error codes to the exceptions by interpreting a context 14 | in which the error code is produced. 15 | 16 | Unit tests and automated test for the libzfs_core API are provided 17 | with this package. 18 | Please note that the API tests perform lots of ZFS dataset level 19 | operations and ZFS tries hard to ensure that any modifications 20 | do reach stable storage. That means that the operations are done 21 | synchronously and that, for example, disk caches are flushed. 22 | Thus, the tests can be very slow on real hardware. 23 | It is recommended to place the default temporary directory or 24 | a temporary directory specified by, for instance, TMP environment 25 | variable on a memory backed filesystem. 26 | 27 | Package documentation: http://pyzfs.readthedocs.org 28 | Package development: https://github.com/ClusterHQ/pyzfs 29 | -------------------------------------------------------------------------------- /docs/source/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # pyzfs documentation build configuration file, created by 4 | # sphinx-quickstart on Mon Apr 6 23:48:40 2015. 5 | # 6 | # This file is execfile()d with the current directory set to its 7 | # containing dir. 8 | # 9 | # Note that not all possible configuration values are present in this 10 | # autogenerated file. 11 | # 12 | # All configuration values have a default; values that are commented out 13 | # serve to show the default. 14 | 15 | import sys 16 | import os 17 | import shlex 18 | 19 | # If extensions (or modules to document with autodoc) are in another directory, 20 | # add these directories to sys.path here. If the directory is relative to the 21 | # documentation root, use os.path.abspath to make it absolute, like shown here. 22 | sys.path.insert(0, os.path.abspath('../..')) 23 | 24 | # -- General configuration ------------------------------------------------ 25 | 26 | # If your documentation needs a minimal Sphinx version, state it here. 27 | #needs_sphinx = '1.0' 28 | 29 | # Add any Sphinx extension module names here, as strings. They can be 30 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 31 | # ones. 32 | extensions = [ 33 | 'sphinx.ext.autodoc', 34 | ] 35 | 36 | # Add any paths that contain templates here, relative to this directory. 37 | templates_path = ['_templates'] 38 | 39 | # The suffix(es) of source filenames. 40 | # You can specify multiple suffix as a list of string: 41 | # source_suffix = ['.rst', '.md'] 42 | source_suffix = '.rst' 43 | 44 | # The encoding of source files. 45 | #source_encoding = 'utf-8-sig' 46 | 47 | # The master toctree document. 48 | master_doc = 'index' 49 | 50 | # General information about the project. 51 | project = u'pyzfs' 52 | copyright = u'2015, ClusterHQ' 53 | author = u'ClusterHQ' 54 | 55 | # The version info for the project you're documenting, acts as replacement for 56 | # |version| and |release|, also used in various other places throughout the 57 | # built documents. 58 | # 59 | # The short X.Y version. 60 | version = '0.2.3' 61 | # The full version, including alpha/beta/rc tags. 
62 | release = '0.2.3' 63 | 64 | # The language for content autogenerated by Sphinx. Refer to documentation 65 | # for a list of supported languages. 66 | # 67 | # This is also used if you do content translation via gettext catalogs. 68 | # Usually you set "language" from the command line for these cases. 69 | language = None 70 | 71 | # There are two options for replacing |today|: either, you set today to some 72 | # non-false value, then it is used: 73 | #today = '' 74 | # Else, today_fmt is used as the format for a strftime call. 75 | #today_fmt = '%B %d, %Y' 76 | 77 | # List of patterns, relative to source directory, that match files and 78 | # directories to ignore when looking for source files. 79 | exclude_patterns = [] 80 | 81 | # The reST default role (used for this markup: `text`) to use for all 82 | # documents. 83 | #default_role = None 84 | 85 | # If true, '()' will be appended to :func: etc. cross-reference text. 86 | #add_function_parentheses = True 87 | 88 | # If true, the current module name will be prepended to all description 89 | # unit titles (such as .. function::). 90 | #add_module_names = True 91 | 92 | # If true, sectionauthor and moduleauthor directives will be shown in the 93 | # output. They are ignored by default. 94 | #show_authors = False 95 | 96 | # The name of the Pygments (syntax highlighting) style to use. 97 | pygments_style = 'sphinx' 98 | 99 | # A list of ignored prefixes for module index sorting. 100 | #modindex_common_prefix = [] 101 | 102 | # If true, keep warnings as "system message" paragraphs in the built documents. 103 | #keep_warnings = False 104 | 105 | # If true, `todo` and `todoList` produce output, else they produce nothing. 106 | todo_include_todos = False 107 | 108 | 109 | # -- Options for HTML output ---------------------------------------------- 110 | 111 | # The theme to use for HTML and HTML Help pages. See the documentation for 112 | # a list of builtin themes. 113 | html_theme = 'classic' 114 | 115 | # Theme options are theme-specific and customize the look and feel of a theme 116 | # further. For a list of options available for each theme, see the 117 | # documentation. 118 | #html_theme_options = {} 119 | 120 | # Add any paths that contain custom themes here, relative to this directory. 121 | #html_theme_path = [] 122 | 123 | # The name for this set of Sphinx documents. If None, it defaults to 124 | # " v documentation". 125 | #html_title = None 126 | 127 | # A shorter title for the navigation bar. Default is the same as html_title. 128 | #html_short_title = None 129 | 130 | # The name of an image file (relative to this directory) to place at the top 131 | # of the sidebar. 132 | #html_logo = None 133 | 134 | # The name of an image file (within the static path) to use as favicon of the 135 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 136 | # pixels large. 137 | #html_favicon = None 138 | 139 | # Add any paths that contain custom static files (such as style sheets) here, 140 | # relative to this directory. They are copied after the builtin static files, 141 | # so a file named "default.css" will overwrite the builtin "default.css". 142 | html_static_path = ['_static'] 143 | 144 | # Add any extra paths that contain custom files (such as robots.txt or 145 | # .htaccess) here, relative to this directory. These files are copied 146 | # directly to the root of the documentation. 
147 | #html_extra_path = [] 148 | 149 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 150 | # using the given strftime format. 151 | #html_last_updated_fmt = '%b %d, %Y' 152 | 153 | # If true, SmartyPants will be used to convert quotes and dashes to 154 | # typographically correct entities. 155 | #html_use_smartypants = True 156 | 157 | # Custom sidebar templates, maps document names to template names. 158 | #html_sidebars = {} 159 | 160 | # Additional templates that should be rendered to pages, maps page names to 161 | # template names. 162 | #html_additional_pages = {} 163 | 164 | # If false, no module index is generated. 165 | #html_domain_indices = True 166 | 167 | # If false, no index is generated. 168 | #html_use_index = True 169 | 170 | # If true, the index is split into individual pages for each letter. 171 | #html_split_index = False 172 | 173 | # If true, links to the reST sources are added to the pages. 174 | #html_show_sourcelink = True 175 | 176 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 177 | #html_show_sphinx = True 178 | 179 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 180 | #html_show_copyright = True 181 | 182 | # If true, an OpenSearch description file will be output, and all pages will 183 | # contain a tag referring to it. The value of this option must be the 184 | # base URL from which the finished HTML is served. 185 | #html_use_opensearch = '' 186 | 187 | # This is the file name suffix for HTML files (e.g. ".xhtml"). 188 | #html_file_suffix = None 189 | 190 | # Language to be used for generating the HTML full-text search index. 191 | # Sphinx supports the following languages: 192 | # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' 193 | # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' 194 | #html_search_language = 'en' 195 | 196 | # A dictionary with options for the search language support, empty by default. 197 | # Now only 'ja' uses this config value 198 | #html_search_options = {'type': 'default'} 199 | 200 | # The name of a javascript file (relative to the configuration directory) that 201 | # implements a search results scorer. If empty, the default will be used. 202 | #html_search_scorer = 'scorer.js' 203 | 204 | # Output file base name for HTML help builder. 205 | htmlhelp_basename = 'pyzfsdoc' 206 | 207 | # -- Options for LaTeX output --------------------------------------------- 208 | 209 | latex_elements = { 210 | # The paper size ('letterpaper' or 'a4paper'). 211 | #'papersize': 'letterpaper', 212 | 213 | # The font size ('10pt', '11pt' or '12pt'). 214 | #'pointsize': '10pt', 215 | 216 | # Additional stuff for the LaTeX preamble. 217 | #'preamble': '', 218 | 219 | # Latex figure (float) alignment 220 | #'figure_align': 'htbp', 221 | } 222 | 223 | # Grouping the document tree into LaTeX files. List of tuples 224 | # (source start file, target name, title, 225 | # author, documentclass [howto, manual, or own class]). 226 | latex_documents = [ 227 | (master_doc, 'pyzfs.tex', u'pyzfs Documentation', 228 | u'ClusterHQ', 'manual'), 229 | ] 230 | 231 | # The name of an image file (relative to this directory) to place at the top of 232 | # the title page. 233 | #latex_logo = None 234 | 235 | # For "manual" documents, if this is true, then toplevel headings are parts, 236 | # not chapters. 237 | #latex_use_parts = False 238 | 239 | # If true, show page references after internal links. 
240 | #latex_show_pagerefs = False 241 | 242 | # If true, show URL addresses after external links. 243 | #latex_show_urls = False 244 | 245 | # Documents to append as an appendix to all manuals. 246 | #latex_appendices = [] 247 | 248 | # If false, no module index is generated. 249 | #latex_domain_indices = True 250 | 251 | 252 | # -- Options for manual page output --------------------------------------- 253 | 254 | # One entry per manual page. List of tuples 255 | # (source start file, name, description, authors, manual section). 256 | man_pages = [ 257 | (master_doc, 'pyzfs', u'pyzfs Documentation', 258 | [author], 1) 259 | ] 260 | 261 | # If true, show URL addresses after external links. 262 | #man_show_urls = False 263 | 264 | 265 | # -- Options for Texinfo output ------------------------------------------- 266 | 267 | # Grouping the document tree into Texinfo files. List of tuples 268 | # (source start file, target name, title, author, 269 | # dir menu entry, description, category) 270 | texinfo_documents = [ 271 | (master_doc, 'pyzfs', u'pyzfs Documentation', 272 | author, 'pyzfs', 'One line description of project.', 273 | 'Miscellaneous'), 274 | ] 275 | 276 | # Documents to append as an appendix to all manuals. 277 | #texinfo_appendices = [] 278 | 279 | # If false, no module index is generated. 280 | #texinfo_domain_indices = True 281 | 282 | # How to display URL addresses: 'footnote', 'no', or 'inline'. 283 | #texinfo_show_urls = 'footnote' 284 | 285 | # If true, do not generate a @detailmenu in the "Top" node's menu. 286 | #texinfo_no_detailmenu = False 287 | 288 | # Sort documentation in the same order as the source files. 289 | autodoc_member_order = 'bysource' 290 | 291 | 292 | ####################### 293 | # Neutralize effects of function wrapping on documented signatures. 294 | # The affected signatures could be explcitly placed into the 295 | # documentation (either in .rst files or as a first line of a 296 | # docstring). 297 | import functools 298 | 299 | def no_op_wraps(func): 300 | def wrapper(decorator): 301 | return func 302 | return wrapper 303 | 304 | functools.wraps = no_op_wraps 305 | -------------------------------------------------------------------------------- /docs/source/index.rst: -------------------------------------------------------------------------------- 1 | .. pyzfs documentation master file, created by 2 | sphinx-quickstart on Mon Apr 6 23:48:40 2015. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Welcome to pyzfs's documentation! 7 | ================================= 8 | 9 | Contents: 10 | 11 | .. toctree:: 12 | :maxdepth: 2 13 | 14 | 15 | 16 | Indices and tables 17 | ================== 18 | 19 | * :ref:`genindex` 20 | * :ref:`modindex` 21 | * :ref:`search` 22 | 23 | Documentation for the libzfs_core 24 | ********************************* 25 | 26 | .. automodule:: libzfs_core 27 | :members: 28 | :exclude-members: lzc_snap, lzc_recv, lzc_destroy_one, 29 | lzc_inherit, lzc_set_props, lzc_list 30 | 31 | Documentation for the libzfs_core exceptions 32 | ******************************************** 33 | 34 | .. automodule:: libzfs_core.exceptions 35 | :members: 36 | :undoc-members: 37 | 38 | Documentation for the miscellaneous types that correspond to specific width C types 39 | *********************************************************************************** 40 | 41 | .. 
automodule:: libzfs_core.ctypes 42 | :members: 43 | :undoc-members: 44 | 45 | -------------------------------------------------------------------------------- /libzfs_core/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2015 ClusterHQ. See LICENSE file for details. 2 | ''' 3 | Python wrappers for **libzfs_core** library. 4 | 5 | *libzfs_core* is intended to be a stable, committed interface for programmatic 6 | administration of ZFS. 7 | This wrapper provides one-to-one wrappers for libzfs_core API functions, 8 | but the signatures and types are more natural to Python. 9 | nvlists are wrapped as dictionaries or lists depending on their usage. 10 | Some parameters have default values depending on typical use for 11 | increased convenience. 12 | Output parameters are not used and return values are directly returned. 13 | Enumerations and bit flags become strings and lists of strings in Python. 14 | Errors are reported as exceptions rather than integer errno-style 15 | error codes. The wrapper takes care to provide one-to-many mapping 16 | of the error codes to the exceptions by interpreting a context 17 | in which the error code is produced. 18 | 19 | To submit an issue or contribute to development of this package 20 | please visit its `GitHub repository `_. 21 | 22 | .. data:: MAXNAMELEN 23 | 24 | Maximum length of any ZFS name. 25 | ''' 26 | 27 | from ._constants import ( 28 | MAXNAMELEN, 29 | ) 30 | 31 | from ._libzfs_core import ( 32 | lzc_create, 33 | lzc_clone, 34 | lzc_rollback, 35 | lzc_snapshot, 36 | lzc_snap, 37 | lzc_destroy_snaps, 38 | lzc_bookmark, 39 | lzc_get_bookmarks, 40 | lzc_destroy_bookmarks, 41 | lzc_snaprange_space, 42 | lzc_hold, 43 | lzc_release, 44 | lzc_get_holds, 45 | lzc_send, 46 | lzc_send_space, 47 | lzc_receive, 48 | lzc_recv, 49 | lzc_exists, 50 | is_supported, 51 | lzc_promote, 52 | lzc_rename, 53 | lzc_destroy, 54 | lzc_inherit_prop, 55 | lzc_set_prop, 56 | lzc_get_props, 57 | lzc_list_children, 58 | lzc_list_snaps, 59 | ) 60 | 61 | __all__ = [ 62 | 'ctypes', 63 | 'exceptions', 64 | 'MAXNAMELEN', 65 | 'lzc_create', 66 | 'lzc_clone', 67 | 'lzc_rollback', 68 | 'lzc_snapshot', 69 | 'lzc_snap', 70 | 'lzc_destroy_snaps', 71 | 'lzc_bookmark', 72 | 'lzc_get_bookmarks', 73 | 'lzc_destroy_bookmarks', 74 | 'lzc_snaprange_space', 75 | 'lzc_hold', 76 | 'lzc_release', 77 | 'lzc_get_holds', 78 | 'lzc_send', 79 | 'lzc_send_space', 80 | 'lzc_receive', 81 | 'lzc_recv', 82 | 'lzc_exists', 83 | 'is_supported', 84 | 'lzc_promote', 85 | 'lzc_rename', 86 | 'lzc_destroy', 87 | 'lzc_inherit_prop', 88 | 'lzc_set_prop', 89 | 'lzc_get_props', 90 | 'lzc_list_children', 91 | 'lzc_list_snaps', 92 | ] 93 | 94 | # vim: softtabstop=4 tabstop=4 expandtab shiftwidth=4 95 | -------------------------------------------------------------------------------- /libzfs_core/_constants.py: -------------------------------------------------------------------------------- 1 | # Copyright 2015 ClusterHQ. See LICENSE file for details. 2 | 3 | """ 4 | Important `libzfs_core` constants. 5 | """ 6 | 7 | #: Maximum length of any ZFS name. 8 | MAXNAMELEN = 255 9 | 10 | # vim: softtabstop=4 tabstop=4 expandtab shiftwidth=4 11 | -------------------------------------------------------------------------------- /libzfs_core/_error_translation.py: -------------------------------------------------------------------------------- 1 | # Copyright 2015 ClusterHQ. See LICENSE file for details. 
2 | 3 | """ 4 | Helper routines for converting ``errno`` style error codes from C functions 5 | to Python exceptions defined by `libzfs_core` API. 6 | 7 | The conversion heavily depends on the context of the error: the attempted 8 | operation and the input parameters. For this reason, there is a conversion 9 | routine for each `libzfs_core` interface function. The conversion routines 10 | have the return code as a parameter as well as all the parameters of the 11 | corresponding interface functions. 12 | 13 | The parameters and exceptions are documented in the `libzfs_core` interfaces. 14 | """ 15 | 16 | import errno 17 | import re 18 | import string 19 | from . import exceptions as lzc_exc 20 | from ._constants import MAXNAMELEN 21 | 22 | 23 | def lzc_create_translate_error(ret, name, ds_type, props): 24 | if ret == 0: 25 | return 26 | if ret == errno.EINVAL: 27 | _validate_fs_name(name) 28 | raise lzc_exc.PropertyInvalid(name) 29 | 30 | if ret == errno.EEXIST: 31 | raise lzc_exc.FilesystemExists(name) 32 | if ret == errno.ENOENT: 33 | raise lzc_exc.ParentNotFound(name) 34 | raise _generic_exception(ret, name, "Failed to create filesystem") 35 | 36 | 37 | def lzc_clone_translate_error(ret, name, origin, props): 38 | if ret == 0: 39 | return 40 | if ret == errno.EINVAL: 41 | _validate_fs_name(name) 42 | _validate_snap_name(origin) 43 | if _pool_name(name) != _pool_name(origin): 44 | raise lzc_exc.PoolsDiffer(name) # see https://www.illumos.org/issues/5824 45 | else: 46 | raise lzc_exc.PropertyInvalid(name) 47 | 48 | if ret == errno.EEXIST: 49 | raise lzc_exc.FilesystemExists(name) 50 | if ret == errno.ENOENT: 51 | if not _is_valid_snap_name(origin): 52 | raise lzc_exc.SnapshotNameInvalid(origin) 53 | raise lzc_exc.DatasetNotFound(name) 54 | raise _generic_exception(ret, name, "Failed to create clone") 55 | 56 | 57 | def lzc_rollback_translate_error(ret, name): 58 | if ret == 0: 59 | return 60 | if ret == errno.EINVAL: 61 | _validate_fs_name(name) 62 | raise lzc_exc.SnapshotNotFound(name) 63 | if ret == errno.ENOENT: 64 | if not _is_valid_fs_name(name): 65 | raise lzc_exc.NameInvalid(name) 66 | else: 67 | raise lzc_exc.FilesystemNotFound(name) 68 | raise _generic_exception(ret, name, "Failed to rollback") 69 | 70 | 71 | def lzc_snapshot_translate_errors(ret, errlist, snaps, props): 72 | if ret == 0: 73 | return 74 | 75 | def _map(ret, name): 76 | if ret == errno.EXDEV: 77 | pool_names = map(_pool_name, snaps) 78 | same_pool = all(x == pool_names[0] for x in pool_names) 79 | if same_pool: 80 | return lzc_exc.DuplicateSnapshots(name) 81 | else: 82 | return lzc_exc.PoolsDiffer(name) 83 | elif ret == errno.EINVAL: 84 | if any(not _is_valid_snap_name(s) for s in snaps): 85 | return lzc_exc.NameInvalid(name) 86 | elif any(len(s) > MAXNAMELEN for s in snaps): 87 | return lzc_exc.NameTooLong(name) 88 | else: 89 | return lzc_exc.PropertyInvalid(name) 90 | 91 | if ret == errno.EEXIST: 92 | return lzc_exc.SnapshotExists(name) 93 | if ret == errno.ENOENT: 94 | return lzc_exc.FilesystemNotFound(name) 95 | return _generic_exception(ret, name, "Failed to create snapshot") 96 | 97 | _handle_err_list(ret, errlist, snaps, lzc_exc.SnapshotFailure, _map) 98 | 99 | 100 | def lzc_destroy_snaps_translate_errors(ret, errlist, snaps, defer): 101 | if ret == 0: 102 | return 103 | 104 | def _map(ret, name): 105 | if ret == errno.EEXIST: 106 | return lzc_exc.SnapshotIsCloned(name) 107 | if ret == errno.ENOENT: 108 | return lzc_exc.PoolNotFound(name) 109 | if ret == errno.EBUSY: 110 | return 
lzc_exc.SnapshotIsHeld(name) 111 | return _generic_exception(ret, name, "Failed to destroy snapshot") 112 | 113 | _handle_err_list(ret, errlist, snaps, lzc_exc.SnapshotDestructionFailure, _map) 114 | 115 | 116 | def lzc_bookmark_translate_errors(ret, errlist, bookmarks): 117 | if ret == 0: 118 | return 119 | 120 | def _map(ret, name): 121 | if ret == errno.EINVAL: 122 | if name: 123 | snap = bookmarks[name] 124 | pool_names = map(_pool_name, bookmarks.keys()) 125 | if not _is_valid_bmark_name(name): 126 | return lzc_exc.BookmarkNameInvalid(name) 127 | elif not _is_valid_snap_name(snap): 128 | return lzc_exc.SnapshotNameInvalid(snap) 129 | elif _fs_name(name) != _fs_name(snap): 130 | return lzc_exc.BookmarkMismatch(name) 131 | elif any(x != _pool_name(name) for x in pool_names): 132 | return lzc_exc.PoolsDiffer(name) 133 | else: 134 | invalid_names = [b for b in bookmarks.keys() if not _is_valid_bmark_name(b)] 135 | if invalid_names: 136 | return lzc_exc.BookmarkNameInvalid(invalid_names[0]) 137 | if ret == errno.EEXIST: 138 | return lzc_exc.BookmarkExists(name) 139 | if ret == errno.ENOENT: 140 | return lzc_exc.SnapshotNotFound(name) 141 | if ret == errno.ENOTSUP: 142 | return lzc_exc.BookmarkNotSupported(name) 143 | return _generic_exception(ret, name, "Failed to create bookmark") 144 | 145 | _handle_err_list(ret, errlist, bookmarks.keys(), lzc_exc.BookmarkFailure, _map) 146 | 147 | 148 | def lzc_get_bookmarks_translate_error(ret, fsname, props): 149 | if ret == 0: 150 | return 151 | if ret == errno.ENOENT: 152 | raise lzc_exc.FilesystemNotFound(fsname) 153 | raise _generic_exception(ret, fsname, "Failed to list bookmarks") 154 | 155 | 156 | def lzc_destroy_bookmarks_translate_errors(ret, errlist, bookmarks): 157 | if ret == 0: 158 | return 159 | 160 | def _map(ret, name): 161 | if ret == errno.EINVAL: 162 | return lzc_exc.NameInvalid(name) 163 | return _generic_exception(ret, name, "Failed to destroy bookmark") 164 | 165 | _handle_err_list(ret, errlist, bookmarks, lzc_exc.BookmarkDestructionFailure, _map) 166 | 167 | 168 | def lzc_snaprange_space_translate_error(ret, firstsnap, lastsnap): 169 | if ret == 0: 170 | return 171 | if ret == errno.EXDEV and firstsnap is not None: 172 | if _pool_name(firstsnap) != _pool_name(lastsnap): 173 | raise lzc_exc.PoolsDiffer(lastsnap) 174 | else: 175 | raise lzc_exc.SnapshotMismatch(lastsnap) 176 | if ret == errno.EINVAL: 177 | if not _is_valid_snap_name(firstsnap): 178 | raise lzc_exc.NameInvalid(firstsnap) 179 | elif not _is_valid_snap_name(lastsnap): 180 | raise lzc_exc.NameInvalid(lastsnap) 181 | elif len(firstsnap) > MAXNAMELEN: 182 | raise lzc_exc.NameTooLong(firstsnap) 183 | elif len(lastsnap) > MAXNAMELEN: 184 | raise lzc_exc.NameTooLong(lastsnap) 185 | elif _pool_name(firstsnap) != _pool_name(lastsnap): 186 | raise lzc_exc.PoolsDiffer(lastsnap) 187 | else: 188 | raise lzc_exc.SnapshotMismatch(lastsnap) 189 | if ret == errno.ENOENT: 190 | raise lzc_exc.SnapshotNotFound(lastsnap) 191 | raise _generic_exception(ret, lastsnap, "Failed to calculate space used by range of snapshots") 192 | 193 | 194 | def lzc_hold_translate_errors(ret, errlist, holds, fd): 195 | if ret == 0: 196 | return 197 | 198 | def _map(ret, name): 199 | if ret == errno.EXDEV: 200 | return lzc_exc.PoolsDiffer(name) 201 | elif ret == errno.EINVAL: 202 | if name: 203 | pool_names = map(_pool_name, holds.keys()) 204 | if not _is_valid_snap_name(name): 205 | return lzc_exc.NameInvalid(name) 206 | elif len(name) > MAXNAMELEN: 207 | return lzc_exc.NameTooLong(name) 208 | elif any(x 
!= _pool_name(name) for x in pool_names): 209 | return lzc_exc.PoolsDiffer(name) 210 | else: 211 | invalid_names = [b for b in holds.keys() if not _is_valid_snap_name(b)] 212 | if invalid_names: 213 | return lzc_exc.NameInvalid(invalid_names[0]) 214 | fs_name = None 215 | hold_name = None 216 | pool_name = None 217 | if name is not None: 218 | fs_name = _fs_name(name) 219 | pool_name = _pool_name(name) 220 | hold_name = holds[name] 221 | if ret == errno.ENOENT: 222 | return lzc_exc.FilesystemNotFound(fs_name) 223 | if ret == errno.EEXIST: 224 | return lzc_exc.HoldExists(name) 225 | if ret == errno.E2BIG: 226 | return lzc_exc.NameTooLong(hold_name) 227 | if ret == errno.ENOTSUP: 228 | return lzc_exc.FeatureNotSupported(pool_name) 229 | return _generic_exception(ret, name, "Failed to hold snapshot") 230 | 231 | if ret == errno.EBADF: 232 | raise lzc_exc.BadHoldCleanupFD() 233 | _handle_err_list(ret, errlist, holds.keys(), lzc_exc.HoldFailure, _map) 234 | 235 | 236 | def lzc_release_translate_errors(ret, errlist, holds): 237 | if ret == 0: 238 | return 239 | for _, hold_list in holds.iteritems(): 240 | if not isinstance(hold_list, list): 241 | raise lzc_exc.TypeError('holds must be in a list') 242 | 243 | def _map(ret, name): 244 | if ret == errno.EXDEV: 245 | return lzc_exc.PoolsDiffer(name) 246 | elif ret == errno.EINVAL: 247 | if name: 248 | pool_names = map(_pool_name, holds.keys()) 249 | if not _is_valid_snap_name(name): 250 | return lzc_exc.NameInvalid(name) 251 | elif len(name) > MAXNAMELEN: 252 | return lzc_exc.NameTooLong(name) 253 | elif any(x != _pool_name(name) for x in pool_names): 254 | return lzc_exc.PoolsDiffer(name) 255 | else: 256 | invalid_names = [b for b in holds.keys() if not _is_valid_snap_name(b)] 257 | if invalid_names: 258 | return lzc_exc.NameInvalid(invalid_names[0]) 259 | elif ret == errno.ENOENT: 260 | return lzc_exc.HoldNotFound(name) 261 | elif ret == errno.E2BIG: 262 | tag_list = holds[name] 263 | too_long_tags = [t for t in tag_list if len(t) > MAXNAMELEN] 264 | return lzc_exc.NameTooLong(too_long_tags[0]) 265 | elif ret == errno.ENOTSUP: 266 | pool_name = None 267 | if name is not None: 268 | pool_name = _pool_name(name) 269 | return lzc_exc.FeatureNotSupported(pool_name) 270 | else: 271 | return _generic_exception(ret, name, "Failed to release snapshot hold") 272 | 273 | _handle_err_list(ret, errlist, holds.keys(), lzc_exc.HoldReleaseFailure, _map) 274 | 275 | 276 | def lzc_get_holds_translate_error(ret, snapname): 277 | if ret == 0: 278 | return 279 | if ret == errno.EINVAL: 280 | _validate_snap_name(snapname) 281 | if ret == errno.ENOENT: 282 | raise lzc_exc.SnapshotNotFound(snapname) 283 | if ret == errno.ENOTSUP: 284 | raise lzc_exc.FeatureNotSupported(_pool_name(snapname)) 285 | raise _generic_exception(ret, snapname, "Failed to get holds on snapshot") 286 | 287 | 288 | def lzc_send_translate_error(ret, snapname, fromsnap, fd, flags): 289 | if ret == 0: 290 | return 291 | if ret == errno.EXDEV and fromsnap is not None: 292 | if _pool_name(fromsnap) != _pool_name(snapname): 293 | raise lzc_exc.PoolsDiffer(snapname) 294 | else: 295 | raise lzc_exc.SnapshotMismatch(snapname) 296 | elif ret == errno.EINVAL: 297 | if (fromsnap is not None and not _is_valid_snap_name(fromsnap) and 298 | not _is_valid_bmark_name(fromsnap)): 299 | raise lzc_exc.NameInvalid(fromsnap) 300 | elif not _is_valid_snap_name(snapname) and not _is_valid_fs_name(snapname): 301 | raise lzc_exc.NameInvalid(snapname) 302 | elif fromsnap is not None and len(fromsnap) > MAXNAMELEN: 303 | 
raise lzc_exc.NameTooLong(fromsnap) 304 | elif len(snapname) > MAXNAMELEN: 305 | raise lzc_exc.NameTooLong(snapname) 306 | elif fromsnap is not None and _pool_name(fromsnap) != _pool_name(snapname): 307 | raise lzc_exc.PoolsDiffer(snapname) 308 | elif ret == errno.ENOENT: 309 | if (fromsnap is not None and not _is_valid_snap_name(fromsnap) and 310 | not _is_valid_bmark_name(fromsnap)): 311 | raise lzc_exc.NameInvalid(fromsnap) 312 | raise lzc_exc.SnapshotNotFound(snapname) 313 | elif ret == errno.ENAMETOOLONG: 314 | if fromsnap is not None and len(fromsnap) > MAXNAMELEN: 315 | raise lzc_exc.NameTooLong(fromsnap) 316 | else: 317 | raise lzc_exc.NameTooLong(snapname) 318 | raise lzc_exc.StreamIOError(ret) 319 | 320 | 321 | def lzc_send_space_translate_error(ret, snapname, fromsnap): 322 | if ret == 0: 323 | return 324 | if ret == errno.EXDEV and fromsnap is not None: 325 | if _pool_name(fromsnap) != _pool_name(snapname): 326 | raise lzc_exc.PoolsDiffer(snapname) 327 | else: 328 | raise lzc_exc.SnapshotMismatch(snapname) 329 | elif ret == errno.EINVAL: 330 | if fromsnap is not None and not _is_valid_snap_name(fromsnap): 331 | raise lzc_exc.NameInvalid(fromsnap) 332 | elif not _is_valid_snap_name(snapname): 333 | raise lzc_exc.NameInvalid(snapname) 334 | elif fromsnap is not None and len(fromsnap) > MAXNAMELEN: 335 | raise lzc_exc.NameTooLong(fromsnap) 336 | elif len(snapname) > MAXNAMELEN: 337 | raise lzc_exc.NameTooLong(snapname) 338 | elif fromsnap is not None and _pool_name(fromsnap) != _pool_name(snapname): 339 | raise lzc_exc.PoolsDiffer(snapname) 340 | elif ret == errno.ENOENT and fromsnap is not None: 341 | if not _is_valid_snap_name(fromsnap): 342 | raise lzc_exc.NameInvalid(fromsnap) 343 | if ret == errno.ENOENT: 344 | raise lzc_exc.SnapshotNotFound(snapname) 345 | raise _generic_exception(ret, snapname, "Failed to estimate backup stream size") 346 | 347 | 348 | def lzc_receive_translate_error(ret, snapname, fd, force, origin, props): 349 | if ret == 0: 350 | return 351 | if ret == errno.EINVAL: 352 | if not _is_valid_snap_name(snapname) and not _is_valid_fs_name(snapname): 353 | raise lzc_exc.NameInvalid(snapname) 354 | elif len(snapname) > MAXNAMELEN: 355 | raise lzc_exc.NameTooLong(snapname) 356 | elif origin is not None and not _is_valid_snap_name(origin): 357 | raise lzc_exc.NameInvalid(origin) 358 | else: 359 | raise lzc_exc.BadStream() 360 | if ret == errno.ENOENT: 361 | if not _is_valid_snap_name(snapname): 362 | raise lzc_exc.NameInvalid(snapname) 363 | else: 364 | raise lzc_exc.DatasetNotFound(snapname) 365 | if ret == errno.EEXIST: 366 | raise lzc_exc.DatasetExists(snapname) 367 | if ret == errno.ENOTSUP: 368 | raise lzc_exc.StreamFeatureNotSupported() 369 | if ret == errno.ENODEV: 370 | raise lzc_exc.StreamMismatch(_fs_name(snapname)) 371 | if ret == errno.ETXTBSY: 372 | raise lzc_exc.DestinationModified(_fs_name(snapname)) 373 | if ret == errno.EBUSY: 374 | raise lzc_exc.DatasetBusy(_fs_name(snapname)) 375 | if ret == errno.ENOSPC: 376 | raise lzc_exc.NoSpace(_fs_name(snapname)) 377 | if ret == errno.EDQUOT: 378 | raise lzc_exc.QuotaExceeded(_fs_name(snapname)) 379 | if ret == errno.ENAMETOOLONG: 380 | raise lzc_exc.NameTooLong(snapname) 381 | if ret == errno.EROFS: 382 | raise lzc_exc.ReadOnlyPool(_pool_name(snapname)) 383 | if ret == errno.EAGAIN: 384 | raise lzc_exc.SuspendedPool(_pool_name(snapname)) 385 | 386 | raise lzc_exc.StreamIOError(ret) 387 | 388 | 389 | def lzc_promote_translate_error(ret, name): 390 | if ret == 0: 391 | return 392 | if ret == errno.EINVAL: 
393 | _validate_fs_name(name) 394 | raise lzc_exc.NotClone(name) 395 | if ret == errno.ENOTSOCK: 396 | raise lzc_exc.NotClone(name) 397 | if ret == errno.ENOENT: 398 | raise lzc_exc.FilesystemNotFound(name) 399 | if ret == errno.EEXIST: 400 | raise lzc_exc.SnapshotExists(name) 401 | raise _generic_exception(ret, name, "Failed to promote dataset") 402 | 403 | 404 | def lzc_rename_translate_error(ret, source, target): 405 | if ret == 0: 406 | return 407 | if ret == errno.EINVAL: 408 | _validate_fs_name(source) 409 | _validate_fs_name(target) 410 | if _pool_name(source) != _pool_name(target): 411 | raise lzc_exc.PoolsDiffer(source) 412 | if ret == errno.EEXIST: 413 | raise lzc_exc.FilesystemExists(target) 414 | if ret == errno.ENOENT: 415 | raise lzc_exc.FilesystemNotFound(source) 416 | raise _generic_exception(ret, source, "Failed to rename dataset") 417 | 418 | 419 | def lzc_destroy_translate_error(ret, name): 420 | if ret == 0: 421 | return 422 | if ret == errno.EINVAL: 423 | _validate_fs_name(name) 424 | if ret == errno.ENOENT: 425 | raise lzc_exc.FilesystemNotFound(name) 426 | raise _generic_exception(ret, name, "Failed to destroy dataset") 427 | 428 | 429 | def lzc_inherit_prop_translate_error(ret, name, prop): 430 | if ret == 0: 431 | return 432 | if ret == errno.EINVAL: 433 | _validate_fs_name(name) 434 | raise lzc_exc.PropertyInvalid(prop) 435 | if ret == errno.ENOENT: 436 | raise lzc_exc.DatasetNotFound(name) 437 | raise _generic_exception(ret, name, "Failed to inherit a property") 438 | 439 | 440 | def lzc_set_prop_translate_error(ret, name, prop, val): 441 | if ret == 0: 442 | return 443 | if ret == errno.EINVAL: 444 | _validate_fs_or_snap_name(name) 445 | raise lzc_exc.PropertyInvalid(prop) 446 | if ret == errno.ENOENT: 447 | raise lzc_exc.DatasetNotFound(name) 448 | raise _generic_exception(ret, name, "Failed to set a property") 449 | 450 | 451 | def lzc_get_props_translate_error(ret, name): 452 | if ret == 0: 453 | return 454 | if ret == errno.EINVAL: 455 | _validate_fs_or_snap_name(name) 456 | if ret == errno.ENOENT: 457 | raise lzc_exc.DatasetNotFound(name) 458 | raise _generic_exception(ret, name, "Failed to get properties") 459 | 460 | 461 | def lzc_list_children_translate_error(ret, name): 462 | if ret == 0: 463 | return 464 | if ret == errno.EINVAL: 465 | _validate_fs_name(name) 466 | raise _generic_exception(ret, name, "Error while iterating children") 467 | 468 | 469 | def lzc_list_snaps_translate_error(ret, name): 470 | if ret == 0: 471 | return 472 | if ret == errno.EINVAL: 473 | _validate_fs_name(name) 474 | raise _generic_exception(ret, name, "Error while iterating snapshots") 475 | 476 | 477 | def lzc_list_translate_error(ret, name, opts): 478 | if ret == 0: 479 | return 480 | if ret == errno.ENOENT: 481 | raise lzc_exc.DatasetNotFound(name) 482 | if ret == errno.EINVAL: 483 | _validate_fs_or_snap_name(name) 484 | raise _generic_exception(ret, name, "Error obtaining a list") 485 | 486 | 487 | def _handle_err_list(ret, errlist, names, exception, mapper): 488 | ''' 489 | Convert one or more errors from an operation into the requested exception. 490 | 491 | :param int ret: the overall return code. 492 | :param errlist: the dictionary that maps entity names to their specific error codes. 493 | :type errlist: dict of bytes:int 494 | :param names: the list of all names of the entities on which the operation was attempted. 495 | :param type exception: the type of the exception to raise if an error occurred. 
496 | The exception should be a subclass of `MultipleOperationsFailure`. 497 | :param function mapper: the function that maps an error code and a name to a Python exception. 498 | 499 | Unless ``ret`` is zero this function will raise the ``exception``. 500 | If the ``errlist`` is not empty, then the compound exception will contain a list of exceptions 501 | corresponding to each individual error code in the ``errlist``. 502 | Otherwise, the ``exception`` will contain a list with a single exception corresponding to the 503 | ``ret`` value. If the ``names`` list contains only one element, that is, the operation was 504 | attempted on a single entity, then the name of that entity is passed to the ``mapper``. 505 | If the operation was attempted on multiple entities, but the ``errlist`` is empty, then we 506 | cannot know which entity caused the error and, thus, ``None`` is used as a name to signify 507 | that fact. 508 | 509 | .. note:: 510 | The ``errlist`` can contain a special element with a key of "N_MORE_ERRORS". 511 | That element means that there were too many errors to place on the ``errlist``. 512 | Those errors are suppressed and only their count is provided as a value of the special 513 | ``N_MORE_ERRORS`` element. 514 | ''' 515 | if ret == 0: 516 | return 517 | 518 | if len(errlist) == 0: 519 | suppressed_count = 0 520 | if len(names) == 1: 521 | name = names[0] 522 | else: 523 | name = None 524 | errors = [mapper(ret, name)] 525 | else: 526 | errors = [] 527 | suppressed_count = errlist.pop('N_MORE_ERRORS', 0) 528 | for name, err in errlist.iteritems(): 529 | errors.append(mapper(err, name)) 530 | 531 | raise exception(errors, suppressed_count) 532 | 533 | 534 | def _pool_name(name): 535 | ''' 536 | Extract a pool name from the given dataset or bookmark name. 537 | 538 | '/' separates dataset name components. 539 | '@' separates a snapshot name from the rest of the dataset name. 540 | '#' separates a bookmark name from the rest of the dataset name. 541 | ''' 542 | return re.split('[/@#]', name, 1)[0] 543 | 544 | 545 | def _fs_name(name): 546 | ''' 547 | Extract a dataset name from the given snapshot or bookmark name. 548 | 549 | '@' separates a snapshot name from the rest of the dataset name. 550 | '#' separates a bookmark name from the rest of the dataset name.
551 | ''' 552 | return re.split('[@#]', name, 1)[0] 553 | 554 | 555 | def _is_valid_name_component(component): 556 | allowed = string.ascii_letters + string.digits + '-_.: ' 557 | return component and all(x in allowed for x in component) 558 | 559 | 560 | def _is_valid_fs_name(name): 561 | return name and all(_is_valid_name_component(c) for c in name.split('/')) 562 | 563 | 564 | def _is_valid_snap_name(name): 565 | parts = name.split('@') 566 | return (len(parts) == 2 and _is_valid_fs_name(parts[0]) and 567 | _is_valid_name_component(parts[1])) 568 | 569 | 570 | def _is_valid_bmark_name(name): 571 | parts = name.split('#') 572 | return (len(parts) == 2 and _is_valid_fs_name(parts[0]) and 573 | _is_valid_name_component(parts[1])) 574 | 575 | 576 | def _validate_fs_name(name): 577 | if not _is_valid_fs_name(name): 578 | raise lzc_exc.FilesystemNameInvalid(name) 579 | elif len(name) > MAXNAMELEN: 580 | raise lzc_exc.NameTooLong(name) 581 | 582 | 583 | def _validate_snap_name(name): 584 | if not _is_valid_snap_name(name): 585 | raise lzc_exc.SnapshotNameInvalid(name) 586 | elif len(name) > MAXNAMELEN: 587 | raise lzc_exc.NameTooLong(name) 588 | 589 | 590 | def _validate_bmark_name(name): 591 | if not _is_valid_bmark_name(name): 592 | raise lzc_exc.BookmarkNameInvalid(name) 593 | elif len(name) > MAXNAMELEN: 594 | raise lzc_exc.NameTooLong(name) 595 | 596 | 597 | def _validate_fs_or_snap_name(name): 598 | if not _is_valid_fs_name(name) and not _is_valid_snap_name(name): 599 | raise lzc_exc.NameInvalid(name) 600 | elif len(name) > MAXNAMELEN: 601 | raise lzc_exc.NameTooLong(name) 602 | 603 | 604 | def _generic_exception(err, name, message): 605 | if err in _error_to_exception: 606 | return _error_to_exception[err](name) 607 | else: 608 | return lzc_exc.ZFSGenericError(err, message, name) 609 | 610 | _error_to_exception = {e.errno: e for e in [ 611 | lzc_exc.ZIOError, 612 | lzc_exc.NoSpace, 613 | lzc_exc.QuotaExceeded, 614 | lzc_exc.DatasetBusy, 615 | lzc_exc.NameTooLong, 616 | lzc_exc.ReadOnlyPool, 617 | lzc_exc.SuspendedPool, 618 | lzc_exc.PoolsDiffer, 619 | lzc_exc.PropertyNotSupported, 620 | ]} 621 | 622 | 623 | # vim: softtabstop=4 tabstop=4 expandtab shiftwidth=4 624 | -------------------------------------------------------------------------------- /libzfs_core/_libzfs_core.py: -------------------------------------------------------------------------------- 1 | # Copyright 2015 ClusterHQ. See LICENSE file for details. 2 | 3 | """ 4 | Python wrappers for libzfs_core interfaces. 5 | 6 | As a rule, there is a Python function for each C function. 7 | The signatures of the Python functions generally follow those of the 8 | functions, but the argument types are natural to Python. 9 | nvlists are wrapped as dictionaries or lists depending on their usage. 10 | Some parameters have default values depending on typical use for 11 | increased convenience. Output parameters are not used and return values 12 | are directly returned. Error conditions are signalled by exceptions 13 | rather than by integer error codes. 14 | """ 15 | 16 | import errno 17 | import functools 18 | import fcntl 19 | import os 20 | import struct 21 | import threading 22 | from . import exceptions 23 | from . import _error_translation as errors 24 | from .bindings import libzfs_core 25 | from ._constants import MAXNAMELEN 26 | from .ctypes import int32_t 27 | from ._nvlist import nvlist_in, nvlist_out 28 | 29 | 30 | def lzc_create(name, ds_type='zfs', props=None): 31 | ''' 32 | Create a ZFS filesystem or a ZFS volume ("zvol"). 
33 | 34 | :param bytes name: a name of the dataset to be created. 35 | :param str ds_type: the type of the dataset to be created, currently supported 36 | types are "zfs" (the default) for a filesystem 37 | and "zvol" for a volume. 38 | :param props: a `dict` of ZFS dataset property name-value pairs (empty by default). 39 | :type props: dict of bytes:Any 40 | 41 | :raises FilesystemExists: if a dataset with the given name already exists. 42 | :raises ParentNotFound: if a parent dataset of the requested dataset does not exist. 43 | :raises PropertyInvalid: if one or more of the specified properties is invalid 44 | or has an invalid type or value. 45 | :raises NameInvalid: if the name is not a valid dataset name. 46 | :raises NameTooLong: if the name is too long. 47 | ''' 48 | if props is None: 49 | props = {} 50 | if ds_type == 'zfs': 51 | ds_type = _lib.DMU_OST_ZFS 52 | elif ds_type == 'zvol': 53 | ds_type = _lib.DMU_OST_ZVOL 54 | else: 55 | raise exceptions.DatasetTypeInvalid(ds_type) 56 | nvlist = nvlist_in(props) 57 | ret = _lib.lzc_create(name, ds_type, nvlist) 58 | errors.lzc_create_translate_error(ret, name, ds_type, props) 59 | 60 | 61 | def lzc_clone(name, origin, props=None): 62 | ''' 63 | Clone a ZFS filesystem or a ZFS volume ("zvol") from a given snapshot. 64 | 65 | :param bytes name: a name of the dataset to be created. 66 | :param bytes origin: a name of the origin snapshot. 67 | :param props: a `dict` of ZFS dataset property name-value pairs (empty by default). 68 | :type props: dict of bytes:Any 69 | 70 | :raises FilesystemExists: if a dataset with the given name already exists. 71 | :raises DatasetNotFound: if either a parent dataset of the requested dataset 72 | or the origin snapshot does not exist. 73 | :raises PropertyInvalid: if one or more of the specified properties is invalid 74 | or has an invalid type or value. 75 | :raises FilesystemNameInvalid: if the name is not a valid dataset name. 76 | :raises SnapshotNameInvalid: if the origin is not a valid snapshot name. 77 | :raises NameTooLong: if the name or the origin name is too long. 78 | :raises PoolsDiffer: if the clone and the origin have different pool names. 79 | 80 | .. note:: 81 | Because of a deficiency of the underlying C interface, 82 | :exc:`.DatasetNotFound` can mean that either a parent filesystem of the target 83 | or the origin snapshot does not exist. 84 | It is currently impossible to distinguish between the cases. 85 | :func:`lzc_hold` can be used to check that the snapshot exists and ensure that 86 | it is not destroyed before cloning. 87 | ''' 88 | if props is None: 89 | props = {} 90 | nvlist = nvlist_in(props) 91 | ret = _lib.lzc_clone(name, origin, nvlist) 92 | errors.lzc_clone_translate_error(ret, name, origin, props) 93 | 94 | 95 | def lzc_rollback(name): 96 | ''' 97 | Roll back a filesystem or volume to its most recent snapshot. 98 | 99 | :param bytes name: a name of the dataset to be rolled back. 100 | :return: a name of the most recent snapshot. 101 | :rtype: bytes 102 | 103 | :raises FilesystemNotFound: if the dataset does not exist. 104 | :raises SnapshotNotFound: if the dataset does not have any snapshots. 105 | :raises NameInvalid: if the dataset name is invalid. 106 | :raises NameTooLong: if the dataset name is too long. 107 | ''' 108 | # Account for terminating NUL in C strings.
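# (lzc_rollback() copies the name of the most recent snapshot into this buffer,
# so it must be able to hold MAXNAMELEN characters plus the trailing NUL.)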
109 | snapnamep = _ffi.new('char[]', MAXNAMELEN + 1) 110 | ret = _lib.lzc_rollback(name, snapnamep, MAXNAMELEN + 1) 111 | errors.lzc_rollback_translate_error(ret, name) 112 | return _ffi.string(snapnamep) 113 | 114 | 115 | def lzc_snapshot(snaps, props=None): 116 | ''' 117 | Create snapshots. 118 | 119 | All snapshots must be in the same pool. 120 | 121 | Optionally snapshot properties can be set on all snapshots. 122 | Currently only user properties (prefixed with "user:") are supported. 123 | 124 | Either all snapshots are successfully created, or none are created and 125 | an exception is raised. 126 | 127 | :param snaps: a list of names of snapshots to be created. 128 | :type snaps: list of bytes 129 | :param props: a `dict` of ZFS dataset property name-value pairs (empty by default). 130 | :type props: dict of bytes:bytes 131 | 132 | :raises SnapshotFailure: if one or more snapshots could not be created. 133 | 134 | .. note:: 135 | :exc:`.SnapshotFailure` is a compound exception that provides at least 136 | one detailed error object in :attr:`SnapshotFailure.errors` `list`. 137 | 138 | .. warning:: 139 | The underlying implementation reports an individual, per-snapshot error 140 | only for the :exc:`.SnapshotExists` condition and *sometimes* for 141 | :exc:`.NameTooLong`. 142 | In all other cases a single error is reported without connection to any 143 | specific snapshot name(s). 144 | 145 | This has the following implications: 146 | 147 | * if multiple error conditions are encountered, only one of them is reported 148 | 149 | * unless only one snapshot is requested, it is impossible to tell 150 | how many snapshots are problematic and which they are 151 | 152 | * only if there are no other error conditions is :exc:`.SnapshotExists` 153 | reported for all affected snapshots 154 | 155 | * :exc:`.NameTooLong` can behave either in the same way as 156 | :exc:`.SnapshotExists` or as all other exceptions. 157 | The former is the case where the full snapshot name exceeds the maximum 158 | allowed length but the short snapshot name (after '@') is within 159 | the limit. 160 | The latter is the case when the short name alone exceeds the maximum 161 | allowed length. 162 | ''' 163 | snaps_dict = {name: None for name in snaps} 164 | errlist = {} 165 | snaps_nvlist = nvlist_in(snaps_dict) 166 | if props is None: 167 | props = {} 168 | props_nvlist = nvlist_in(props) 169 | with nvlist_out(errlist) as errlist_nvlist: 170 | ret = _lib.lzc_snapshot(snaps_nvlist, props_nvlist, errlist_nvlist) 171 | errors.lzc_snapshot_translate_errors(ret, errlist, snaps, props) 172 | 173 | 174 | lzc_snap = lzc_snapshot 175 | 176 | 177 | def lzc_destroy_snaps(snaps, defer): 178 | ''' 179 | Destroy snapshots. 180 | 181 | They must all be in the same pool. 182 | Snapshots that do not exist will be silently ignored. 183 | 184 | If 'defer' is not set, and a snapshot has user holds or clones, the 185 | destroy operation will fail and none of the snapshots will be 186 | destroyed. 187 | 188 | If 'defer' is set, and a snapshot has user holds or clones, it will be 189 | marked for deferred destruction, and will be destroyed when the last hold 190 | or clone is removed/destroyed. 191 | 192 | The operation succeeds if all snapshots were destroyed (or marked for 193 | later destruction if 'defer' is set) or didn't exist to begin with. 194 | 195 | :param snaps: a list of names of snapshots to be destroyed.
196 | :type snaps: list of bytes 197 | :param bool defer: whether to mark busy snapshots for deferred destruction 198 | rather than immediately failing. 199 | 200 | :raises SnapshotDestructionFailure: if one or more snapshots could not be created. 201 | 202 | .. note:: 203 | :exc:`.SnapshotDestructionFailure` is a compound exception that provides at least 204 | one detailed error object in :attr:`SnapshotDestructionFailure.errors` `list`. 205 | 206 | Typical error is :exc:`SnapshotIsCloned` if `defer` is `False`. 207 | The snapshot names are validated quite loosely and invalid names are typically 208 | ignored as nonexisiting snapshots. 209 | 210 | A snapshot name referring to a filesystem that doesn't exist is ignored. 211 | However, non-existent pool name causes :exc:`PoolNotFound`. 212 | ''' 213 | snaps_dict = {name: None for name in snaps} 214 | errlist = {} 215 | snaps_nvlist = nvlist_in(snaps_dict) 216 | with nvlist_out(errlist) as errlist_nvlist: 217 | ret = _lib.lzc_destroy_snaps(snaps_nvlist, defer, errlist_nvlist) 218 | errors.lzc_destroy_snaps_translate_errors(ret, errlist, snaps, defer) 219 | 220 | 221 | def lzc_bookmark(bookmarks): 222 | ''' 223 | Create bookmarks. 224 | 225 | :param bookmarks: a dict that maps names of wanted bookmarks to names of existing snapshots. 226 | :type bookmarks: dict of bytes to bytes 227 | 228 | :raises BookmarkFailure: if any of the bookmarks can not be created for any reason. 229 | 230 | The bookmarks `dict` maps from name of the bookmark (e.g. :file:`{pool}/{fs}#{bmark}`) to 231 | the name of the snapshot (e.g. :file:`{pool}/{fs}@{snap}`). All the bookmarks and 232 | snapshots must be in the same pool. 233 | ''' 234 | errlist = {} 235 | nvlist = nvlist_in(bookmarks) 236 | with nvlist_out(errlist) as errlist_nvlist: 237 | ret = _lib.lzc_bookmark(nvlist, errlist_nvlist) 238 | errors.lzc_bookmark_translate_errors(ret, errlist, bookmarks) 239 | 240 | 241 | def lzc_get_bookmarks(fsname, props=None): 242 | ''' 243 | Retrieve a listing of bookmarks for the given file system. 244 | 245 | :param bytes fsname: a name of the filesystem. 246 | :param props: a `list` of properties that will be returned for each bookmark. 247 | :type props: list of bytes 248 | :return: a `dict` that maps the bookmarks' short names to their properties. 249 | :rtype: dict of bytes:dict 250 | 251 | :raises FilesystemNotFound: if the filesystem is not found. 252 | 253 | The following are valid properties on bookmarks: 254 | 255 | guid : integer 256 | globally unique identifier of the snapshot the bookmark refers to 257 | createtxg : integer 258 | txg when the snapshot the bookmark refers to was created 259 | creation : integer 260 | timestamp when the snapshot the bookmark refers to was created 261 | 262 | Any other properties passed in ``props`` are ignored without reporting 263 | any error. 264 | Values in the returned dictionary map the names of the requested properties 265 | to their respective values. 266 | ''' 267 | bmarks = {} 268 | if props is None: 269 | props = [] 270 | props_dict = {name: None for name in props} 271 | nvlist = nvlist_in(props_dict) 272 | with nvlist_out(bmarks) as bmarks_nvlist: 273 | ret = _lib.lzc_get_bookmarks(fsname, nvlist, bmarks_nvlist) 274 | errors.lzc_get_bookmarks_translate_error(ret, fsname, props) 275 | return bmarks 276 | 277 | 278 | def lzc_destroy_bookmarks(bookmarks): 279 | ''' 280 | Destroy bookmarks. 281 | 282 | :param bookmarks: a list of the bookmarks to be destroyed. 283 | The bookmarks are specified as :file:`{fs}#{bmark}`. 
284 | :type bookmarks: list of bytes 285 | 286 | :raises BookmarkDestructionFailure: if any of the bookmarks may not be destroyed. 287 | 288 | The bookmarks must all be in the same pool. 289 | Bookmarks that do not exist will be silently ignored. 290 | This also includes the case where the filesystem component of the bookmark 291 | name does not exist. 292 | However, an invalid bookmark name will cause :exc:`.NameInvalid` error 293 | reported in :attr:`SnapshotDestructionFailure.errors`. 294 | 295 | Either all bookmarks that existed are destroyed or an exception is raised. 296 | ''' 297 | errlist = {} 298 | bmarks_dict = {name: None for name in bookmarks} 299 | nvlist = nvlist_in(bmarks_dict) 300 | with nvlist_out(errlist) as errlist_nvlist: 301 | ret = _lib.lzc_destroy_bookmarks(nvlist, errlist_nvlist) 302 | errors.lzc_destroy_bookmarks_translate_errors(ret, errlist, bookmarks) 303 | 304 | 305 | def lzc_snaprange_space(firstsnap, lastsnap): 306 | ''' 307 | Calculate a size of data referenced by snapshots in the inclusive range between 308 | the ``firstsnap`` and the ``lastsnap`` and not shared with any other datasets. 309 | 310 | :param bytes firstsnap: the name of the first snapshot in the range. 311 | :param bytes lastsnap: the name of the last snapshot in the range. 312 | :return: the calculated stream size, in bytes. 313 | :rtype: `int` or `long` 314 | 315 | :raises SnapshotNotFound: if either of the snapshots does not exist. 316 | :raises NameInvalid: if the name of either snapshot is invalid. 317 | :raises NameTooLong: if the name of either snapshot is too long. 318 | :raises SnapshotMismatch: if ``fromsnap`` is not an ancestor snapshot of ``snapname``. 319 | :raises PoolsDiffer: if the snapshots belong to different pools. 320 | 321 | ``lzc_snaprange_space`` calculates total size of blocks that exist 322 | because they are referenced only by one or more snapshots in the given range 323 | but no other dataset. 324 | In other words, this is the set of blocks that were born after the snap before 325 | firstsnap, and died before the snap after the last snap. 326 | Yet another interpretation is that the result of ``lzc_snaprange_space`` is the size 327 | of the space that would be freed if the snapshots in the range are destroyed. 328 | 329 | If the same snapshot is given as both the ``firstsnap`` and the ``lastsnap``. 330 | In that case ``lzc_snaprange_space`` calculates space used by the snapshot. 331 | ''' 332 | valp = _ffi.new('uint64_t *') 333 | ret = _lib.lzc_snaprange_space(firstsnap, lastsnap, valp) 334 | errors.lzc_snaprange_space_translate_error(ret, firstsnap, lastsnap) 335 | return int(valp[0]) 336 | 337 | 338 | def lzc_hold(holds, fd=None): 339 | ''' 340 | Create *user holds* on snapshots. If there is a hold on a snapshot, 341 | the snapshot can not be destroyed. (However, it can be marked for deletion 342 | by :func:`lzc_destroy_snaps` ( ``defer`` = `True` ).) 343 | 344 | :param holds: the dictionary of names of the snapshots to hold mapped to the hold names. 345 | :type holds: dict of bytes : bytes 346 | :type fd: int or None 347 | :param fd: if not None then it must be the result of :func:`os.open` called as ``os.open("/dev/zfs", O_EXCL)``. 348 | :type fd: int or None 349 | :return: a list of the snapshots that do not exist. 350 | :rtype: list of bytes 351 | 352 | :raises HoldFailure: if a hold was impossible on one or more of the snapshots. 353 | :raises BadHoldCleanupFD: if ``fd`` is not a valid file descriptor associated with :file:`/dev/zfs`. 
354 | 355 | The snapshots must all be in the same pool. 356 | 357 | If ``fd`` is not None, then when the ``fd`` is closed (including on process 358 | termination), the holds will be released. If the system is shut down 359 | uncleanly, the holds will be released when the pool is next opened 360 | or imported. 361 | 362 | Holds for snapshots which don't exist will be skipped and have an entry 363 | added to the return value, but will not cause an overall failure. 364 | No exceptions is raised if all holds, for snapshots that existed, were succesfully created. 365 | Otherwise :exc:`.HoldFailure` exception is raised and no holds will be created. 366 | :attr:`.HoldFailure.errors` may contain a single element for an error that is not 367 | specific to any hold / snapshot, or it may contain one or more elements 368 | detailing specific error per each affected hold. 369 | ''' 370 | errlist = {} 371 | if fd is None: 372 | fd = -1 373 | nvlist = nvlist_in(holds) 374 | with nvlist_out(errlist) as errlist_nvlist: 375 | ret = _lib.lzc_hold(nvlist, fd, errlist_nvlist) 376 | errors.lzc_hold_translate_errors(ret, errlist, holds, fd) 377 | # If there is no error (no exception raised by _handleErrList), but errlist 378 | # is not empty, then it contains missing snapshots. 379 | assert all(x == errno.ENOENT for x in errlist.itervalues()) 380 | return errlist.keys() 381 | 382 | 383 | def lzc_release(holds): 384 | ''' 385 | Release *user holds* on snapshots. 386 | 387 | If the snapshot has been marked for 388 | deferred destroy (by lzc_destroy_snaps(defer=B_TRUE)), it does not have 389 | any clones, and all the user holds are removed, then the snapshot will be 390 | destroyed. 391 | 392 | The snapshots must all be in the same pool. 393 | 394 | :param holds: a ``dict`` where keys are snapshot names and values are 395 | lists of hold tags to remove. 396 | :type holds: dict of bytes : list of bytes 397 | :return: a list of any snapshots that do not exist and of any tags that do not 398 | exist for existing snapshots. 399 | Such tags are qualified with a corresponding snapshot name 400 | using the following format :file:`{pool}/{fs}@{snap}#{tag}` 401 | :rtype: list of bytes 402 | 403 | :raises HoldReleaseFailure: if one or more existing holds could not be released. 404 | 405 | Holds which failed to release because they didn't exist will have an entry 406 | added to errlist, but will not cause an overall failure. 407 | 408 | This call is success if ``holds`` was empty or all holds that 409 | existed, were successfully removed. 410 | Otherwise an exception will be raised. 411 | ''' 412 | errlist = {} 413 | holds_dict = {} 414 | for snap, hold_list in holds.iteritems(): 415 | if not isinstance(hold_list, list): 416 | raise TypeError('holds must be in a list') 417 | holds_dict[snap] = {hold: None for hold in hold_list} 418 | nvlist = nvlist_in(holds_dict) 419 | with nvlist_out(errlist) as errlist_nvlist: 420 | ret = _lib.lzc_release(nvlist, errlist_nvlist) 421 | errors.lzc_release_translate_errors(ret, errlist, holds) 422 | # If there is no error (no exception raised by _handleErrList), but errlist 423 | # is not empty, then it contains missing snapshots and tags. 424 | assert all(x == errno.ENOENT for x in errlist.itervalues()) 425 | return errlist.keys() 426 | 427 | 428 | def lzc_get_holds(snapname): 429 | ''' 430 | Retrieve list of *user holds* on the specified snapshot. 431 | 432 | :param bytes snapname: the name of the snapshot. 
433 | :return: holds on the snapshot along with their creation times 434 | in seconds since the epoch 435 | :rtype: dict of bytes : int 436 | ''' 437 | holds = {} 438 | with nvlist_out(holds) as nvlist: 439 | ret = _lib.lzc_get_holds(snapname, nvlist) 440 | errors.lzc_get_holds_translate_error(ret, snapname) 441 | return holds 442 | 443 | 444 | def lzc_send(snapname, fromsnap, fd, flags=None): 445 | ''' 446 | Generate a zfs send stream for the specified snapshot and write it to 447 | the specified file descriptor. 448 | 449 | :param bytes snapname: the name of the snapshot to send. 450 | :param fromsnap: if not None the name of the starting snapshot 451 | for the incremental stream. 452 | :type fromsnap: bytes or None 453 | :param int fd: the file descriptor to write the send stream to. 454 | :param flags: the flags that control what enhanced features can be used 455 | in the stream. 456 | :type flags: list of bytes 457 | 458 | :raises SnapshotNotFound: if either the starting snapshot is not `None` and does not exist, 459 | or if the ending snapshot does not exist. 460 | :raises NameInvalid: if the name of either snapshot is invalid. 461 | :raises NameTooLong: if the name of either snapshot is too long. 462 | :raises SnapshotMismatch: if ``fromsnap`` is not an ancestor snapshot of ``snapname``. 463 | :raises PoolsDiffer: if the snapshots belong to different pools. 464 | :raises IOError: if an input / output error occurs while writing to ``fd``. 465 | :raises UnknownStreamFeature: if the ``flags`` contain an unknown flag name. 466 | 467 | If ``fromsnap`` is None, a full (non-incremental) stream will be sent. 468 | If ``fromsnap`` is not None, it must be the full name of a snapshot or 469 | bookmark to send an incremental from, e.g. :file:`{pool}/{fs}@{earlier_snap}` 470 | or :file:`{pool}/{fs}#{earlier_bmark}`. 471 | 472 | The specified snapshot or bookmark must represent an earlier point in the history 473 | of ``snapname``. 474 | It can be an earlier snapshot in the same filesystem or zvol as ``snapname``, 475 | or it can be the origin of ``snapname``'s filesystem, or an earlier 476 | snapshot in the origin, etc. 477 | ``fromsnap`` must be strictly an earlier snapshot, specifying the same snapshot 478 | as both ``fromsnap`` and ``snapname`` is an error. 479 | 480 | If ``flags`` contains *"large_blocks"*, the stream is permitted 481 | to contain ``DRR_WRITE`` records with ``drr_length`` > 128K, and ``DRR_OBJECT`` 482 | records with ``drr_blksz`` > 128K. 483 | 484 | If ``flags`` contains *"embedded_data"*, the stream is permitted 485 | to contain ``DRR_WRITE_EMBEDDED`` records with 486 | ``drr_etype`` == ``BP_EMBEDDED_TYPE_DATA``, 487 | which the receiving system must support (as indicated by support 488 | for the *embedded_data* feature). 489 | 490 | .. note:: 491 | ``lzc_send`` can actually accept a filesystem name as the ``snapname``. 492 | In that case ``lzc_send`` acts as if a temporary snapshot was created 493 | after the start of the call and before the stream starts being produced. 494 | 495 | .. note:: 496 | ``lzc_send`` does not return until all of the stream is written to ``fd``. 497 | 498 | .. note:: 499 | ``lzc_send`` does *not* close ``fd`` upon returning. 
500 | ''' 501 | if fromsnap is not None: 502 | c_fromsnap = fromsnap 503 | else: 504 | c_fromsnap = _ffi.NULL 505 | c_flags = 0 506 | if flags is None: 507 | flags = [] 508 | for flag in flags: 509 | c_flag = { 510 | 'embedded_data': _lib.LZC_SEND_FLAG_EMBED_DATA, 511 | 'large_blocks': _lib.LZC_SEND_FLAG_LARGE_BLOCK, 512 | }.get(flag) 513 | if c_flag is None: 514 | raise exceptions.UnknownStreamFeature(flag) 515 | c_flags |= c_flag 516 | 517 | ret = _lib.lzc_send(snapname, c_fromsnap, fd, c_flags) 518 | errors.lzc_send_translate_error(ret, snapname, fromsnap, fd, flags) 519 | 520 | 521 | def lzc_send_space(snapname, fromsnap=None): 522 | ''' 523 | Estimate size of a full or incremental backup stream 524 | given the optional starting snapshot and the ending snapshot. 525 | 526 | :param bytes snapname: the name of the snapshot for which the estimate should be done. 527 | :param fromsnap: the optional starting snapshot name. 528 | If not `None` then an incremental stream size is estimated, 529 | otherwise a full stream is esimated. 530 | :type fromsnap: `bytes` or `None` 531 | :return: the estimated stream size, in bytes. 532 | :rtype: `int` or `long` 533 | 534 | :raises SnapshotNotFound: if either the starting snapshot is not `None` and does not exist, 535 | or if the ending snapshot does not exist. 536 | :raises NameInvalid: if the name of either snapshot is invalid. 537 | :raises NameTooLong: if the name of either snapshot is too long. 538 | :raises SnapshotMismatch: if ``fromsnap`` is not an ancestor snapshot of ``snapname``. 539 | :raises PoolsDiffer: if the snapshots belong to different pools. 540 | 541 | ``fromsnap``, if not ``None``, must be strictly an earlier snapshot, 542 | specifying the same snapshot as both ``fromsnap`` and ``snapname`` is an error. 543 | ''' 544 | if fromsnap is not None: 545 | c_fromsnap = fromsnap 546 | else: 547 | c_fromsnap = _ffi.NULL 548 | valp = _ffi.new('uint64_t *') 549 | ret = _lib.lzc_send_space(snapname, c_fromsnap, valp) 550 | errors.lzc_send_space_translate_error(ret, snapname, fromsnap) 551 | return int(valp[0]) 552 | 553 | 554 | def lzc_receive(snapname, fd, force=False, origin=None, props=None): 555 | ''' 556 | Receive from the specified ``fd``, creating the specified snapshot. 557 | 558 | :param bytes snapname: the name of the snapshot to create. 559 | :param int fd: the file descriptor from which to read the stream. 560 | :param bool force: whether to roll back or destroy the target filesystem 561 | if that is required to receive the stream. 562 | :param origin: the optional origin snapshot name if the stream is for a clone. 563 | :type origin: bytes or None 564 | :param props: the properties to set on the snapshot as *received* properties. 565 | :type props: dict of bytes : Any 566 | 567 | :raises IOError: if an input / output error occurs while reading from the ``fd``. 568 | :raises DatasetExists: if the snapshot named ``snapname`` already exists. 569 | :raises DatasetExists: if the stream is a full stream and the destination filesystem already exists. 570 | :raises DatasetExists: if ``force`` is `True` but the destination filesystem could not 571 | be rolled back to a matching snapshot because a newer snapshot 572 | exists and it is an origin of a cloned filesystem. 573 | :raises StreamMismatch: if an incremental stream is received and the latest 574 | snapshot of the destination filesystem does not match 575 | the source snapshot of the stream. 
576 | :raises StreamMismatch: if a full stream is received and the destination 577 | filesystem already exists and it has at least one snapshot, 578 | and ``force`` is `False`. 579 | :raises StreamMismatch: if an incremental clone stream is received but the specified 580 | ``origin`` is not the actual received origin. 581 | :raises DestinationModified: if an incremental stream is received and the destination 582 | filesystem has been modified since the last snapshot 583 | and ``force`` is `False`. 584 | :raises DestinationModified: if a full stream is received and the destination 585 | filesystem already exists and it does not have any 586 | snapshots, and ``force`` is `False`. 587 | :raises DatasetNotFound: if the destination filesystem and its parent do not exist. 588 | :raises DatasetNotFound: if the ``origin`` is not `None` and does not exist. 589 | :raises DatasetBusy: if ``force`` is `True` but the destination filesystem could not 590 | be rolled back to a matching snapshot because a newer snapshot 591 | is held and could not be destroyed. 592 | :raises DatasetBusy: if another receive operation is being performed on the 593 | destination filesystem. 594 | :raises BadStream: if the stream is corrupt or it is not recognized or it is 595 | a compound stream or it is a clone stream, but ``origin`` 596 | is `None`. 597 | :raises BadStream: if a clone stream is received and the destination filesystem 598 | already exists. 599 | :raises StreamFeatureNotSupported: if the stream has a feature that is not 600 | supported on this side. 601 | :raises PropertyInvalid: if one or more of the specified properties is invalid 602 | or has an invalid type or value. 603 | :raises NameInvalid: if the name of either snapshot is invalid. 604 | :raises NameTooLong: if the name of either snapshot is too long. 605 | 606 | .. note:: 607 | The ``origin`` is ignored if the actual stream is an incremental stream 608 | that is not a clone stream and the destination filesystem exists. 609 | If the stream is a full stream and the destination filesystem does not 610 | exist then the ``origin`` is checked for existence: if it does not exist 611 | :exc:`.DatasetNotFound` is raised, otherwise :exc:`.StreamMismatch` is 612 | raised, because that snapshot can not have any relation to the stream. 613 | 614 | .. note:: 615 | If ``force`` is `True` and the stream is incremental then the destination 616 | filesystem is rolled back to a matching source snapshot if necessary. 617 | Intermediate snapshots are destroyed in that case. 618 | 619 | However, none of the existing snapshots may have the same name as 620 | ``snapname`` even if such a snapshot were to be destroyed. 621 | The existing ``snapname`` snapshot always causes :exc:`.SnapshotExists` 622 | to be raised. 623 | 624 | If ``force`` is `True` and the stream is a full stream then the destination 625 | filesystem is replaced with the received filesystem unless the former 626 | has any snapshots. This prevents the destination filesystem from being 627 | rolled back / replaced. 628 | 629 | .. note:: 630 | This interface does not work on dedup'd streams 631 | (those with ``DMU_BACKUP_FEATURE_DEDUP``). 632 | 633 | .. note:: 634 | ``lzc_receive`` does not return until all of the stream is read from ``fd`` 635 | and applied to the pool. 636 | 637 | .. note:: 638 | ``lzc_receive`` does *not* close ``fd`` upon returning. 
639 | ''' 640 | 641 | if origin is not None: 642 | c_origin = origin 643 | else: 644 | c_origin = _ffi.NULL 645 | if props is None: 646 | props = {} 647 | nvlist = nvlist_in(props) 648 | ret = _lib.lzc_receive(snapname, nvlist, c_origin, force, fd) 649 | errors.lzc_receive_translate_error(ret, snapname, fd, force, origin, props) 650 | 651 | 652 | lzc_recv = lzc_receive 653 | 654 | 655 | def lzc_exists(name): 656 | ''' 657 | Check if a dataset (a filesystem, or a volume, or a snapshot) 658 | with the given name exists. 659 | 660 | :param bytes name: the dataset name to check. 661 | :return: `True` if the dataset exists, `False` otherwise. 662 | :rtype: bool 663 | 664 | .. note:: 665 | ``lzc_exists`` can not be used to check for existence of bookmarks. 666 | ''' 667 | ret = _lib.lzc_exists(name) 668 | return bool(ret) 669 | 670 | 671 | def is_supported(func): 672 | ''' 673 | Check whether C *libzfs_core* provides implementation required 674 | for the given Python wrapper. 675 | 676 | If `is_supported` returns ``False`` for the function, then 677 | calling the function would result in :exc:`NotImplementedError`. 678 | 679 | :param function func: the function to check. 680 | :return bool: whether the function can be used. 681 | ''' 682 | fname = func.__name__ 683 | if fname not in globals(): 684 | raise ValueError(fname + ' is not from libzfs_core') 685 | if not callable(func): 686 | raise ValueError(fname + ' is not a function') 687 | if not fname.startswith("lzc_"): 688 | raise ValueError(fname + ' is not a libzfs_core API function') 689 | check_func = getattr(func, "_check_func", None) 690 | if check_func is not None: 691 | return is_supported(check_func) 692 | return getattr(_lib, fname, None) is not None 693 | 694 | 695 | def _uncommitted(depends_on=None): 696 | ''' 697 | Mark an API function as being an uncommitted extension that might not be 698 | available. 699 | 700 | :param function depends_on: the function that would be checked 701 | instead of a decorated function. 702 | For example, if the decorated function uses 703 | another uncommitted function. 704 | 705 | This decorator transforms a decorated function to raise 706 | :exc:`NotImplementedError` if the C libzfs_core library does not provide 707 | a function with the same name as the decorated function. 708 | 709 | The optional `depends_on` parameter can be provided if the decorated 710 | function does not directly call the C function but instead calls another 711 | Python function that follows the typical convention. 712 | One example is :func:`lzc_list_snaps` that calls :func:`lzc_list` that 713 | calls ``lzc_list`` in libzfs_core. 714 | 715 | This decorator is implemented using :func:`is_supported`. 716 | ''' 717 | def _uncommitted_decorator(func, depends_on=depends_on): 718 | @functools.wraps(func) 719 | def _f(*args, **kwargs): 720 | if not is_supported(_f): 721 | raise NotImplementedError(func.__name__) 722 | return func(*args, **kwargs) 723 | if depends_on is not None: 724 | _f._check_func = depends_on 725 | return _f 726 | return _uncommitted_decorator 727 | 728 | 729 | @_uncommitted() 730 | def lzc_promote(name): 731 | ''' 732 | Promotes the ZFS dataset. 733 | 734 | :param bytes name: the name of the dataset to promote. 735 | :raises NameInvalid: if the dataset name is invalid. 736 | :raises NameTooLong: if the dataset name is too long. 737 | :raises NameTooLong: if the dataset's origin has a snapshot that, 738 | if transferred to the dataset, would get 739 | a too long name. 
740 | :raises NotClone: if the dataset is not a clone. 741 | :raises FilesystemNotFound: if the dataset does not exist. 742 | :raises SnapshotExists: if the dataset already has a snapshot with 743 | the same name as one of the origin's snapshots. 744 | ''' 745 | ret = _lib.lzc_promote(name, _ffi.NULL, _ffi.NULL) 746 | errors.lzc_promote_translate_error(ret, name) 747 | 748 | 749 | @_uncommitted() 750 | def lzc_rename(source, target): 751 | ''' 752 | Rename the ZFS dataset. 753 | 754 | :param source name: the current name of the dataset to rename. 755 | :param target name: the new name of the dataset. 756 | :raises NameInvalid: if either the source or target name is invalid. 757 | :raises NameTooLong: if either the source or target name is too long. 758 | :raises NameTooLong: if a snapshot of the source would get a too long 759 | name after renaming. 760 | :raises FilesystemNotFound: if the source does not exist. 761 | :raises FilesystemNotFound: if the target's parent does not exist. 762 | :raises FilesystemExists: if the target already exists. 763 | :raises PoolsDiffer: if the source and target belong to different pools. 764 | ''' 765 | ret = _lib.lzc_rename(source, target, _ffi.NULL, _ffi.NULL) 766 | errors.lzc_rename_translate_error(ret, source, target) 767 | 768 | 769 | @_uncommitted() 770 | def lzc_destroy_one(name): 771 | ''' 772 | Destroy the ZFS dataset. 773 | 774 | :param bytes name: the name of the dataset to destroy. 775 | :raises NameInvalid: if the dataset name is invalid. 776 | :raises NameTooLong: if the dataset name is too long. 777 | :raises FilesystemNotFound: if the dataset does not exist. 778 | ''' 779 | ret = _lib.lzc_destroy_one(name, _ffi.NULL) 780 | errors.lzc_destroy_translate_error(ret, name) 781 | 782 | 783 | # As the extended API is not committed yet, the names of the new interfaces 784 | # are not settled down yet. 785 | # lzc_destroy() might make more sense as we do not have lzc_create_one(). 786 | lzc_destroy = lzc_destroy_one 787 | 788 | 789 | @_uncommitted() 790 | def lzc_inherit(name, prop): 791 | ''' 792 | Inherit properties from a parent dataset of the given ZFS dataset. 793 | 794 | :param bytes name: the name of the dataset. 795 | :param bytes prop: the name of the property to inherit. 796 | :raises NameInvalid: if the dataset name is invalid. 797 | :raises NameTooLong: if the dataset name is too long. 798 | :raises DatasetNotFound: if the dataset does not exist. 799 | :raises PropertyInvalid: if one or more of the specified properties is invalid 800 | or has an invalid type or value. 801 | 802 | Inheriting a property actually resets it to its default value 803 | or removes it if it's a user property, so that the property could be 804 | inherited if it's inheritable. If the property is not inheritable 805 | then it would just have its default value. 806 | 807 | This function can be used on snapshots to inherit user defined properties. 808 | ''' 809 | ret = _lib.lzc_inherit(name, prop, _ffi.NULL) 810 | errors.lzc_inherit_prop_translate_error(ret, name, prop) 811 | 812 | 813 | # As the extended API is not committed yet, the names of the new interfaces 814 | # are not settled down yet. 815 | # lzc_inherit_prop makes it clearer what is to be inherited. 816 | lzc_inherit_prop = lzc_inherit 817 | 818 | 819 | @_uncommitted() 820 | def lzc_set_props(name, prop, val): 821 | ''' 822 | Set properties of the ZFS dataset. 823 | 824 | :param bytes name: the name of the dataset. 825 | :param bytes prop: the name of the property. 
826 | :param Any val: the value of the property. 827 | :raises NameInvalid: if the dataset name is invalid. 828 | :raises NameTooLong: if the dataset name is too long. 829 | :raises DatasetNotFound: if the dataset does not exist. 830 | :raises NoSpace: if the property controls a quota and the values is 831 | too small for that quota. 832 | :raises PropertyInvalid: if one or more of the specified properties is invalid 833 | or has an invalid type or value. 834 | 835 | This function can be used on snapshots to set user defined properties. 836 | 837 | .. note:: 838 | An attempt to set a readonly / statistic property is ignored 839 | without reporting any error. 840 | ''' 841 | props = {prop: val} 842 | props_nv = nvlist_in(props) 843 | ret = _lib.lzc_set_props(name, props_nv, _ffi.NULL, _ffi.NULL) 844 | errors.lzc_set_prop_translate_error(ret, name, prop, val) 845 | 846 | 847 | # As the extended API is not committed yet, the names of the new interfaces 848 | # are not settled down yet. 849 | # It's not clear if atomically setting multiple properties is an achievable 850 | # goal and an interface acting on mutiple entities must do so atomically 851 | # by convention. 852 | # Being able to set a single property at a time is sufficient for ClusterHQ. 853 | lzc_set_prop = lzc_set_props 854 | 855 | 856 | @_uncommitted() 857 | def lzc_list(name, options): 858 | ''' 859 | List subordinate elements of the given dataset. 860 | 861 | This function can be used to list child datasets and snapshots 862 | of the given dataset. The listed elements can be filtered by 863 | their type and by their depth relative to the starting dataset. 864 | 865 | :param bytes name: the name of the dataset to be listed, could 866 | be a snapshot or a dataset. 867 | :param options: a `dict` of the options that control the listing 868 | behavior. 869 | :type options: dict of bytes:Any 870 | :return: a pair of file descriptors the first of which can be 871 | used to read the listing. 872 | :rtype: tuple of (int, int) 873 | :raises DatasetNotFound: if the dataset does not exist. 874 | 875 | Two options are currently available: 876 | 877 | recurse : integer or None 878 | specifies depth of the recursive listing. If ``None`` the 879 | depth is not limited. 880 | Absence of this option means that only the given dataset 881 | is listed. 882 | 883 | type : dict of bytes:None 884 | specifies dataset types to include into the listing. 885 | Currently allowed keys are "filesystem", "volume", "snapshot". 886 | Absence of this option implies all types. 887 | 888 | The first of the returned file descriptors can be used to 889 | read the listing in a binary encounded format. The data is 890 | a series of variable sized records each starting with a fixed 891 | size header, the header is followed by a serialized ``nvlist``. 892 | Each record describes a single element and contains the element's 893 | name as well as its properties. 894 | The file descriptor must be closed after reading from it. 895 | 896 | The second file descriptor represents a pipe end to which the 897 | kernel driver is writing information. It should not be closed 898 | until all interesting information has been read and it must 899 | be explicitly closed afterwards. 
900 | ''' 901 | (rfd, wfd) = os.pipe() 902 | fcntl.fcntl(rfd, fcntl.F_SETFD, fcntl.FD_CLOEXEC) 903 | fcntl.fcntl(wfd, fcntl.F_SETFD, fcntl.FD_CLOEXEC) 904 | options = options.copy() 905 | options['fd'] = int32_t(wfd) 906 | opts_nv = nvlist_in(options) 907 | ret = _lib.lzc_list(name, opts_nv) 908 | if ret == errno.ESRCH: 909 | return (None, None) 910 | errors.lzc_list_translate_error(ret, name, options) 911 | return (rfd, wfd) 912 | 913 | 914 | # Description of the binary format used to pass data from the kernel. 915 | _PIPE_RECORD_FORMAT = 'IBBBB' 916 | _PIPE_RECORD_SIZE = struct.calcsize(_PIPE_RECORD_FORMAT) 917 | 918 | 919 | def _list(name, recurse=None, types=None): 920 | ''' 921 | A wrapper for :func:`lzc_list` that hides details of working 922 | with the file descriptors and provides data in an easy to 923 | consume format. 924 | 925 | :param bytes name: the name of the dataset to be listed, could 926 | be a snapshot, a volume or a filesystem. 927 | :param recurse: specifies depth of the recursive listing. 928 | If ``None`` the depth is not limited. 929 | :param types: specifies dataset types to include into the listing. 930 | Currently allowed keys are "filesystem", "volume", "snapshot". 931 | ``None`` is equivalent to specifying the type of the dataset 932 | named by `name`. 933 | :type types: list of bytes or None 934 | :type recurse: integer or None 935 | :return: a list of dictionaries each describing a single listed 936 | element. 937 | :rtype: list of dict 938 | ''' 939 | options = {} 940 | 941 | # Convert types to a dict suitable for mapping to an nvlist. 942 | if types is not None: 943 | types = {x: None for x in types} 944 | options['type'] = types 945 | if recurse is None or recurse > 0: 946 | options['recurse'] = recurse 947 | 948 | # Note that other_fd is used by the kernel side to write 949 | # the data, so we have to keep that descriptor open until 950 | # we are done. 951 | # Also, we have to explicitly close the descriptor as the 952 | # kernel doesn't do that. 953 | (fd, other_fd) = lzc_list(name, options) 954 | if fd is None: 955 | return 956 | 957 | try: 958 | while True: 959 | record_bytes = os.read(fd, _PIPE_RECORD_SIZE) 960 | if not record_bytes: 961 | break 962 | (size, _, err, _, _) = struct.unpack( 963 | _PIPE_RECORD_FORMAT, record_bytes) 964 | if err == errno.ESRCH: 965 | break 966 | errors.lzc_list_translate_error(err, name, options) 967 | if size == 0: 968 | break 969 | data_bytes = os.read(fd, size) 970 | result = {} 971 | with nvlist_out(result) as nvp: 972 | ret = _lib.nvlist_unpack(data_bytes, size, nvp, 0) 973 | if ret != 0: 974 | raise exceptions.ZFSGenericError(ret, None, 975 | "Failed to unpack list data") 976 | yield result 977 | finally: 978 | os.close(other_fd) 979 | os.close(fd) 980 | 981 | 982 | @_uncommitted(lzc_list) 983 | def lzc_get_props(name): 984 | ''' 985 | Get properties of the ZFS dataset. 986 | 987 | :param bytes name: the name of the dataset. 988 | :raises DatasetNotFound: if the dataset does not exist. 989 | :raises NameInvalid: if the dataset name is invalid. 990 | :raises NameTooLong: if the dataset name is too long. 991 | :return: a dictionary mapping the property names to their values. 992 | :rtype: dict of bytes:Any 993 | 994 | .. note:: 995 | The value of ``clones`` property is a `list` of clone names 996 | as byte strings. 997 | 998 | .. warning:: 999 | The returned dictionary does not contain entries for properties 1000 | with default values. 
One exception is the ``mountpoint`` property 1001 | for which the default value is derived from the dataset name. 1002 | ''' 1003 | result = next(_list(name, recurse=0)) 1004 | is_snapshot = result['dmu_objset_stats']['dds_is_snapshot'] 1005 | result = result['properties'] 1006 | # In most cases the source of the property is uninteresting and the 1007 | # value alone is sufficient. One exception is the 'mountpoint' 1008 | # property the final value of which is not the same as the inherited 1009 | # value. 1010 | mountpoint = result.get('mountpoint') 1011 | if mountpoint is not None: 1012 | mountpoint_src = mountpoint['source'] 1013 | mountpoint_val = mountpoint['value'] 1014 | # 'source' is the name of the dataset that has 'mountpoint' set 1015 | # to a non-default value and from which the current dataset inherits 1016 | # the property. 'source' can be the current dataset if its 1017 | # 'mountpoint' is explicitly set. 1018 | # 'source' can also be a special value like '$recvd', that case 1019 | # is equivalent to the property being set on the current dataset. 1020 | # Note that a normal mountpoint value should start with '/' 1021 | # unlike the special values "none" and "legacy". 1022 | if mountpoint_val.startswith('/') and not mountpoint_src.startswith('$'): 1023 | mountpoint_val = mountpoint_val + name[len(mountpoint_src):] 1024 | elif not is_snapshot: 1025 | mountpoint_val = '/' + name 1026 | else: 1027 | mountpoint_val = None 1028 | result = {k: v['value'] for k, v in result.iteritems()} 1029 | if 'clones' in result: 1030 | result['clones'] = result['clones'].keys() 1031 | if mountpoint_val is not None: 1032 | result['mountpoint'] = mountpoint_val 1033 | return result 1034 | 1035 | 1036 | @_uncommitted(lzc_list) 1037 | def lzc_list_children(name): 1038 | ''' 1039 | List the children of the ZFS dataset. 1040 | 1041 | :param bytes name: the name of the dataset. 1042 | :return: an iterator that produces the names of the children. 1043 | :raises NameInvalid: if the dataset name is invalid. 1044 | :raises NameTooLong: if the dataset name is too long. 1045 | :raises DatasetNotFound: if the dataset does not exist. 1046 | 1047 | .. warning:: 1048 | If the dataset does not exist, then the returned iterator would produce 1049 | no results and no error is reported. 1050 | That case is indistinguishable from the dataset having no children. 1051 | 1052 | An attempt to list children of a snapshot is silently ignored as well. 1053 | ''' 1054 | children = [] 1055 | for entry in _list(name, recurse=1, types=['filesystem', 'volume']): 1056 | child = entry['name'] 1057 | if child != name: 1058 | children.append(child) 1059 | 1060 | return iter(children) 1061 | 1062 | 1063 | @_uncommitted(lzc_list) 1064 | def lzc_list_snaps(name): 1065 | ''' 1066 | List the snapshots of the ZFS dataset. 1067 | 1068 | :param bytes name: the name of the dataset. 1069 | :return: an iterator that produces the names of the snapshots. 1070 | :raises NameInvalid: if the dataset name is invalid. 1071 | :raises NameTooLong: if the dataset name is too long. 1072 | :raises DatasetNotFound: if the dataset does not exist. 1073 | 1074 | .. warning:: 1075 | If the dataset does not exist, then the returned iterator would produce 1076 | no results and no error is reported. 1077 | That case is indistinguishable from the dataset having no snapshots. 1078 | 1079 | An attempt to list snapshots of a snapshot is silently ignored as well. 
1080 | ''' 1081 | snaps = [] 1082 | for entry in _list(name, recurse=1, types=['snapshot']): 1083 | snap = entry['name'] 1084 | if snap != name: 1085 | snaps.append(snap) 1086 | 1087 | return iter(snaps) 1088 | 1089 | 1090 | # TODO: a better way to init and uninit the library 1091 | def _initialize(): 1092 | class LazyInit(object): 1093 | 1094 | def __init__(self, lib): 1095 | self._lib = lib 1096 | self._inited = False 1097 | self._lock = threading.Lock() 1098 | 1099 | def __getattr__(self, name): 1100 | if not self._inited: 1101 | with self._lock: 1102 | if not self._inited: 1103 | ret = self._lib.libzfs_core_init() 1104 | if ret != 0: 1105 | raise exceptions.ZFSInitializationFailed(ret) 1106 | self._inited = True 1107 | return getattr(self._lib, name) 1108 | 1109 | return LazyInit(libzfs_core.lib) 1110 | 1111 | _ffi = libzfs_core.ffi 1112 | _lib = _initialize() 1113 | 1114 | 1115 | # vim: softtabstop=4 tabstop=4 expandtab shiftwidth=4 1116 | -------------------------------------------------------------------------------- /libzfs_core/_nvlist.py: -------------------------------------------------------------------------------- 1 | # Copyright 2015 ClusterHQ. See LICENSE file for details. 2 | 3 | """ 4 | nvlist_in and nvlist_out provide support for converting between 5 | a dictionary on the Python side and an nvlist_t on the C side 6 | with the automatic memory management for C memory allocations. 7 | 8 | nvlist_in takes a dictionary and produces a CData object corresponding 9 | to a C nvlist_t pointer suitable for passing as an input parameter. 10 | The nvlist_t is populated based on the dictionary. 11 | 12 | nvlist_out takes a dictionary and produces a CData object corresponding 13 | to a C nvlist_t pointer to pointer suitable for passing as an output parameter. 14 | Upon exit from a with-block the dictionary is populated based on the nvlist_t. 15 | 16 | The dictionary must follow a certain format to be convertible 17 | to the nvlist_t. The dictionary produced from the nvlist_t 18 | will follow the same format. 19 | 20 | Format: 21 | - keys are always byte strings 22 | - a value can be None in which case it represents boolean truth by its mere presence 23 | - a value can be a bool 24 | - a value can be a byte string 25 | - a value can be an integer 26 | - a value can be a CFFI CData object representing one of the following C types: 27 | int8_t, uint8_t, int16_t, uint16_t, int32_t, uint32_t, int64_t, uint64_t, boolean_t, uchar_t 28 | - a value can be a dictionary that recursively adheres to this format 29 | - a value can be a list of bools, byte strings, integers or CData objects of types specified above 30 | - a value can be a list of dictionaries that adhere to this format 31 | - all elements of a list value must be of the same type 32 | """ 33 | 34 | import numbers 35 | from collections import namedtuple 36 | from contextlib import contextmanager 37 | from .bindings import libnvpair 38 | from .ctypes import _type_to_suffix 39 | 40 | _ffi = libnvpair.ffi 41 | _lib = libnvpair.lib 42 | 43 | 44 | def nvlist_in(props): 45 | """ 46 | This function converts a python dictionary to a C nvlist_t 47 | and provides automatic memory management for the latter. 48 | 49 | :param dict props: the dictionary to be converted. 50 | :return: an FFI CData object representing the nvlist_t pointer. 
51 | :rtype: CData 52 | """ 53 | nvlistp = _ffi.new("nvlist_t **") 54 | res = _lib.nvlist_alloc(nvlistp, 1, 0) # UNIQUE_NAME == 1 55 | if res != 0: 56 | raise MemoryError('nvlist_alloc failed') 57 | nvlist = _ffi.gc(nvlistp[0], _lib.nvlist_free) 58 | _dict_to_nvlist(props, nvlist) 59 | return nvlist 60 | 61 | 62 | @contextmanager 63 | def nvlist_out(props): 64 | """ 65 | A context manager that allocates a pointer to a C nvlist_t and yields 66 | a CData object representing a pointer to the pointer via 'as' target. 67 | The caller can pass that pointer to a pointer to a C function that 68 | creates a new nvlist_t object. 69 | The context manager takes care of memory management for the nvlist_t 70 | and also populates the 'props' dictionary with data from the nvlist_t 71 | upon leaving the 'with' block. 72 | 73 | :param dict props: the dictionary to be populated with data from the nvlist. 74 | :return: an FFI CData object representing the pointer to nvlist_t pointer. 75 | :rtype: CData 76 | """ 77 | nvlistp = _ffi.new("nvlist_t **") 78 | nvlistp[0] = _ffi.NULL # to be sure 79 | try: 80 | yield nvlistp 81 | # clear old entries, if any 82 | props.clear() 83 | _nvlist_to_dict(nvlistp[0], props) 84 | finally: 85 | if nvlistp[0] != _ffi.NULL: 86 | _lib.nvlist_free(nvlistp[0]) 87 | nvlistp[0] = _ffi.NULL 88 | 89 | 90 | _TypeInfo = namedtuple('_TypeInfo', ['suffix', 'ctype', 'is_array', 'convert']) 91 | 92 | 93 | def _type_info(typeid): 94 | return { 95 | _lib.DATA_TYPE_BOOLEAN: _TypeInfo(None, None, None, None), 96 | _lib.DATA_TYPE_BOOLEAN_VALUE: _TypeInfo("boolean_value", "boolean_t *", False, bool), 97 | _lib.DATA_TYPE_BYTE: _TypeInfo("byte", "uchar_t *", False, int), 98 | _lib.DATA_TYPE_INT8: _TypeInfo("int8", "int8_t *", False, int), 99 | _lib.DATA_TYPE_UINT8: _TypeInfo("uint8", "uint8_t *", False, int), 100 | _lib.DATA_TYPE_INT16: _TypeInfo("int16", "int16_t *", False, int), 101 | _lib.DATA_TYPE_UINT16: _TypeInfo("uint16", "uint16_t *", False, int), 102 | _lib.DATA_TYPE_INT32: _TypeInfo("int32", "int32_t *", False, int), 103 | _lib.DATA_TYPE_UINT32: _TypeInfo("uint32", "uint32_t *", False, int), 104 | _lib.DATA_TYPE_INT64: _TypeInfo("int64", "int64_t *", False, int), 105 | _lib.DATA_TYPE_UINT64: _TypeInfo("uint64", "uint64_t *", False, int), 106 | _lib.DATA_TYPE_STRING: _TypeInfo("string", "char **", False, _ffi.string), 107 | _lib.DATA_TYPE_NVLIST: _TypeInfo("nvlist", "nvlist_t **", False, lambda x: _nvlist_to_dict(x, {})), 108 | _lib.DATA_TYPE_BOOLEAN_ARRAY: _TypeInfo("boolean_array", "boolean_t **", True, bool), 109 | # XXX use bytearray ? 
110 | _lib.DATA_TYPE_BYTE_ARRAY: _TypeInfo("byte_array", "uchar_t **", True, int), 111 | _lib.DATA_TYPE_INT8_ARRAY: _TypeInfo("int8_array", "int8_t **", True, int), 112 | _lib.DATA_TYPE_UINT8_ARRAY: _TypeInfo("uint8_array", "uint8_t **", True, int), 113 | _lib.DATA_TYPE_INT16_ARRAY: _TypeInfo("int16_array", "int16_t **", True, int), 114 | _lib.DATA_TYPE_UINT16_ARRAY: _TypeInfo("uint16_array", "uint16_t **", True, int), 115 | _lib.DATA_TYPE_INT32_ARRAY: _TypeInfo("int32_array", "int32_t **", True, int), 116 | _lib.DATA_TYPE_UINT32_ARRAY: _TypeInfo("uint32_array", "uint32_t **", True, int), 117 | _lib.DATA_TYPE_INT64_ARRAY: _TypeInfo("int64_array", "int64_t **", True, int), 118 | _lib.DATA_TYPE_UINT64_ARRAY: _TypeInfo("uint64_array", "uint64_t **", True, int), 119 | _lib.DATA_TYPE_STRING_ARRAY: _TypeInfo("string_array", "char ***", True, _ffi.string), 120 | _lib.DATA_TYPE_NVLIST_ARRAY: _TypeInfo("nvlist_array", "nvlist_t ***", True, lambda x: _nvlist_to_dict(x, {})), 121 | }[typeid] 122 | 123 | # only integer properties need to be here 124 | _prop_name_to_type_str = { 125 | "rewind-request": "uint32", 126 | "type": "uint32", 127 | "N_MORE_ERRORS": "int32", 128 | "pool_context": "int32", 129 | } 130 | 131 | 132 | def _nvlist_add_array(nvlist, key, array): 133 | def _is_integer(x): 134 | return isinstance(x, numbers.Integral) and not isinstance(x, bool) 135 | 136 | ret = 0 137 | specimen = array[0] 138 | is_integer = _is_integer(specimen) 139 | specimen_ctype = None 140 | if isinstance(specimen, _ffi.CData): 141 | specimen_ctype = _ffi.typeof(specimen) 142 | 143 | for element in array[1:]: 144 | if is_integer and _is_integer(element): 145 | pass 146 | elif type(element) is not type(specimen): 147 | raise TypeError('Array has elements of different types: ' + 148 | type(specimen).__name__ + 149 | ' and ' + 150 | type(element).__name__) 151 | elif specimen_ctype is not None: 152 | ctype = _ffi.typeof(element) 153 | if ctype is not specimen_ctype: 154 | raise TypeError('Array has elements of different C types: ' + 155 | _ffi.typeof(specimen).cname + 156 | ' and ' + 157 | _ffi.typeof(element).cname) 158 | 159 | if isinstance(specimen, dict): 160 | # NB: can't use automatic memory management via nvlist_in() here, 161 | # we have a loop, but 'with' would require recursion 162 | c_array = [] 163 | for dictionary in array: 164 | nvlistp = _ffi.new('nvlist_t **') 165 | res = _lib.nvlist_alloc(nvlistp, 1, 0) # UNIQUE_NAME == 1 166 | if res != 0: 167 | raise MemoryError('nvlist_alloc failed') 168 | nested_nvlist = _ffi.gc(nvlistp[0], _lib.nvlist_free) 169 | _dict_to_nvlist(dictionary, nested_nvlist) 170 | c_array.append(nested_nvlist) 171 | ret = _lib.nvlist_add_nvlist_array(nvlist, key, c_array, len(c_array)) 172 | elif isinstance(specimen, bytes): 173 | c_array = [] 174 | for string in array: 175 | c_array.append(_ffi.new('char[]', string)) 176 | ret = _lib.nvlist_add_string_array(nvlist, key, c_array, len(c_array)) 177 | elif isinstance(specimen, bool): 178 | ret = _lib.nvlist_add_boolean_array(nvlist, key, array, len(array)) 179 | elif isinstance(specimen, numbers.Integral): 180 | suffix = _prop_name_to_type_str.get(key, "uint64") 181 | cfunc = getattr(_lib, "nvlist_add_%s_array" % (suffix,)) 182 | ret = cfunc(nvlist, key, array, len(array)) 183 | elif isinstance(specimen, _ffi.CData) and _ffi.typeof(specimen) in _type_to_suffix: 184 | suffix = _type_to_suffix[_ffi.typeof(specimen)][True] 185 | cfunc = getattr(_lib, "nvlist_add_%s_array" % (suffix,)) 186 | ret = cfunc(nvlist, key, array, len(array)) 
187 | else: 188 | raise TypeError('Unsupported value type ' + type(specimen).__name__) 189 | if ret != 0: 190 | raise MemoryError('nvlist_add failed, err = %d' % ret) 191 | 192 | 193 | def _nvlist_to_dict(nvlist, props): 194 | pair = _lib.nvlist_next_nvpair(nvlist, _ffi.NULL) 195 | while pair != _ffi.NULL: 196 | name = _ffi.string(_lib.nvpair_name(pair)) 197 | typeid = int(_lib.nvpair_type(pair)) 198 | typeinfo = _type_info(typeid) 199 | # XXX nvpair_type_is_array() is broken for DATA_TYPE_INT8_ARRAY at the moment 200 | # see https://www.illumos.org/issues/5778 201 | # is_array = bool(_lib.nvpair_type_is_array(pair)) 202 | is_array = typeinfo.is_array 203 | cfunc = getattr(_lib, "nvpair_value_%s" % (typeinfo.suffix,), None) 204 | val = None 205 | ret = 0 206 | if is_array: 207 | valptr = _ffi.new(typeinfo.ctype) 208 | lenptr = _ffi.new("uint_t *") 209 | ret = cfunc(pair, valptr, lenptr) 210 | if ret != 0: 211 | raise RuntimeError('nvpair_value failed') 212 | length = int(lenptr[0]) 213 | val = [] 214 | for i in range(length): 215 | val.append(typeinfo.convert(valptr[0][i])) 216 | else: 217 | if typeid == _lib.DATA_TYPE_BOOLEAN: 218 | val = None # XXX or should it be True ? 219 | else: 220 | valptr = _ffi.new(typeinfo.ctype) 221 | ret = cfunc(pair, valptr) 222 | if ret != 0: 223 | raise RuntimeError('nvpair_value failed') 224 | val = typeinfo.convert(valptr[0]) 225 | props[name] = val 226 | pair = _lib.nvlist_next_nvpair(nvlist, pair) 227 | return props 228 | 229 | 230 | def _dict_to_nvlist(props, nvlist): 231 | for k, v in props.items(): 232 | if not isinstance(k, bytes): 233 | raise TypeError('Unsupported key type ' + type(k).__name__) 234 | ret = 0 235 | if isinstance(v, dict): 236 | ret = _lib.nvlist_add_nvlist(nvlist, k, nvlist_in(v)) 237 | elif isinstance(v, list): 238 | _nvlist_add_array(nvlist, k, v) 239 | elif isinstance(v, bytes): 240 | ret = _lib.nvlist_add_string(nvlist, k, v) 241 | elif isinstance(v, bool): 242 | ret = _lib.nvlist_add_boolean_value(nvlist, k, v) 243 | elif v is None: 244 | ret = _lib.nvlist_add_boolean(nvlist, k) 245 | elif isinstance(v, numbers.Integral): 246 | suffix = _prop_name_to_type_str.get(k, "uint64") 247 | cfunc = getattr(_lib, "nvlist_add_%s" % (suffix,)) 248 | ret = cfunc(nvlist, k, v) 249 | elif isinstance(v, _ffi.CData) and _ffi.typeof(v) in _type_to_suffix: 250 | suffix = _type_to_suffix[_ffi.typeof(v)][False] 251 | cfunc = getattr(_lib, "nvlist_add_%s" % (suffix,)) 252 | ret = cfunc(nvlist, k, v) 253 | else: 254 | raise TypeError('Unsupported value type ' + type(v).__name__) 255 | if ret != 0: 256 | raise MemoryError('nvlist_add failed') 257 | 258 | 259 | # vim: softtabstop=4 tabstop=4 expandtab shiftwidth=4 260 | -------------------------------------------------------------------------------- /libzfs_core/bindings/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2015 ClusterHQ. See LICENSE file for details. 2 | 3 | """ 4 | The package that contains a module per each C library that 5 | `libzfs_core` uses. The modules expose CFFI objects required 6 | to make calls to functions in the libraries. 
7 | """ 8 | 9 | import threading 10 | import importlib 11 | 12 | from cffi import FFI 13 | 14 | 15 | def _setup_cffi(): 16 | class LazyLibrary(object): 17 | 18 | def __init__(self, ffi, libname): 19 | self._ffi = ffi 20 | self._libname = libname 21 | self._lib = None 22 | self._lock = threading.Lock() 23 | 24 | def __getattr__(self, name): 25 | if self._lib is None: 26 | with self._lock: 27 | if self._lib is None: 28 | self._lib = self._ffi.dlopen(self._libname) 29 | 30 | return getattr(self._lib, name) 31 | 32 | MODULES = ["libnvpair", "libzfs_core"] 33 | ffi = FFI() 34 | 35 | for module_name in MODULES: 36 | module = importlib.import_module("." + module_name, __package__) 37 | ffi.cdef(module.CDEF) 38 | lib = LazyLibrary(ffi, module.LIBRARY) 39 | setattr(module, "ffi", ffi) 40 | setattr(module, "lib", lib) 41 | 42 | 43 | _setup_cffi() 44 | 45 | # vim: softtabstop=4 tabstop=4 expandtab shiftwidth=4 46 | -------------------------------------------------------------------------------- /libzfs_core/bindings/libnvpair.py: -------------------------------------------------------------------------------- 1 | # Copyright 2015 ClusterHQ. See LICENSE file for details. 2 | 3 | """ 4 | Python bindings for ``libnvpair``. 5 | """ 6 | 7 | CDEF = """ 8 | typedef ... nvlist_t; 9 | typedef ... nvpair_t; 10 | 11 | 12 | typedef enum { 13 | DATA_TYPE_UNKNOWN = 0, 14 | DATA_TYPE_BOOLEAN, 15 | DATA_TYPE_BYTE, 16 | DATA_TYPE_INT16, 17 | DATA_TYPE_UINT16, 18 | DATA_TYPE_INT32, 19 | DATA_TYPE_UINT32, 20 | DATA_TYPE_INT64, 21 | DATA_TYPE_UINT64, 22 | DATA_TYPE_STRING, 23 | DATA_TYPE_BYTE_ARRAY, 24 | DATA_TYPE_INT16_ARRAY, 25 | DATA_TYPE_UINT16_ARRAY, 26 | DATA_TYPE_INT32_ARRAY, 27 | DATA_TYPE_UINT32_ARRAY, 28 | DATA_TYPE_INT64_ARRAY, 29 | DATA_TYPE_UINT64_ARRAY, 30 | DATA_TYPE_STRING_ARRAY, 31 | DATA_TYPE_HRTIME, 32 | DATA_TYPE_NVLIST, 33 | DATA_TYPE_NVLIST_ARRAY, 34 | DATA_TYPE_BOOLEAN_VALUE, 35 | DATA_TYPE_INT8, 36 | DATA_TYPE_UINT8, 37 | DATA_TYPE_BOOLEAN_ARRAY, 38 | DATA_TYPE_INT8_ARRAY, 39 | DATA_TYPE_UINT8_ARRAY 40 | } data_type_t; 41 | typedef enum { B_FALSE, B_TRUE } boolean_t; 42 | 43 | typedef unsigned char uchar_t; 44 | typedef unsigned int uint_t; 45 | 46 | int nvlist_alloc(nvlist_t **, uint_t, int); 47 | void nvlist_free(nvlist_t *); 48 | 49 | int nvlist_unpack(char *, size_t, nvlist_t **, int); 50 | 51 | void dump_nvlist(nvlist_t *, int); 52 | int nvlist_dup(nvlist_t *, nvlist_t **, int); 53 | 54 | int nvlist_add_boolean(nvlist_t *, const char *); 55 | int nvlist_add_boolean_value(nvlist_t *, const char *, boolean_t); 56 | int nvlist_add_byte(nvlist_t *, const char *, uchar_t); 57 | int nvlist_add_int8(nvlist_t *, const char *, int8_t); 58 | int nvlist_add_uint8(nvlist_t *, const char *, uint8_t); 59 | int nvlist_add_int16(nvlist_t *, const char *, int16_t); 60 | int nvlist_add_uint16(nvlist_t *, const char *, uint16_t); 61 | int nvlist_add_int32(nvlist_t *, const char *, int32_t); 62 | int nvlist_add_uint32(nvlist_t *, const char *, uint32_t); 63 | int nvlist_add_int64(nvlist_t *, const char *, int64_t); 64 | int nvlist_add_uint64(nvlist_t *, const char *, uint64_t); 65 | int nvlist_add_string(nvlist_t *, const char *, const char *); 66 | int nvlist_add_nvlist(nvlist_t *, const char *, nvlist_t *); 67 | int nvlist_add_boolean_array(nvlist_t *, const char *, boolean_t *, uint_t); 68 | int nvlist_add_byte_array(nvlist_t *, const char *, uchar_t *, uint_t); 69 | int nvlist_add_int8_array(nvlist_t *, const char *, int8_t *, uint_t); 70 | int nvlist_add_uint8_array(nvlist_t *, const char *, uint8_t *, 
uint_t); 71 | int nvlist_add_int16_array(nvlist_t *, const char *, int16_t *, uint_t); 72 | int nvlist_add_uint16_array(nvlist_t *, const char *, uint16_t *, uint_t); 73 | int nvlist_add_int32_array(nvlist_t *, const char *, int32_t *, uint_t); 74 | int nvlist_add_uint32_array(nvlist_t *, const char *, uint32_t *, uint_t); 75 | int nvlist_add_int64_array(nvlist_t *, const char *, int64_t *, uint_t); 76 | int nvlist_add_uint64_array(nvlist_t *, const char *, uint64_t *, uint_t); 77 | int nvlist_add_string_array(nvlist_t *, const char *, char *const *, uint_t); 78 | int nvlist_add_nvlist_array(nvlist_t *, const char *, nvlist_t **, uint_t); 79 | 80 | nvpair_t *nvlist_next_nvpair(nvlist_t *, nvpair_t *); 81 | nvpair_t *nvlist_prev_nvpair(nvlist_t *, nvpair_t *); 82 | char *nvpair_name(nvpair_t *); 83 | data_type_t nvpair_type(nvpair_t *); 84 | int nvpair_type_is_array(nvpair_t *); 85 | int nvpair_value_boolean_value(nvpair_t *, boolean_t *); 86 | int nvpair_value_byte(nvpair_t *, uchar_t *); 87 | int nvpair_value_int8(nvpair_t *, int8_t *); 88 | int nvpair_value_uint8(nvpair_t *, uint8_t *); 89 | int nvpair_value_int16(nvpair_t *, int16_t *); 90 | int nvpair_value_uint16(nvpair_t *, uint16_t *); 91 | int nvpair_value_int32(nvpair_t *, int32_t *); 92 | int nvpair_value_uint32(nvpair_t *, uint32_t *); 93 | int nvpair_value_int64(nvpair_t *, int64_t *); 94 | int nvpair_value_uint64(nvpair_t *, uint64_t *); 95 | int nvpair_value_string(nvpair_t *, char **); 96 | int nvpair_value_nvlist(nvpair_t *, nvlist_t **); 97 | int nvpair_value_boolean_array(nvpair_t *, boolean_t **, uint_t *); 98 | int nvpair_value_byte_array(nvpair_t *, uchar_t **, uint_t *); 99 | int nvpair_value_int8_array(nvpair_t *, int8_t **, uint_t *); 100 | int nvpair_value_uint8_array(nvpair_t *, uint8_t **, uint_t *); 101 | int nvpair_value_int16_array(nvpair_t *, int16_t **, uint_t *); 102 | int nvpair_value_uint16_array(nvpair_t *, uint16_t **, uint_t *); 103 | int nvpair_value_int32_array(nvpair_t *, int32_t **, uint_t *); 104 | int nvpair_value_uint32_array(nvpair_t *, uint32_t **, uint_t *); 105 | int nvpair_value_int64_array(nvpair_t *, int64_t **, uint_t *); 106 | int nvpair_value_uint64_array(nvpair_t *, uint64_t **, uint_t *); 107 | int nvpair_value_string_array(nvpair_t *, char ***, uint_t *); 108 | int nvpair_value_nvlist_array(nvpair_t *, nvlist_t ***, uint_t *); 109 | """ 110 | 111 | SOURCE = """ 112 | #include 113 | """ 114 | 115 | LIBRARY = "nvpair" 116 | 117 | # vim: softtabstop=4 tabstop=4 expandtab shiftwidth=4 118 | -------------------------------------------------------------------------------- /libzfs_core/bindings/libzfs_core.py: -------------------------------------------------------------------------------- 1 | # Copyright 2015 ClusterHQ. See LICENSE file for details. 2 | 3 | """ 4 | Python bindings for ``libzfs_core``. 
5 | """ 6 | 7 | CDEF = """ 8 | enum lzc_send_flags { 9 | LZC_SEND_FLAG_EMBED_DATA = 1, 10 | LZC_SEND_FLAG_LARGE_BLOCK = 2 11 | }; 12 | 13 | typedef enum { 14 | DMU_OST_NONE, 15 | DMU_OST_META, 16 | DMU_OST_ZFS, 17 | DMU_OST_ZVOL, 18 | DMU_OST_OTHER, 19 | DMU_OST_ANY, 20 | DMU_OST_NUMTYPES 21 | } dmu_objset_type_t; 22 | 23 | int libzfs_core_init(void); 24 | void libzfs_core_fini(void); 25 | 26 | int lzc_snapshot(nvlist_t *, nvlist_t *, nvlist_t **); 27 | int lzc_create(const char *, dmu_objset_type_t, nvlist_t *); 28 | int lzc_clone(const char *, const char *, nvlist_t *); 29 | int lzc_destroy_snaps(nvlist_t *, boolean_t, nvlist_t **); 30 | int lzc_bookmark(nvlist_t *, nvlist_t **); 31 | int lzc_get_bookmarks(const char *, nvlist_t *, nvlist_t **); 32 | int lzc_destroy_bookmarks(nvlist_t *, nvlist_t **); 33 | 34 | int lzc_snaprange_space(const char *, const char *, uint64_t *); 35 | 36 | int lzc_hold(nvlist_t *, int, nvlist_t **); 37 | int lzc_release(nvlist_t *, nvlist_t **); 38 | int lzc_get_holds(const char *, nvlist_t **); 39 | 40 | int lzc_send(const char *, const char *, int, enum lzc_send_flags); 41 | int lzc_receive(const char *, nvlist_t *, const char *, boolean_t, int); 42 | int lzc_send_space(const char *, const char *, uint64_t *); 43 | 44 | boolean_t lzc_exists(const char *); 45 | 46 | int lzc_rollback(const char *, char *, int); 47 | 48 | int lzc_promote(const char *, nvlist_t *, nvlist_t **); 49 | int lzc_rename(const char *, const char *, nvlist_t *, char **); 50 | int lzc_destroy_one(const char *fsname, nvlist_t *); 51 | int lzc_inherit(const char *fsname, const char *name, nvlist_t *); 52 | int lzc_set_props(const char *, nvlist_t *, nvlist_t *, nvlist_t *); 53 | int lzc_list (const char *, nvlist_t *); 54 | """ 55 | 56 | SOURCE = """ 57 | #include 58 | """ 59 | 60 | LIBRARY = "zfs_core" 61 | 62 | # vim: softtabstop=4 tabstop=4 expandtab shiftwidth=4 63 | -------------------------------------------------------------------------------- /libzfs_core/ctypes.py: -------------------------------------------------------------------------------- 1 | # Copyright 2015 ClusterHQ. See LICENSE file for details. 2 | 3 | """ 4 | Utility functions for casting to a specific C type. 
5 | """ 6 | 7 | from .bindings.libnvpair import ffi as _ffi 8 | 9 | 10 | def _ffi_cast(type_name): 11 | type_info = _ffi.typeof(type_name) 12 | 13 | def _func(value): 14 | # this is for overflow / underflow checking only 15 | if type_info.kind == 'enum': 16 | try: 17 | type_info.elements[value] 18 | except KeyError as e: 19 | raise OverflowError('Invalid enum <%s> value %s' % 20 | (type_info.cname, e.message)) 21 | else: 22 | _ffi.new(type_name + '*', value) 23 | return _ffi.cast(type_name, value) 24 | _func.__name__ = type_name 25 | return _func 26 | 27 | 28 | uint8_t = _ffi_cast('uint8_t') 29 | int8_t = _ffi_cast('int8_t') 30 | uint16_t = _ffi_cast('uint16_t') 31 | int16_t = _ffi_cast('int16_t') 32 | uint32_t = _ffi_cast('uint32_t') 33 | int32_t = _ffi_cast('int32_t') 34 | uint64_t = _ffi_cast('uint64_t') 35 | int64_t = _ffi_cast('int64_t') 36 | boolean_t = _ffi_cast('boolean_t') 37 | uchar_t = _ffi_cast('uchar_t') 38 | 39 | 40 | # First element of the value tuple is a suffix for a single value function 41 | # while the second element is for an array function 42 | _type_to_suffix = { 43 | _ffi.typeof('uint8_t'): ('uint8', 'uint8'), 44 | _ffi.typeof('int8_t'): ('int8', 'int8'), 45 | _ffi.typeof('uint16_t'): ('uint16', 'uint16'), 46 | _ffi.typeof('int16_t'): ('int16', 'int16'), 47 | _ffi.typeof('uint32_t'): ('uint32', 'uint32'), 48 | _ffi.typeof('int32_t'): ('int32', 'int32'), 49 | _ffi.typeof('uint64_t'): ('uint64', 'uint64'), 50 | _ffi.typeof('int64_t'): ('int64', 'int64'), 51 | _ffi.typeof('boolean_t'): ('boolean_value', 'boolean'), 52 | _ffi.typeof('uchar_t'): ('byte', 'byte'), 53 | } 54 | 55 | 56 | # vim: softtabstop=4 tabstop=4 expandtab shiftwidth=4 57 | -------------------------------------------------------------------------------- /libzfs_core/exceptions.py: -------------------------------------------------------------------------------- 1 | # Copyright 2015 ClusterHQ. See LICENSE file for details. 2 | 3 | """ 4 | Exceptions that can be raised by libzfs_core operations. 5 | """ 6 | 7 | import errno 8 | 9 | 10 | class ZFSError(Exception): 11 | errno = None 12 | message = None 13 | name = None 14 | 15 | def __str__(self): 16 | if self.name is not None: 17 | return "[Errno %d] %s: '%s'" % (self.errno, self.message, self.name) 18 | else: 19 | return "[Errno %d] %s" % (self.errno, self.message) 20 | 21 | def __repr__(self): 22 | return "%s(%r, %r)" % (self.__class__.__name__, self.errno, self.message) 23 | 24 | 25 | class ZFSGenericError(ZFSError): 26 | 27 | def __init__(self, errno, name, message): 28 | self.errno = errno 29 | self.message = message 30 | self.name = name 31 | 32 | 33 | class ZFSInitializationFailed(ZFSError): 34 | message = "Failed to initialize libzfs_core" 35 | 36 | def __init__(self, errno): 37 | self.errno = errno 38 | 39 | 40 | class MultipleOperationsFailure(ZFSError): 41 | 42 | def __init__(self, errors, suppressed_count): 43 | # Use first of the individual error codes 44 | # as an overall error code. This is more consistent. 
45 | self.errno = errors[0].errno 46 | self.errors = errors 47 | #: this many errors were encountered but not placed on the `errors` list 48 | self.suppressed_count = suppressed_count 49 | 50 | def __str__(self): 51 | return "%s, %d errors included, %d suppressed" % (ZFSError.__str__(self), 52 | len(self.errors), self.suppressed_count) 53 | 54 | def __repr__(self): 55 | return "%s(%r, %r, errors=%r, suppressed=%r)" % (self.__class__.__name__, 56 | self.errno, self.message, self.errors, self.suppressed_count) 57 | 58 | 59 | class DatasetNotFound(ZFSError): 60 | 61 | """ 62 | This exception is raised when an operation failure can be caused by a missing 63 | snapshot or a missing filesystem and it is impossible to distinguish between 64 | the causes. 65 | """ 66 | errno = errno.ENOENT 67 | message = "Dataset not found" 68 | 69 | def __init__(self, name): 70 | self.name = name 71 | 72 | 73 | class DatasetExists(ZFSError): 74 | 75 | """ 76 | This exception is raised when an operation failure can be caused by an existing 77 | snapshot or filesystem and it is impossible to distinguish between 78 | the causes. 79 | """ 80 | errno = errno.EEXIST 81 | message = "Dataset already exists" 82 | 83 | def __init__(self, name): 84 | self.name = name 85 | 86 | 87 | class NotClone(ZFSError): 88 | errno = errno.EINVAL 89 | message = "Filesystem is not a clone, cannot promote" 90 | 91 | def __init__(self, name): 92 | self.name = name 93 | 94 | 95 | class FilesystemExists(DatasetExists): 96 | message = "Filesystem already exists" 97 | 98 | def __init__(self, name): 99 | self.name = name 100 | 101 | 102 | class FilesystemNotFound(DatasetNotFound): 103 | message = "Filesystem not found" 104 | 105 | def __init__(self, name): 106 | self.name = name 107 | 108 | 109 | class ParentNotFound(ZFSError): 110 | errno = errno.ENOENT 111 | message = "Parent not found" 112 | 113 | def __init__(self, name): 114 | self.name = name 115 | 116 | 117 | class WrongParent(ZFSError): 118 | errno = errno.EINVAL 119 | message = "Parent dataset is not a filesystem" 120 | 121 | def __init__(self, name): 122 | self.name = name 123 | 124 | 125 | class SnapshotExists(DatasetExists): 126 | message = "Snapshot already exists" 127 | 128 | def __init__(self, name): 129 | self.name = name 130 | 131 | 132 | class SnapshotNotFound(DatasetNotFound): 133 | message = "Snapshot not found" 134 | 135 | def __init__(self, name): 136 | self.name = name 137 | 138 | 139 | class SnapshotIsCloned(ZFSError): 140 | errno = errno.EEXIST 141 | message = "Snapshot is cloned" 142 | 143 | def __init__(self, name): 144 | self.name = name 145 | 146 | 147 | class SnapshotIsHeld(ZFSError): 148 | errno = errno.EBUSY 149 | message = "Snapshot is held" 150 | 151 | def __init__(self, name): 152 | self.name = name 153 | 154 | 155 | class DuplicateSnapshots(ZFSError): 156 | errno = errno.EXDEV 157 | message = "Requested multiple snapshots of the same filesystem" 158 | 159 | def __init__(self, name): 160 | self.name = name 161 | 162 | 163 | class SnapshotFailure(MultipleOperationsFailure): 164 | message = "Creation of snapshot(s) failed for one or more reasons" 165 | 166 | def __init__(self, errors, suppressed_count): 167 | super(SnapshotFailure, self).__init__(errors, suppressed_count) 168 | 169 | 170 | class SnapshotDestructionFailure(MultipleOperationsFailure): 171 | message = "Destruction of snapshot(s) failed for one or more reasons" 172 | 173 | def __init__(self, errors, suppressed_count): 174 | super(SnapshotDestructionFailure, self).__init__(errors, suppressed_count)
175 | 176 | 177 | class BookmarkExists(ZFSError): 178 | errno = errno.EEXIST 179 | message = "Bookmark already exists" 180 | 181 | def __init__(self, name): 182 | self.name = name 183 | 184 | 185 | class BookmarkNotFound(ZFSError): 186 | errno = errno.ENOENT 187 | message = "Bookmark not found" 188 | 189 | def __init__(self, name): 190 | self.name = name 191 | 192 | 193 | class BookmarkMismatch(ZFSError): 194 | errno = errno.EINVAL 195 | message = "Bookmark is not in snapshot's filesystem" 196 | 197 | def __init__(self, name): 198 | self.name = name 199 | 200 | 201 | class BookmarkNotSupported(ZFSError): 202 | errno = errno.ENOTSUP 203 | message = "Bookmark feature is not supported" 204 | 205 | def __init__(self, name): 206 | self.name = name 207 | 208 | 209 | class BookmarkFailure(MultipleOperationsFailure): 210 | message = "Creation of bookmark(s) failed for one or more reasons" 211 | 212 | def __init__(self, errors, suppressed_count): 213 | super(BookmarkFailure, self).__init__(errors, suppressed_count) 214 | 215 | 216 | class BookmarkDestructionFailure(MultipleOperationsFailure): 217 | message = "Destruction of bookmark(s) failed for one or more reasons" 218 | 219 | def __init__(self, errors, suppressed_count): 220 | super(BookmarkDestructionFailure, self).__init__(errors, suppressed_count) 221 | 222 | 223 | class BadHoldCleanupFD(ZFSError): 224 | errno = errno.EBADF 225 | message = "Bad file descriptor as cleanup file descriptor" 226 | 227 | 228 | class HoldExists(ZFSError): 229 | errno = errno.EEXIST 230 | message = "Hold with a given tag already exists on snapshot" 231 | 232 | def __init__(self, name): 233 | self.name = name 234 | 235 | 236 | class HoldNotFound(ZFSError): 237 | errno = errno.ENOENT 238 | message = "Hold with a given tag does not exist on snapshot" 239 | 240 | def __init__(self, name): 241 | self.name = name 242 | 243 | 244 | class HoldFailure(MultipleOperationsFailure): 245 | message = "Placement of hold(s) failed for one or more reasons" 246 | 247 | def __init__(self, errors, suppressed_count): 248 | super(HoldFailure, self).__init__(errors, suppressed_count) 249 | 250 | 251 | class HoldReleaseFailure(MultipleOperationsFailure): 252 | message = "Release of hold(s) failed for one or more reasons" 253 | 254 | def __init__(self, errors, suppressed_count): 255 | super(HoldReleaseFailure, self).__init__(errors, suppressed_count) 256 | 257 | 258 | class SnapshotMismatch(ZFSError): 259 | errno = errno.ENODEV 260 | message = "Snapshot is not descendant of source snapshot" 261 | 262 | def __init__(self, name): 263 | self.name = name 264 | 265 | 266 | class StreamMismatch(ZFSError): 267 | errno = errno.ENODEV 268 | message = "Stream is not applicable to destination dataset" 269 | 270 | def __init__(self, name): 271 | self.name = name 272 | 273 | 274 | class DestinationModified(ZFSError): 275 | errno = errno.ETXTBSY 276 | message = "Destination dataset has modifications that can not be undone" 277 | 278 | def __init__(self, name): 279 | self.name = name 280 | 281 | 282 | class BadStream(ZFSError): 283 | errno = errno.EINVAL 284 | message = "Bad backup stream" 285 | 286 | 287 | class StreamFeatureNotSupported(ZFSError): 288 | errno = errno.ENOTSUP 289 | message = "Stream contains unsupported feature" 290 | 291 | 292 | class UnknownStreamFeature(ZFSError): 293 | errno = errno.ENOTSUP 294 | message = "Unknown feature requested for stream" 295 | 296 | 297 | class StreamIOError(ZFSError): 298 | message = "I/O error while writing or reading stream" 299 | 300 | def __init__(self, 
errno): 301 | self.errno = errno 302 | 303 | 304 | class ZIOError(ZFSError): 305 | errno = errno.EIO 306 | message = "I/O error" 307 | 308 | def __init__(self, name): 309 | self.name = name 310 | 311 | 312 | class NoSpace(ZFSError): 313 | errno = errno.ENOSPC 314 | message = "No space left" 315 | 316 | def __init__(self, name): 317 | self.name = name 318 | 319 | 320 | class QuotaExceeded(ZFSError): 321 | errno = errno.EDQUOT 322 | message = "Quota exceeded" 323 | 324 | def __init__(self, name): 325 | self.name = name 326 | 327 | 328 | class DatasetBusy(ZFSError): 329 | errno = errno.EBUSY 330 | message = "Dataset is busy" 331 | 332 | def __init__(self, name): 333 | self.name = name 334 | 335 | 336 | class NameTooLong(ZFSError): 337 | errno = errno.ENAMETOOLONG 338 | message = "Dataset name is too long" 339 | 340 | def __init__(self, name): 341 | self.name = name 342 | 343 | 344 | class NameInvalid(ZFSError): 345 | errno = errno.EINVAL 346 | message = "Invalid name" 347 | 348 | def __init__(self, name): 349 | self.name = name 350 | 351 | 352 | class SnapshotNameInvalid(NameInvalid): 353 | message = "Invalid name for snapshot" 354 | 355 | def __init__(self, name): 356 | self.name = name 357 | 358 | 359 | class FilesystemNameInvalid(NameInvalid): 360 | message = "Invalid name for filesystem or volume" 361 | 362 | def __init__(self, name): 363 | self.name = name 364 | 365 | 366 | class BookmarkNameInvalid(NameInvalid): 367 | message = "Invalid name for bookmark" 368 | 369 | def __init__(self, name): 370 | self.name = name 371 | 372 | 373 | class ReadOnlyPool(ZFSError): 374 | errno = errno.EROFS 375 | message = "Pool is read-only" 376 | 377 | def __init__(self, name): 378 | self.name = name 379 | 380 | 381 | class SuspendedPool(ZFSError): 382 | errno = errno.EAGAIN 383 | message = "Pool is suspended" 384 | 385 | def __init__(self, name): 386 | self.name = name 387 | 388 | 389 | class PoolNotFound(ZFSError): 390 | errno = errno.EXDEV 391 | message = "No such pool" 392 | 393 | def __init__(self, name): 394 | self.name = name 395 | 396 | 397 | class PoolsDiffer(ZFSError): 398 | errno = errno.EXDEV 399 | message = "Source and target belong to different pools" 400 | 401 | def __init__(self, name): 402 | self.name = name 403 | 404 | 405 | class FeatureNotSupported(ZFSError): 406 | errno = errno.ENOTSUP 407 | message = "Feature is not supported in this version" 408 | 409 | def __init__(self, name): 410 | self.name = name 411 | 412 | 413 | class PropertyNotSupported(ZFSError): 414 | errno = errno.ENOTSUP 415 | message = "Property is not supported in this version" 416 | 417 | def __init__(self, name): 418 | self.name = name 419 | 420 | 421 | class PropertyInvalid(ZFSError): 422 | errno = errno.EINVAL 423 | message = "Invalid property or property value" 424 | 425 | def __init__(self, name): 426 | self.name = name 427 | 428 | 429 | class DatasetTypeInvalid(ZFSError): 430 | errno = errno.EINVAL 431 | message = "Specified dataset type is unknown" 432 | 433 | def __init__(self, name): 434 | self.name = name 435 | 436 | 437 | # vim: softtabstop=4 tabstop=4 expandtab shiftwidth=4 438 | -------------------------------------------------------------------------------- /libzfs_core/test/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ClusterHQ/pyzfs/9c77b036d667ea6d976ceb095fe88495c5873f5b/libzfs_core/test/__init__.py -------------------------------------------------------------------------------- /libzfs_core/test/test_nvlist.py:
-------------------------------------------------------------------------------- 1 | # Copyright 2015 ClusterHQ. See LICENSE file for details. 2 | 3 | """ 4 | Tests for _nvlist module. 5 | The tests convert from a `dict` to C ``nvlist_t`` and back to a `dict` 6 | and verify that no information is lost and value types are correct. 7 | The tests also check that various error conditions like unsupported 8 | value types or out of bounds values are detected. 9 | """ 10 | 11 | import unittest 12 | 13 | from .._nvlist import nvlist_in, nvlist_out, _lib 14 | from ..ctypes import ( 15 | uint8_t, int8_t, uint16_t, int16_t, uint32_t, int32_t, 16 | uint64_t, int64_t, boolean_t, uchar_t 17 | ) 18 | 19 | 20 | class TestNVList(unittest.TestCase): 21 | 22 | def _dict_to_nvlist_to_dict(self, props): 23 | res = {} 24 | nv_in = nvlist_in(props) 25 | with nvlist_out(res) as nv_out: 26 | _lib.nvlist_dup(nv_in, nv_out, 0) 27 | return res 28 | 29 | def _assertIntDictsEqual(self, dict1, dict2): 30 | self.assertEqual(len(dict1), len(dict2), "resulting dictionary is of different size") 31 | for key in dict1.keys(): 32 | self.assertEqual(int(dict1[key]), int(dict2[key])) 33 | 34 | def _assertIntArrayDictsEqual(self, dict1, dict2): 35 | self.assertEqual(len(dict1), len(dict2), "resulting dictionary is of different size") 36 | for key in dict1.keys(): 37 | val1 = dict1[key] 38 | val2 = dict2[key] 39 | self.assertEqual(len(val1), len(val2), "array values of different sizes") 40 | for x, y in zip(val1, val2): 41 | self.assertEqual(int(x), int(y)) 42 | 43 | def test_empty(self): 44 | res = self._dict_to_nvlist_to_dict({}) 45 | self.assertEqual(len(res), 0, "expected empty dict") 46 | 47 | def test_invalid_key_type(self): 48 | with self.assertRaises(TypeError): 49 | self._dict_to_nvlist_to_dict({1: None}) 50 | 51 | def test_invalid_val_type__tuple(self): 52 | with self.assertRaises(TypeError): 53 | self._dict_to_nvlist_to_dict({"key": (1, 2)}) 54 | 55 | def test_invalid_val_type__set(self): 56 | with self.assertRaises(TypeError): 57 | self._dict_to_nvlist_to_dict({"key": set([1, 2])}) 58 | 59 | def test_invalid_array_val_type(self): 60 | with self.assertRaises(TypeError): 61 | self._dict_to_nvlist_to_dict({"key": [(1, 2), (3, 4)]}) 62 | 63 | def test_invalid_array_of_arrays_val_type(self): 64 | with self.assertRaises(TypeError): 65 | self._dict_to_nvlist_to_dict({"key": [[1, 2], [3, 4]]}) 66 | 67 | def test_string_value(self): 68 | props = {"key": "value"} 69 | res = self._dict_to_nvlist_to_dict(props) 70 | self.assertEqual(props, res) 71 | 72 | def test_implicit_boolean_value(self): 73 | props = {"key": None} 74 | res = self._dict_to_nvlist_to_dict(props) 75 | self.assertEqual(props, res) 76 | 77 | def test_boolean_values(self): 78 | props = {"key1": True, "key2": False} 79 | res = self._dict_to_nvlist_to_dict(props) 80 | self.assertEqual(props, res) 81 | 82 | def test_explicit_boolean_true_value(self): 83 | props = {"key": boolean_t(1)} 84 | res = self._dict_to_nvlist_to_dict(props) 85 | self._assertIntDictsEqual(props, res) 86 | 87 | def test_explicit_boolean_false_value(self): 88 | props = {"key": boolean_t(0)} 89 | res = self._dict_to_nvlist_to_dict(props) 90 | self._assertIntDictsEqual(props, res) 91 | 92 | def test_explicit_boolean_invalid_value(self): 93 | with self.assertRaises(OverflowError): 94 | props = {"key": boolean_t(2)} 95 | self._dict_to_nvlist_to_dict(props) 96 | 97 | def test_explicit_boolean_another_invalid_value(self): 98 | with self.assertRaises(OverflowError): 99 | props = {"key": boolean_t(-1)} 100
| self._dict_to_nvlist_to_dict(props) 101 | 102 | def test_uint64_value(self): 103 | props = {"key": 1} 104 | res = self._dict_to_nvlist_to_dict(props) 105 | self.assertEqual(props, res) 106 | 107 | def test_uint64_max_value(self): 108 | props = {"key": 2 ** 64 - 1} 109 | res = self._dict_to_nvlist_to_dict(props) 110 | self.assertEqual(props, res) 111 | 112 | def test_uint64_too_large_value(self): 113 | props = {"key": 2 ** 64} 114 | with self.assertRaises(OverflowError): 115 | self._dict_to_nvlist_to_dict(props) 116 | 117 | def test_uint64_negative_value(self): 118 | props = {"key": -1} 119 | with self.assertRaises(OverflowError): 120 | self._dict_to_nvlist_to_dict(props) 121 | 122 | def test_explicit_uint64_value(self): 123 | props = {"key": uint64_t(1)} 124 | res = self._dict_to_nvlist_to_dict(props) 125 | self._assertIntDictsEqual(props, res) 126 | 127 | def test_explicit_uint64_max_value(self): 128 | props = {"key": uint64_t(2 ** 64 - 1)} 129 | res = self._dict_to_nvlist_to_dict(props) 130 | self._assertIntDictsEqual(props, res) 131 | 132 | def test_explicit_uint64_too_large_value(self): 133 | with self.assertRaises(OverflowError): 134 | props = {"key": uint64_t(2 ** 64)} 135 | self._dict_to_nvlist_to_dict(props) 136 | 137 | def test_explicit_uint64_negative_value(self): 138 | with self.assertRaises(OverflowError): 139 | props = {"key": uint64_t(-1)} 140 | self._dict_to_nvlist_to_dict(props) 141 | 142 | def test_explicit_uint32_value(self): 143 | props = {"key": uint32_t(1)} 144 | res = self._dict_to_nvlist_to_dict(props) 145 | self._assertIntDictsEqual(props, res) 146 | 147 | def test_explicit_uint32_max_value(self): 148 | props = {"key": uint32_t(2 ** 32 - 1)} 149 | res = self._dict_to_nvlist_to_dict(props) 150 | self._assertIntDictsEqual(props, res) 151 | 152 | def test_explicit_uint32_too_large_value(self): 153 | with self.assertRaises(OverflowError): 154 | props = {"key": uint32_t(2 ** 32)} 155 | self._dict_to_nvlist_to_dict(props) 156 | 157 | def test_explicit_uint32_negative_value(self): 158 | with self.assertRaises(OverflowError): 159 | props = {"key": uint32_t(-1)} 160 | self._dict_to_nvlist_to_dict(props) 161 | 162 | def test_explicit_uint16_value(self): 163 | props = {"key": uint16_t(1)} 164 | res = self._dict_to_nvlist_to_dict(props) 165 | self._assertIntDictsEqual(props, res) 166 | 167 | def test_explicit_uint16_max_value(self): 168 | props = {"key": uint16_t(2 ** 16 - 1)} 169 | res = self._dict_to_nvlist_to_dict(props) 170 | self._assertIntDictsEqual(props, res) 171 | 172 | def test_explicit_uint16_too_large_value(self): 173 | with self.assertRaises(OverflowError): 174 | props = {"key": uint16_t(2 ** 16)} 175 | self._dict_to_nvlist_to_dict(props) 176 | 177 | def test_explicit_uint16_negative_value(self): 178 | with self.assertRaises(OverflowError): 179 | props = {"key": uint16_t(-1)} 180 | self._dict_to_nvlist_to_dict(props) 181 | 182 | def test_explicit_uint8_value(self): 183 | props = {"key": uint8_t(1)} 184 | res = self._dict_to_nvlist_to_dict(props) 185 | self._assertIntDictsEqual(props, res) 186 | 187 | def test_explicit_uint8_max_value(self): 188 | props = {"key": uint8_t(2 ** 8 - 1)} 189 | res = self._dict_to_nvlist_to_dict(props) 190 | self._assertIntDictsEqual(props, res) 191 | 192 | def test_explicit_uint8_too_large_value(self): 193 | with self.assertRaises(OverflowError): 194 | props = {"key": uint8_t(2 ** 8)} 195 | self._dict_to_nvlist_to_dict(props) 196 | 197 | def test_explicit_uint8_negative_value(self): 198 | with self.assertRaises(OverflowError): 199 | 
props = {"key": uint8_t(-1)} 200 | self._dict_to_nvlist_to_dict(props) 201 | 202 | def test_explicit_byte_value(self): 203 | props = {"key": uchar_t(1)} 204 | res = self._dict_to_nvlist_to_dict(props) 205 | self._assertIntDictsEqual(props, res) 206 | 207 | def test_explicit_byte_max_value(self): 208 | props = {"key": uchar_t(2 ** 8 - 1)} 209 | res = self._dict_to_nvlist_to_dict(props) 210 | self._assertIntDictsEqual(props, res) 211 | 212 | def test_explicit_byte_too_large_value(self): 213 | with self.assertRaises(OverflowError): 214 | props = {"key": uchar_t(2 ** 8)} 215 | self._dict_to_nvlist_to_dict(props) 216 | 217 | def test_explicit_byte_negative_value(self): 218 | with self.assertRaises(OverflowError): 219 | props = {"key": uchar_t(-1)} 220 | self._dict_to_nvlist_to_dict(props) 221 | 222 | def test_explicit_int64_value(self): 223 | props = {"key": int64_t(1)} 224 | res = self._dict_to_nvlist_to_dict(props) 225 | self._assertIntDictsEqual(props, res) 226 | 227 | def test_explicit_int64_max_value(self): 228 | props = {"key": int64_t(2 ** 63 - 1)} 229 | res = self._dict_to_nvlist_to_dict(props) 230 | self._assertIntDictsEqual(props, res) 231 | 232 | def test_explicit_int64_min_value(self): 233 | props = {"key": int64_t(-(2 ** 63))} 234 | res = self._dict_to_nvlist_to_dict(props) 235 | self._assertIntDictsEqual(props, res) 236 | 237 | def test_explicit_int64_too_large_value(self): 238 | with self.assertRaises(OverflowError): 239 | props = {"key": int64_t(2 ** 63)} 240 | self._dict_to_nvlist_to_dict(props) 241 | 242 | def test_explicit_int64_too_small_value(self): 243 | with self.assertRaises(OverflowError): 244 | props = {"key": int64_t(-(2 ** 63) - 1)} 245 | self._dict_to_nvlist_to_dict(props) 246 | 247 | def test_explicit_int32_value(self): 248 | props = {"key": int32_t(1)} 249 | res = self._dict_to_nvlist_to_dict(props) 250 | self._assertIntDictsEqual(props, res) 251 | 252 | def test_explicit_int32_max_value(self): 253 | props = {"key": int32_t(2 ** 31 - 1)} 254 | res = self._dict_to_nvlist_to_dict(props) 255 | self._assertIntDictsEqual(props, res) 256 | 257 | def test_explicit_int32_min_value(self): 258 | props = {"key": int32_t(-(2 ** 31))} 259 | res = self._dict_to_nvlist_to_dict(props) 260 | self._assertIntDictsEqual(props, res) 261 | 262 | def test_explicit_int32_too_large_value(self): 263 | with self.assertRaises(OverflowError): 264 | props = {"key": int32_t(2 ** 31)} 265 | self._dict_to_nvlist_to_dict(props) 266 | 267 | def test_explicit_int32_too_small_value(self): 268 | with self.assertRaises(OverflowError): 269 | props = {"key": int32_t(-(2 ** 31) - 1)} 270 | self._dict_to_nvlist_to_dict(props) 271 | 272 | def test_explicit_int16_value(self): 273 | props = {"key": int16_t(1)} 274 | res = self._dict_to_nvlist_to_dict(props) 275 | self._assertIntDictsEqual(props, res) 276 | 277 | def test_explicit_int16_max_value(self): 278 | props = {"key": int16_t(2 ** 15 - 1)} 279 | res = self._dict_to_nvlist_to_dict(props) 280 | self._assertIntDictsEqual(props, res) 281 | 282 | def test_explicit_int16_min_value(self): 283 | props = {"key": int16_t(-(2 ** 15))} 284 | res = self._dict_to_nvlist_to_dict(props) 285 | self._assertIntDictsEqual(props, res) 286 | 287 | def test_explicit_int16_too_large_value(self): 288 | with self.assertRaises(OverflowError): 289 | props = {"key": int16_t(2 ** 15)} 290 | self._dict_to_nvlist_to_dict(props) 291 | 292 | def test_explicit_int16_too_small_value(self): 293 | with self.assertRaises(OverflowError): 294 | props = {"key": int16_t(-(2 ** 15) - 1)} 295 | 
self._dict_to_nvlist_to_dict(props) 296 | 297 | def test_explicit_int8_value(self): 298 | props = {"key": int8_t(1)} 299 | res = self._dict_to_nvlist_to_dict(props) 300 | self._assertIntDictsEqual(props, res) 301 | 302 | def test_explicit_int8_max_value(self): 303 | props = {"key": int8_t(2 ** 7 - 1)} 304 | res = self._dict_to_nvlist_to_dict(props) 305 | self._assertIntDictsEqual(props, res) 306 | 307 | def test_explicit_int8_min_value(self): 308 | props = {"key": int8_t(-(2 ** 7))} 309 | res = self._dict_to_nvlist_to_dict(props) 310 | self._assertIntDictsEqual(props, res) 311 | 312 | def test_explicit_int8_too_large_value(self): 313 | with self.assertRaises(OverflowError): 314 | props = {"key": int8_t(2 ** 7)} 315 | self._dict_to_nvlist_to_dict(props) 316 | 317 | def test_explicit_int8_too_small_value(self): 318 | with self.assertRaises(OverflowError): 319 | props = {"key": int8_t(-(2 ** 7) - 1)} 320 | self._dict_to_nvlist_to_dict(props) 321 | 322 | def test_nested_dict(self): 323 | props = {"key": {}} 324 | res = self._dict_to_nvlist_to_dict(props) 325 | self.assertEqual(props, res) 326 | 327 | def test_nested_nested_dict(self): 328 | props = {"key": {"key": {}}} 329 | res = self._dict_to_nvlist_to_dict(props) 330 | self.assertEqual(props, res) 331 | 332 | def test_mismatching_values_array(self): 333 | props = {"key": [1, "string"]} 334 | with self.assertRaises(TypeError): 335 | self._dict_to_nvlist_to_dict(props) 336 | 337 | def test_mismatching_values_array2(self): 338 | props = {"key": [True, 10]} 339 | with self.assertRaises(TypeError): 340 | self._dict_to_nvlist_to_dict(props) 341 | 342 | def test_mismatching_values_array3(self): 343 | props = {"key": [1, False]} 344 | with self.assertRaises(TypeError): 345 | self._dict_to_nvlist_to_dict(props) 346 | 347 | def test_string_array(self): 348 | props = {"key": ["value", "value2"]} 349 | res = self._dict_to_nvlist_to_dict(props) 350 | self.assertEqual(props, res) 351 | 352 | def test_boolean_array(self): 353 | props = {"key": [True, False]} 354 | res = self._dict_to_nvlist_to_dict(props) 355 | self.assertEqual(props, res) 356 | 357 | def test_explicit_boolean_array(self): 358 | props = {"key": [boolean_t(False), boolean_t(True)]} 359 | res = self._dict_to_nvlist_to_dict(props) 360 | self._assertIntArrayDictsEqual(props, res) 361 | 362 | def test_uint64_array(self): 363 | props = {"key": [0, 1, 2 ** 64 - 1]} 364 | res = self._dict_to_nvlist_to_dict(props) 365 | self.assertEqual(props, res) 366 | 367 | def test_uint64_array_too_large_value(self): 368 | props = {"key": [0, 2 ** 64]} 369 | with self.assertRaises(OverflowError): 370 | self._dict_to_nvlist_to_dict(props) 371 | 372 | def test_uint64_array_negative_value(self): 373 | props = {"key": [0, -1]} 374 | with self.assertRaises(OverflowError): 375 | self._dict_to_nvlist_to_dict(props) 376 | 377 | def test_mixed_explict_int_array(self): 378 | with self.assertRaises(TypeError): 379 | props = {"key": [uint64_t(0), uint32_t(0)]} 380 | self._dict_to_nvlist_to_dict(props) 381 | 382 | def test_explict_uint64_array(self): 383 | props = {"key": [uint64_t(0), uint64_t(1), uint64_t(2 ** 64 - 1)]} 384 | res = self._dict_to_nvlist_to_dict(props) 385 | self._assertIntArrayDictsEqual(props, res) 386 | 387 | def test_explict_uint64_array_too_large_value(self): 388 | with self.assertRaises(OverflowError): 389 | props = {"key": [uint64_t(0), uint64_t(2 ** 64)]} 390 | self._dict_to_nvlist_to_dict(props) 391 | 392 | def test_explict_uint64_array_negative_value(self): 393 | with 
self.assertRaises(OverflowError): 394 | props = {"key": [uint64_t(0), uint64_t(-1)]} 395 | self._dict_to_nvlist_to_dict(props) 396 | 397 | def test_explict_uint32_array(self): 398 | props = {"key": [uint32_t(0), uint32_t(1), uint32_t(2 ** 32 - 1)]} 399 | res = self._dict_to_nvlist_to_dict(props) 400 | self._assertIntArrayDictsEqual(props, res) 401 | 402 | def test_explict_uint32_array_too_large_value(self): 403 | with self.assertRaises(OverflowError): 404 | props = {"key": [uint32_t(0), uint32_t(2 ** 32)]} 405 | self._dict_to_nvlist_to_dict(props) 406 | 407 | def test_explict_uint32_array_negative_value(self): 408 | with self.assertRaises(OverflowError): 409 | props = {"key": [uint32_t(0), uint32_t(-1)]} 410 | self._dict_to_nvlist_to_dict(props) 411 | 412 | def test_explict_uint16_array(self): 413 | props = {"key": [uint16_t(0), uint16_t(1), uint16_t(2 ** 16 - 1)]} 414 | res = self._dict_to_nvlist_to_dict(props) 415 | self._assertIntArrayDictsEqual(props, res) 416 | 417 | def test_explict_uint16_array_too_large_value(self): 418 | with self.assertRaises(OverflowError): 419 | props = {"key": [uint16_t(0), uint16_t(2 ** 16)]} 420 | self._dict_to_nvlist_to_dict(props) 421 | 422 | def test_explict_uint16_array_negative_value(self): 423 | with self.assertRaises(OverflowError): 424 | props = {"key": [uint16_t(0), uint16_t(-1)]} 425 | self._dict_to_nvlist_to_dict(props) 426 | 427 | def test_explict_uint8_array(self): 428 | props = {"key": [uint8_t(0), uint8_t(1), uint8_t(2 ** 8 - 1)]} 429 | res = self._dict_to_nvlist_to_dict(props) 430 | self._assertIntArrayDictsEqual(props, res) 431 | 432 | def test_explict_uint8_array_too_large_value(self): 433 | with self.assertRaises(OverflowError): 434 | props = {"key": [uint8_t(0), uint8_t(2 ** 8)]} 435 | self._dict_to_nvlist_to_dict(props) 436 | 437 | def test_explict_uint8_array_negative_value(self): 438 | with self.assertRaises(OverflowError): 439 | props = {"key": [uint8_t(0), uint8_t(-1)]} 440 | self._dict_to_nvlist_to_dict(props) 441 | 442 | def test_explict_byte_array(self): 443 | props = {"key": [uchar_t(0), uchar_t(1), uchar_t(2 ** 8 - 1)]} 444 | res = self._dict_to_nvlist_to_dict(props) 445 | self._assertIntArrayDictsEqual(props, res) 446 | 447 | def test_explict_byte_array_too_large_value(self): 448 | with self.assertRaises(OverflowError): 449 | props = {"key": [uchar_t(0), uchar_t(2 ** 8)]} 450 | self._dict_to_nvlist_to_dict(props) 451 | 452 | def test_explict_byte_array_negative_value(self): 453 | with self.assertRaises(OverflowError): 454 | props = {"key": [uchar_t(0), uchar_t(-1)]} 455 | self._dict_to_nvlist_to_dict(props) 456 | 457 | def test_explict_int64_array(self): 458 | props = {"key": [int64_t(0), int64_t(1), int64_t(2 ** 63 - 1), int64_t(-(2 ** 63))]} 459 | res = self._dict_to_nvlist_to_dict(props) 460 | self._assertIntArrayDictsEqual(props, res) 461 | 462 | def test_explict_int64_array_too_large_value(self): 463 | with self.assertRaises(OverflowError): 464 | props = {"key": [int64_t(0), int64_t(2 ** 63)]} 465 | self._dict_to_nvlist_to_dict(props) 466 | 467 | def test_explict_int64_array_too_small_value(self): 468 | with self.assertRaises(OverflowError): 469 | props = {"key": [int64_t(0), int64_t(-(2 ** 63) - 1)]} 470 | self._dict_to_nvlist_to_dict(props) 471 | 472 | def test_explict_int32_array(self): 473 | props = {"key": [int32_t(0), int32_t(1), int32_t(2 ** 31 - 1), int32_t(-(2 ** 31))]} 474 | res = self._dict_to_nvlist_to_dict(props) 475 | self._assertIntArrayDictsEqual(props, res) 476 | 477 | def 
test_explict_int32_array_too_large_value(self): 478 | with self.assertRaises(OverflowError): 479 | props = {"key": [int32_t(0), int32_t(2 ** 31)]} 480 | self._dict_to_nvlist_to_dict(props) 481 | 482 | def test_explict_int32_array_too_small_value(self): 483 | with self.assertRaises(OverflowError): 484 | props = {"key": [int32_t(0), int32_t(-(2 ** 31) - 1)]} 485 | self._dict_to_nvlist_to_dict(props) 486 | 487 | def test_explict_int16_array(self): 488 | props = {"key": [int16_t(0), int16_t(1), int16_t(2 ** 15 - 1), int16_t(-(2 ** 15))]} 489 | res = self._dict_to_nvlist_to_dict(props) 490 | self._assertIntArrayDictsEqual(props, res) 491 | 492 | def test_explict_int16_array_too_large_value(self): 493 | with self.assertRaises(OverflowError): 494 | props = {"key": [int16_t(0), int16_t(2 ** 15)]} 495 | self._dict_to_nvlist_to_dict(props) 496 | 497 | def test_explict_int16_array_too_small_value(self): 498 | with self.assertRaises(OverflowError): 499 | props = {"key": [int16_t(0), int16_t(-(2 ** 15) - 1)]} 500 | self._dict_to_nvlist_to_dict(props) 501 | 502 | def test_explict_int8_array(self): 503 | props = {"key": [int8_t(0), int8_t(1), int8_t(2 ** 7 - 1), int8_t(-(2 ** 7))]} 504 | res = self._dict_to_nvlist_to_dict(props) 505 | self._assertIntArrayDictsEqual(props, res) 506 | 507 | def test_explict_int8_array_too_large_value(self): 508 | with self.assertRaises(OverflowError): 509 | props = {"key": [int8_t(0), int8_t(2 ** 7)]} 510 | self._dict_to_nvlist_to_dict(props) 511 | 512 | def test_explict_int8_array_too_small_value(self): 513 | with self.assertRaises(OverflowError): 514 | props = {"key": [int8_t(0), int8_t(-(2 ** 7) - 1)]} 515 | self._dict_to_nvlist_to_dict(props) 516 | 517 | def test_dict_array(self): 518 | props = {"key": [{"key": 1}, {"key": None}, {"key": {}}]} 519 | res = self._dict_to_nvlist_to_dict(props) 520 | self.assertEqual(props, res) 521 | 522 | def test_implicit_uint32_value(self): 523 | props = {"rewind-request": 1} 524 | res = self._dict_to_nvlist_to_dict(props) 525 | self._assertIntDictsEqual(props, res) 526 | 527 | def test_implicit_uint32_max_value(self): 528 | props = {"rewind-request": 2 ** 32 - 1} 529 | res = self._dict_to_nvlist_to_dict(props) 530 | self._assertIntDictsEqual(props, res) 531 | 532 | def test_implicit_uint32_too_large_value(self): 533 | with self.assertRaises(OverflowError): 534 | props = {"rewind-request": 2 ** 32} 535 | self._dict_to_nvlist_to_dict(props) 536 | 537 | def test_implicit_uint32_negative_value(self): 538 | with self.assertRaises(OverflowError): 539 | props = {"rewind-request": -1} 540 | self._dict_to_nvlist_to_dict(props) 541 | 542 | def test_implicit_int32_value(self): 543 | props = {"pool_context": 1} 544 | res = self._dict_to_nvlist_to_dict(props) 545 | self._assertIntDictsEqual(props, res) 546 | 547 | def test_implicit_int32_max_value(self): 548 | props = {"pool_context": 2 ** 31 - 1} 549 | res = self._dict_to_nvlist_to_dict(props) 550 | self._assertIntDictsEqual(props, res) 551 | 552 | def test_implicit_int32_min_value(self): 553 | props = {"pool_context": -(2 ** 31)} 554 | res = self._dict_to_nvlist_to_dict(props) 555 | self._assertIntDictsEqual(props, res) 556 | 557 | def test_implicit_int32_too_large_value(self): 558 | with self.assertRaises(OverflowError): 559 | props = {"pool_context": 2 ** 31} 560 | self._dict_to_nvlist_to_dict(props) 561 | 562 | def test_implicit_int32_too_small_value(self): 563 | with self.assertRaises(OverflowError): 564 | props = {"pool_context": -(2 ** 31) - 1} 565 | self._dict_to_nvlist_to_dict(props) 566 
| 567 | def test_complex_dict(self): 568 | props = { 569 | "key1": "str", 570 | "key2": 10, 571 | "key3": { 572 | "skey1": True, 573 | "skey2": None, 574 | "skey3": [ 575 | True, 576 | False, 577 | True 578 | ] 579 | }, 580 | "key4": [ 581 | "ab", 582 | "bc" 583 | ], 584 | "key5": [ 585 | 2 ** 64 - 1, 586 | 1, 587 | 2, 588 | 3 589 | ], 590 | "key6": [ 591 | { 592 | "skey71": "a", 593 | "skey72": "b", 594 | }, 595 | { 596 | "skey71": "c", 597 | "skey72": "d", 598 | }, 599 | { 600 | "skey71": "e", 601 | "skey72": "f", 602 | } 603 | 604 | ], 605 | "type": 2 ** 32 - 1, 606 | "pool_context": -(2 ** 31) 607 | } 608 | res = self._dict_to_nvlist_to_dict(props) 609 | self.assertEqual(props, res) 610 | 611 | 612 | # vim: softtabstop=4 tabstop=4 expandtab shiftwidth=4 613 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | # Copyright 2015 ClusterHQ. See LICENSE file for details. 2 | 3 | from setuptools import setup, find_packages 4 | 5 | setup( 6 | name="pyzfs", 7 | version="0.2.3", 8 | description="Wrapper for libzfs_core", 9 | author="ClusterHQ", 10 | author_email="support@clusterhq.com", 11 | url="http://pyzfs.readthedocs.org", 12 | license="Apache License, Version 2.0", 13 | classifiers=[ 14 | "Development Status :: 4 - Beta", 15 | "Intended Audience :: Developers", 16 | "License :: OSI Approved :: Apache Software License", 17 | "Programming Language :: Python :: 2 :: Only", 18 | "Programming Language :: Python :: 2.7", 19 | "Topic :: System :: Filesystems", 20 | "Topic :: Software Development :: Libraries", 21 | ], 22 | keywords=[ 23 | "ZFS", 24 | "OpenZFS", 25 | "libzfs_core", 26 | ], 27 | 28 | packages=find_packages(), 29 | include_package_data=True, 30 | install_requires=[ 31 | "cffi", 32 | ], 33 | setup_requires=[ 34 | "cffi", 35 | ], 36 | zip_safe=False, 37 | test_suite="libzfs_core.test", 38 | ) 39 | 40 | # vim: softtabstop=4 tabstop=4 expandtab shiftwidth=4 41 | --------------------------------------------------------------------------------
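The test module above doubles as the clearest usage documentation for the nvlist helpers. Below is a minimal sketch of the same dict-to-``nvlist_t`` round trip driven outside the test suite. It uses only names that appear in this listing (``nvlist_in``, ``nvlist_out``, ``_lib.nvlist_dup`` and the ``ctypes`` wrappers); the absolute import path ``libzfs_core`` and the presence of the native libnvpair shared library on the system are assumptions, not guarantees made by the code above.

# Hypothetical usage sketch; assumes the package is installed as `libzfs_core`
# and that libnvpair is available for the cffi bindings to load.
from libzfs_core._nvlist import nvlist_in, nvlist_out, _lib
from libzfs_core.ctypes import uint32_t

# Plain Python values map to nvlist types: str -> string, bool -> boolean,
# None -> a valueless boolean flag, dict -> nested nvlist, and int -> uint64
# by default (a few well-known keys such as "rewind-request" map to narrower
# integer types, as the "implicit" tests above demonstrate).
props = {
    "name": "value",
    "count": 1,
    "enabled": True,
    "explicit": uint32_t(7),    # force a specific C integer width
    "nested": {"flag": None},
}

result = {}
nv_in = nvlist_in(props)                # wrap the dict as an nvlist_t *
with nvlist_out(result) as nv_out:      # nvlist_t ** read back into `result` on exit
    _lib.nvlist_dup(nv_in, nv_out, 0)   # copy it through libnvpair and back
assert result["name"] == "value"

As in the tests, values that were given an explicit C type are compared through int() after the round trip rather than by object identity.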