├── .cirrus.yml ├── .envrc ├── .gitattributes ├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md └── dependabot.yml ├── .gitignore ├── .restyled.yaml ├── .spelling ├── CHANGELOG.md ├── CODE_OF_CONDUCT.md ├── Cargo.lock ├── Cargo.toml ├── LICENSE ├── README.md ├── Vagrantfile ├── azure-pipelines.yml ├── cliff.toml ├── codecov.yml ├── contributing.md ├── flake.lock ├── flake.nix ├── justfile ├── libzetta.png ├── rust-toolchain.toml ├── src ├── fuzzy.rs ├── lib.rs ├── log.rs ├── parsers │ ├── fixtures │ │ └── SIGABRT.PID.84191.TIME.2019-08-21.20.04.09.fuzz │ ├── mod.rs │ ├── stdout.pest │ ├── zfs.pest │ └── zfs.rs ├── utils.rs ├── zfs │ ├── delegating.rs │ ├── description.rs │ ├── errors.rs │ ├── fixtures │ │ ├── bookmark_properties_freebsd.sorted │ │ ├── filesystem_properties_freebsd │ │ ├── filesystem_properties_freebsd.sorted │ │ ├── snapshot_properties_freebsd.sorted │ │ └── volume_properties_freebsd.sorted │ ├── lzc.rs │ ├── mod.rs │ ├── open3.rs │ ├── pathext.rs │ └── properties.rs └── zpool │ ├── description.rs │ ├── fixtures │ ├── import_with_empty_comment │ └── status_with_block_device_nested │ ├── mod.rs │ ├── open3.rs │ ├── properties.rs │ ├── topology.rs │ └── vdev.rs └── tests ├── test_misc.rs ├── test_misc2.rs ├── test_zfs.rs └── test_zpool.rs /.cirrus.yml: -------------------------------------------------------------------------------- 1 | setup: &FREEBSD_SETUP 2 | env: 3 | HOME: /tmp # cargo needs it 4 | RUST_BACKTRACE: full # Better info for debugging test failures. 
5 | setup_script: 6 | - pkg install -y llvm 7 | - fetch https://sh.rustup.rs -o rustup.sh 8 | - sh rustup.sh -y --profile=minimal --default-toolchain ${VERSION} 9 | 10 | build: &BUILD_TEST 11 | cargo_cache: 12 | folder: $HOME/.cargo/registry 13 | fingerprint_script: cat Cargo.lock || echo "" 14 | build_script: 15 | - env PATH="$HOME/.cargo/bin:$PATH" cargo build --all-features 16 | test_script: 17 | - mdconfig -a -s 96m -u1 18 | - env PATH="$HOME/.cargo/bin:$PATH" cargo test --all-features 19 | 20 | task: 21 | name: FreeBSD 14 22 | env: 23 | VERSION: stable 24 | freebsd_instance: 25 | image_family: freebsd-14-0-snap 26 | << : *FREEBSD_SETUP 27 | << : *BUILD_TEST 28 | before_cache_script: rm -rf $HOME/.cargo/registry/index 29 | 30 | task: 31 | name: FreeBSD 13 32 | env: 33 | VERSION: stable 34 | freebsd_instance: 35 | image: freebsd-13-3-release-amd64 36 | << : *FREEBSD_SETUP 37 | << : *BUILD_TEST 38 | before_cache_script: rm -rf $HOME/.cargo/registry/index 39 | -------------------------------------------------------------------------------- /.envrc: -------------------------------------------------------------------------------- 1 | use flake 2 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | * text=auto 2 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: 'Type: Bug' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior. 15 | 16 | **Expected behavior** 17 | A clear and concise description of what you expected to happen. 
18 | 19 | 20 | **Please complete the following information:** 21 | - OS: [e.g. FreeBSD 12] 22 | - ZoL version if on Linux 23 | - libZetta version 24 | 25 | **Additional context** 26 | Add any other context about the problem here. 27 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 
21 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: cargo 4 | directory: "/" 5 | schedule: 6 | interval: daily 7 | time: "10:00" 8 | open-pull-requests-limit: 10 9 | reviewers: 10 | - andoriyu 11 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target/ 2 | **/*.rs.bk 3 | .*.swp 4 | target* 5 | .idea 6 | *.iml 7 | *.fmt 8 | test-results/ 9 | .direnv 10 | .vagrant 11 | libfuckery 12 | build.rs 13 | .pre-commit-config.yaml 14 | -------------------------------------------------------------------------------- /.restyled.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | image: restyled/restyler-rustfmt:v1.4.11-nightly 3 | command: 4 | - rustfmt 5 | arguments: [] 6 | include: 7 | - "**/*.rs" 8 | interpreters: [] 9 | -------------------------------------------------------------------------------- /.spelling: -------------------------------------------------------------------------------- 1 | # markdown-spellcheck spelling configuration file 2 | # Format - lines beginning # are comments 3 | # global dictionary is at the start, file overrides afterwards 4 | # one word per line, to define a file override use ' - filename' 5 | # where filename is relative to this configuration file 6 | libzfs-rs 7 | zfs 8 | crate.io 9 | libpandemonium 10 | zol 11 | zpool 12 | vdev 13 | api 14 | BSD-2-Clause 15 | - README.md 16 | 0.2.0. 17 | 1.0.1 18 | 9.2. 
19 | 64mb 20 | open3 21 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | All notable changes to this project will be documented in this file. 4 | 5 | ## [0.5.0] - 2023-05-13 6 | 7 | ### Features 8 | 9 | - **zpool:** [**breaking**] Change ZpoolEngine::{status,status_all,all} functions ([#196](https://github.com/ZeroAssumptions/aide-de-camp/issues/196)) 10 | 11 | ## [0.4.3] - 2023-05-12 12 | 13 | ### Bug Fixes 14 | 15 | - Accidently used Deref from reexport instead of std. ([#186](https://github.com/ZeroAssumptions/aide-de-camp/issues/186)) 16 | - Include colon in valid path param ([#189](https://github.com/ZeroAssumptions/aide-de-camp/issues/189)) 17 | - **zpool:** Disk.path() does not return a full relative path 18 | 19 | ### Features 20 | 21 | - **zpool:** Add `status_all(...)` fn to query zpool status with some parameters ([#190](https://github.com/ZeroAssumptions/aide-de-camp/issues/190)) 22 | 23 | ## [0.4.2] - 2023-04-01 24 | 25 | ### Bug Fixes 26 | 27 | - Fix solaris build by using forked zfs-core-sys ([#185](https://github.com/ZeroAssumptions/aide-de-camp/issues/185)) 28 | 29 | ## [0.4.0] - 2023-04-01 30 | 31 | ### Bug Fixes 32 | 33 | - **zfs:** Allow snapshotting of entire pool ([#173](https://github.com/ZeroAssumptions/aide-de-camp/issues/173)) 34 | - **zpool:** Handling of in-use spares 35 | 36 | ### Documentation 37 | 38 | - **README:** Bring it more closer to reality. 
([#171](https://github.com/ZeroAssumptions/aide-de-camp/issues/171)) 39 | 40 | ## [0.3.1] - 2022-04-17 41 | 42 | ### Bug Fixes 43 | 44 | - **zfs:** Fix destroy_bookmarks method ([#170](https://github.com/ZeroAssumptions/aide-de-camp/issues/170)) 45 | 46 | ### Ci 47 | 48 | - Disable bookmarking test for now 49 | 50 | ## [0.3.0] - 2022-04-16 51 | 52 | ### Bug Fixes 53 | 54 | - **zfs:** Unflip bool_to_u64 results 55 | - **zfs:** ZLE (Zero Length Encoding) compression is not "LZE" 56 | - **zfs:** Filesystem_limit/snapshot_limit/filesystem_count/snapshot_count can now be 'none' ([#163](https://github.com/ZeroAssumptions/aide-de-camp/issues/163)) 57 | - (zpool): "see" line in status is not parsed correctly ([#168](https://github.com/ZeroAssumptions/aide-de-camp/issues/168)) 58 | 59 | ### Features 60 | 61 | - **zfs:** In CreateDatasetRequest, inherit most things if not explicitly set ([#155](https://github.com/ZeroAssumptions/aide-de-camp/issues/155)) 62 | - **zfs:** Add support for running channel programs 63 | - **crate:** Reexport libnv, do not use unused strum 64 | 65 | ### Ci 66 | 67 | - **azure:** Bump builders to 20.04 ([#158](https://github.com/ZeroAssumptions/aide-de-camp/issues/158)) 68 | 69 | ## [0.2.3] - 2021-10-25 70 | 71 | ### Bug Fixes 72 | 73 | - **zfs:** Fixed 'Failed to parse value: VariantNotFound' on Linux, zfs-2.0.3 ([#146](https://github.com/ZeroAssumptions/aide-de-camp/issues/146)) 74 | 75 | ### Documentation 76 | 77 | - **changelog:** Update for v0.2.2 78 | 79 | ### Features 80 | 81 | - **zfs:** Convert nvlist errors to HashMaps to make the Error type Send+Sync ([#152](https://github.com/ZeroAssumptions/aide-de-camp/issues/152)) ([#153](https://github.com/ZeroAssumptions/aide-de-camp/issues/153)) 82 | 83 | ## [0.2.2] - 2020-04-26 84 | 85 | ### Bug Fixes 86 | 87 | - **zfs:** Fix incremental send in LZC ([#128](https://github.com/ZeroAssumptions/aide-de-camp/issues/128)) 88 | 89 | ### Documentation 90 | 91 | - **readme:** Update to reflect current 
state of things 92 | - **readme:** Fix footnote 93 | 94 | ### Chore 95 | 96 | - **changelog:** Update for 0.2.1 97 | 98 | ## [0.2.1] - 2020-03-22 99 | 100 | ### Features 101 | 102 | - **zfs:** Fix dataset name parser for ZFS ([#127](https://github.com/ZeroAssumptions/aide-de-camp/issues/127)) 103 | 104 | ## [0.2.0] - 2020-03-01 105 | 106 | ### Bug Fixes 107 | 108 | - **zpool:** Integer overflow in zpool parser 109 | 110 | ### Features 111 | 112 | - Inception of ZFS module ([#83](https://github.com/ZeroAssumptions/aide-de-camp/issues/83)) 113 | - **zfs:** Check existence of dataset. 114 | - Fuzzy testing target ([#90](https://github.com/ZeroAssumptions/aide-de-camp/issues/90)) 115 | - **zfs:** Basic zfs create and destroy operations ([#91](https://github.com/ZeroAssumptions/aide-de-camp/issues/91)) 116 | - Remove unicode feature from regex crate ([#93](https://github.com/ZeroAssumptions/aide-de-camp/issues/93)) 117 | - **zfs:** Listing filesystems and volumes ([#94](https://github.com/ZeroAssumptions/aide-de-camp/issues/94)) 118 | - **zfs:** Add PathExt trait to make it easier to work with dataset names ([#100](https://github.com/ZeroAssumptions/aide-de-camp/issues/100)) 119 | - **zfs:** Pass errors from lzc snapshot call to the consumer. 
([#102](https://github.com/ZeroAssumptions/aide-de-camp/issues/102)) 120 | - **zfs:** Ability to read filesystem dataset properties ([#111](https://github.com/ZeroAssumptions/aide-de-camp/issues/111)) 121 | - **zfs:** Read properties of a snapshot ([#112](https://github.com/ZeroAssumptions/aide-de-camp/issues/112)) 122 | - **zfs:** Read properties of a volume ([#113](https://github.com/ZeroAssumptions/aide-de-camp/issues/113)) 123 | - **zfs:** Read properties of a bookmark ([#114](https://github.com/ZeroAssumptions/aide-de-camp/issues/114)) 124 | - **zfs:** Ability to work with bookmarks 125 | - **zfs:** Ability to send snapshot ([#119](https://github.com/ZeroAssumptions/aide-de-camp/issues/119)) 126 | - **zfs:** Remove known unknowns from properties ([#121](https://github.com/ZeroAssumptions/aide-de-camp/issues/121)) 127 | - Add a single point of logging configuration ([#123](https://github.com/ZeroAssumptions/aide-de-camp/issues/123)) 128 | 129 | ### Styling 130 | 131 | - Run cargo fmt ([#117](https://github.com/ZeroAssumptions/aide-de-camp/issues/117)) 132 | 133 | ### Ci 134 | 135 | - Add Cirrus-CI ([#76](https://github.com/ZeroAssumptions/aide-de-camp/issues/76)) 136 | - Create an memory device for tests on Cirrus ([#77](https://github.com/ZeroAssumptions/aide-de-camp/issues/77)) 137 | - **cirrus:** Update FreeBSD builder images to latest production release ([#122](https://github.com/ZeroAssumptions/aide-de-camp/issues/122)) 138 | 139 | ## [0.1.1] - 2019-08-12 140 | 141 | ### Features 142 | 143 | - **zpool:** Add Zpool::add ([#53](https://github.com/ZeroAssumptions/aide-de-camp/issues/53)) 144 | - **zpool:** Remove device from zpool ([#60](https://github.com/ZeroAssumptions/aide-de-camp/issues/60)) 145 | - **zpool:** Fix parser for logs and caches. Add add_zil and add_cache ([#63](https://github.com/ZeroAssumptions/aide-de-camp/issues/63)) 146 | - **zpool:** Add replace_disk. 
Closes #25 ([#67](https://github.com/ZeroAssumptions/aide-de-camp/issues/67)) 147 | - **zpool:** Add regex for another type of vdev reuse. Closes #49 ([#69](https://github.com/ZeroAssumptions/aide-de-camp/issues/69)) 148 | 149 | ### Refactor 150 | 151 | - Make Vdev and Zpool structure more understandable ([#39](https://github.com/ZeroAssumptions/aide-de-camp/issues/39)) 152 | - Switch to Pairs#as_span ([#56](https://github.com/ZeroAssumptions/aide-de-camp/issues/56)) 153 | 154 | ### Styling 155 | 156 | - New fmt config ([#54](https://github.com/ZeroAssumptions/aide-de-camp/issues/54)) 157 | 158 | ### Ci 159 | 160 | - Report coverage to Azure Pipelines ([#55](https://github.com/ZeroAssumptions/aide-de-camp/issues/55)) 161 | - Try to speedup the build ([#68](https://github.com/ZeroAssumptions/aide-de-camp/issues/68)) 162 | 163 | 164 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as 6 | contributors and maintainers pledge to making participation in our project and 7 | our community a harassment-free experience for everyone, regardless of age, body 8 | size, disability, ethnicity, gender identity and expression, level of experience, 9 | nationality, personal appearance, race, religion, or sexual identity and 10 | orientation. 
11 | 12 | ## Our Standards 13 | 14 | Examples of behavior that contributes to creating a positive environment 15 | include: 16 | 17 | * Using welcoming and inclusive language 18 | * Being respectful of differing viewpoints and experiences 19 | * Gracefully accepting constructive criticism 20 | * Focusing on what is best for the community 21 | * Showing empathy towards other community members 22 | 23 | Examples of unacceptable behavior by participants include: 24 | 25 | * The use of sexualized language or imagery and unwelcome sexual attention or 26 | advances 27 | * Trolling, insulting/derogatory comments, and personal or political attacks 28 | * Public or private harassment 29 | * Publishing others' private information, such as a physical or electronic 30 | address, without explicit permission 31 | * Other conduct which could reasonably be considered inappropriate in a 32 | professional setting 33 | 34 | ## Our Responsibilities 35 | 36 | Project maintainers are responsible for clarifying the standards of acceptable 37 | behavior and are expected to take appropriate and fair corrective action in 38 | response to any instances of unacceptable behavior. 39 | 40 | Project maintainers have the right and responsibility to remove, edit, or 41 | reject comments, commits, code, wiki edits, issues, and other contributions 42 | that are not aligned to this Code of Conduct, or to ban temporarily or 43 | permanently any contributor for other behaviors that they deem inappropriate, 44 | threatening, offensive, or harmful. 45 | 46 | ## Scope 47 | 48 | This Code of Conduct applies both within project spaces and in public spaces 49 | when an individual is representing the project or its community. Examples of 50 | representing a project or community include using an official project e-mail 51 | address, posting via an official social media account, or acting as an appointed 52 | representative at an online or offline event. 
Representation of a project may be 53 | further defined and clarified by project maintainers. 54 | 55 | ## Enforcement 56 | 57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 58 | reported by contacting the project team at andoriyu@gmail.com. All 59 | complaints will be reviewed and investigated and will result in a response that 60 | is deemed necessary and appropriate to the circumstances. The project team is 61 | obligated to maintain confidentiality with regard to the reporter of an incident. 62 | Further details of specific enforcement policies may be posted separately. 63 | 64 | Project maintainers who do not follow or enforce the Code of Conduct in good 65 | faith may face temporary or permanent repercussions as determined by other 66 | members of the project's leadership. 67 | 68 | ## Attribution 69 | 70 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, 71 | available at [http://contributor-covenant.org/version/1/4][version] 72 | 73 | [homepage]: http://contributor-covenant.org 74 | [version]: http://contributor-covenant.org/version/1/4/ -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | authors = ["Andrey Snow "] 3 | version = "0.5.0" 4 | description = "libzetta is a stable interface for programmatic administration of ZFS" 5 | repository = "https://github.com/Inner-Heaven/libzetta-rs" 6 | keywords = ["zfs", "freebsd", "zol", "os", "open3"] 7 | categories = ["os", "filesystem", "external-ffi-bindings"] 8 | license = "BSD-2-Clause" 9 | name = "libzetta" 10 | edition = "2018" 11 | readme = "README.md" 12 | 13 | [badges] 14 | maintenance = { status = "actively-developed" } 15 | is-it-maintained-open-issues = { repository = "Inner-Heaven/libzetta-rs" } 16 | is-it-maintained-issue-resolution = { repository = "Inner-Heaven/libzetta-rs" } 17 | azure-devops 
= { project = "andoriyu/libpandemonium", pipeline = "libzetta-rs", build="4" } 18 | 19 | [dependencies] 20 | getset = "0.1.0" 21 | derive_builder = "0.12" 22 | lazy_static = "1.2" 23 | libc = "0.2" 24 | pest = "2.7" 25 | pest_derive = "2.7" 26 | quick-error = "1.2" 27 | slog = "2" 28 | slog-stdlog = "4" 29 | zfs-core-sys = { version = "0.5.2", package = "libzetta-zfs-core-sys"} 30 | cstr-argument = "0.1.1" 31 | strum = "0.25.0" 32 | strum_macros = "0.25.2" 33 | chrono = "0.4.30" 34 | bitflags = "1.2.1" 35 | once_cell = "1.18.0" 36 | 37 | [dependencies.libnv] 38 | version = "0.4.3" 39 | default-features = false 40 | features = ["nvpair"] 41 | 42 | [dependencies.regex] 43 | version = "1.9.4" 44 | default-features = false 45 | features = ["std", "perf", "unicode-perl"] 46 | 47 | [dev-dependencies] 48 | cavity = "1.1" 49 | rand = "0.8" 50 | slog-term = "2" 51 | tempdir = "0.3" 52 | tempfile = "3" 53 | 54 | [build-dependencies] 55 | cmake = "0.1" 56 | 57 | [package.metadata.release] 58 | dev-version-ext = "pre" 59 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | BSD 2-Clause License 2 | 3 | Copyright (c) 2017, Andrey Cherkashin 4 | All rights reserved. 5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions are met: 8 | 9 | * Redistributions of source code must retain the above copyright notice, this 10 | list of conditions and the following disclaimer. 11 | 12 | * Redistributions in binary form must reproduce the above copyright notice, 13 | this list of conditions and the following disclaimer in the documentation 14 | and/or other materials provided with the distribution. 
15 | 16 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 17 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 19 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 20 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 22 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 23 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 24 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 25 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |

2 | 3 |

4 | 5 | [![Build Status](https://dev.azure.com/andoriyu/libpandemonium/_apis/build/status/libzetta-rs?branchName=master)](https://dev.azure.com/andoriyu/libpandemonium/_build/latest?definitionId=4&branchName=master) 6 | [![codecov](https://codecov.io/gh/Inner-Heaven/libzetta-rs/branch/master/graph/badge.svg)](https://codecov.io/gh/Inner-Heaven/libzetta-rs) 7 | [![Crates.io](https://img.shields.io/crates/v/libzetta.svg)](https://crates.io/crates/libzetta) 8 | [![Cirrus CI - Base Branch Build Status](https://img.shields.io/cirrus/github/Inner-Heaven/libzetta-rs?label=cirrus-ci)](https://cirrus-ci.com/github/Inner-Heaven/libzetta-rs) 9 | [![docs.rs](https://docs.rs/libzetta/badge.svg)](https://docs.rs/libzetta) 10 | [![license](https://img.shields.io/github/license/Inner-Heaven/libzetta-rs)](https://github.com/Inner-Heaven/libzetta-rs/blob/master/LICENSE) 11 | 12 | > libzetta-rs is a stable interface for programmatic administration of ZFS 13 | 14 | ## Installation 15 | 16 | Not yet. It won't break your pool or kill your brother, but API might change. Wait until 1.0.0. I have a pretty decent roadmap to 1.0.0. 17 | 18 | ## Usage 19 | 20 | Public API for `zpool` is stable. Public API for `zfs` might change after I actually get to use it in other projects. Consult the [documentation](https://docs.rs/libzetta/latest/libzetta/) on usage. 21 | 22 | ### FreeBSD 23 | 24 | This library is focused on FreeBSD support. This should work on any FreeBSD version since 9.2. No intention of supporting legacy versions. Supported versions: 25 | - 12.1 26 | - 13.0 (No CI setup for it) 27 | 28 | *NOTE*: FreeBSD 13.0 borked `libzfs_core` dependencies. Until it's fixed, the solution is to use `LD_PRELOAD` to load `libzfs_core` from ports. 29 | *NOTE*: Since FreeBSD switched to OpenZFS, support for "legacy" will be dropped at first breakage. 30 | 31 | ### Linux 32 | 33 | Verified on what is available for Ubuntu 20.04; at the time of writing it's `0.8.3`. 
34 | 35 | ## How it works 36 | 37 | ZFS doesn't have a stable API at all. `libzfs_core`(`lzc`) fills some gaps, but not entirely. While `lzc` provides a stable API to some features of zfs, there is no such thing for zpool. This library resorts to `zfs(8)` and `zpool(8)` where `lzc` falls short. 38 | 39 | ## Running tests 40 | 41 | `Vagrantfile` has 3 VMs: ubuntu-20.04, FreeBSD 12 and FreeBSD 13. To use them: 42 | 43 | - Spin up either one of those 44 | - Install [`just`](https://github.com/casey/just) 45 | - Run `just test-ubuntu` or `just test-freebsd12` to run tests in the VM 46 | - To run a specific test run `just test-ubuntu "-- easy_snapshot_and_bookmark"` 47 | 48 | *NOTE*: Integration tests must be run as root. Zpools and datasets will be created/modified/destroyed. If it wipes your system datasets that's on you for running it outside of a VM. 49 | 50 | ## Nix 51 | 52 | Project is [nix-flake](https://nixos.wiki/wiki/Flakes) enabled, but the flake itself isn't enough: you need to provide `libzfs_core` and its dependencies yourself. This is on-purpose. 53 | 54 | ## Current feature status 55 | 56 | ### zpool 57 | 58 | | | Create | Destroy | Get Properties | Set Properties | Scrub | Export | Import | List Available | Read Status | Add vdev | Replace Disk | 59 | |-------|--------|---------|----------------|----------------|-------|--------|--------|----------------|-------------|----------|--------------| 60 | | open3 | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ | ✔ | ✔¹ | ✔ | ✔ | 61 | 62 | 1. Reads the status, but api isn't stable and does a poor job at reporting scrubbing status. 63 | 64 | 65 | ### zfs 66 | 67 | #### Filesystem and ZVOL 68 | 69 | | | Create | Destroy | List | Get Properties | Update Properties | 70 | | ------- | --------- | ----------- | -------- | ----------------- | --------------------- | 71 | | open3 | ❌ | ❌ | ✔ | ✔ | ❌ | 72 | | lzc | ✔¹ | ✔ | ❌ | ❌ | ❌ | 73 | 74 | 1. Might not have all properties available. 
75 | 76 | #### Snapshot and bookmark 77 | 78 | | | Create | Destroy | List | Get Properties | Send | Recv | 79 | |------- |--------- |----------- |-------- |----------------- |-------- |------ | 80 | | open3 | ❌ | ❌ | ✔ | ✔ | ❌ | ❌ | 81 | | lzc | ✔¹ | ✔ | ❌ | ❌ | ✔ | ❌ | 82 | 83 | 1. Might not have all properties available. 84 | 85 | ## Alternatives 86 | 87 | ### https://github.com/whamcloud/rust-libzfs 88 | 89 | Unlike them LibZetta doesn't link against private libraries of ZFS. `libzetta` also has more documention. 90 | 91 | ### https://github.com/jmesmon/rust-libzfs 92 | 93 | LibZetta has zpool APIs. LibZetta shares `-sys` crates with this library. LibZetta also will delegate certain features of `zfs(8)` to open3 implementation. 94 | 95 | ## LICENSE 96 | 97 | [BSD-2-Clause](LICENSE). 98 | -------------------------------------------------------------------------------- /Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | Vagrant.configure("2") do |config| 5 | config.vm.define "zetta-ubuntu" do |c| 6 | c.vm.box = "generic/ubuntu2004" 7 | c.vm.hostname = "zetta-ubuntu" 8 | c.vm.provision "shell", inline: <<-SHELL 9 | apt-get update 10 | apt-get install -y libblkid-dev libattr1-dev libcurl4-openssl-dev libelf-dev libdw-dev cmake gcc binutils-dev libiberty-dev zlib1g-dev libssl-dev curl rsync 11 | apt-get update 12 | apt-get install -y zfsutils-linux libnvpair1linux libzfslinux-dev pkg-config 13 | SHELL 14 | end 15 | 16 | config.vm.define "zetta-freebsd13" do |c| 17 | c.vm.box = "generic/freebsd13" 18 | c.vm.hostname = "zetta-freebsd13" 19 | c.vm.provision "shell", inline: <<-SHELL 20 | env ASSUME_ALWAYS_YES=YES pkg install curl pkgconf rsync openzfs 21 | SHELL 22 | end 23 | 24 | config.vm.define "zetta-freebsd14" do |c| 25 | c.vm.box = "generic/freebsd14" 26 | c.vm.hostname = "zetta-freebsd14" 27 | c.vm.provision "shell", inline: <<-SHELL 28 | env ASSUME_ALWAYS_YES=YES 
pkg install curl pkgconf rsync 29 | SHELL 30 | end 31 | 32 | config.vm.box_check_update = false 33 | 34 | config.vm.provision "shell", privileged: false, inline: <<-SHELL 35 | curl https://sh.rustup.rs -sSf | sh -s -- -y 36 | . "$HOME/.cargo/env" 37 | 38 | rustup install stable 39 | SHELL 40 | end 41 | -------------------------------------------------------------------------------- /azure-pipelines.yml: -------------------------------------------------------------------------------- 1 | trigger: 2 | - master 3 | pr: 4 | - master 5 | jobs: 6 | - job: run_tests 7 | displayName: Run Unit Tests 8 | pool: 9 | vmImage: ubuntu-20.04 10 | steps: 11 | - script: | 12 | sudo apt-get update 13 | sudo apt-get install -y libblkid-dev libattr1-dev libcurl4-openssl-dev libelf-dev libdw-dev cmake gcc binutils-dev libiberty-dev zlib1g-dev libssl-dev 14 | displayName: Install dependencies 15 | - script: | 16 | sudo sh -c "curl https://sh.rustup.rs -sSf | sh -s -- -y" 17 | sudo env PATH=${PATH} rustup install stable 18 | - script: | 19 | sudo apt-get update 20 | sudo apt-get install zfsutils-linux libnvpair1linux libzfslinux-dev 21 | displayName: Install ZFS on Linux 22 | - script: | 23 | dd if=/dev/zero of=loop99 bs=1M count=96 24 | sudo losetup /dev/loop99 `pwd`/loop99 25 | displayName: Setup loop device 26 | - script: | 27 | curl -LSfs https://japaric.github.io/trust/install.sh | sh -s -- --git andoriyu/cargo-suity --tag v0.3.0 28 | 29 | sudo ln -s /home/vsts/.cargo/bin/cargo-suity /usr/local/bin/ 30 | displayName: Install suity 31 | - script: | 32 | sudo env PATH=${PATH} RUSTFLAGS="-C link-dead-code" cargo +stable test --no-run 33 | mkdir test-results 34 | sudo env PATH=${PATH} RUSTFLAGS="-C link-dead-code" cargo +stable suity 35 | displayName: Run Tests 36 | - script: | 37 | sudo sh -c "zpool list -H -oname | grep test | xargs zpool destroy" 38 | RUSTFLAGS="--cfg procmacro2_semver_exempt" cargo +stable install cargo-tarpaulin -f 39 | sudo env PATH=${PATH} RUSTFLAGS="-C 
link-dead-code" cargo +stable tarpaulin --out Xml 40 | bash <(curl -s https://codecov.io/bash) 41 | displayName: Run Coverage 42 | condition: and(succeeded(),eq(variables['rustup_toolchain'], 'nightly')) 43 | - task: PublishCodeCoverageResults@1 44 | inputs: 45 | codeCoverageTool: 'Cobertura' 46 | summaryFileLocation: $(System.DefaultWorkingDirectory)/**/cobertura.xml 47 | condition: and(succeeded(),eq(variables['rustup_toolchain'], 'nightly')) 48 | - task: PublishTestResults@2 49 | inputs: 50 | testRunner: JUnit 51 | testResultsFiles: 'test-results/default.xml' 52 | failTaskOnFailedTests: true 53 | -------------------------------------------------------------------------------- /cliff.toml: -------------------------------------------------------------------------------- 1 | # configuration file for git-cliff (0.1.0) 2 | 3 | [changelog] 4 | # changelog header 5 | header = """ 6 | # Changelog\n 7 | All notable changes to this project will be documented in this file.\n 8 | """ 9 | # template for the changelog body 10 | # https://tera.netlify.app/docs/#introduction 11 | body = """ 12 | {% if version %}\ 13 | ## [{{ version | trim_start_matches(pat="v") }}] - {{ timestamp | date(format="%Y-%m-%d") }} 14 | {% else %}\ 15 | ## [unreleased] 16 | {% endif %}\ 17 | {% for group, commits in commits | group_by(attribute="group") %} 18 | ### {{ group | upper_first }} 19 | {% for commit in commits %} 20 | - {% if commit.scope %}**{{commit.scope}}:** {% endif %}{% if commit.breaking %}[**breaking**] {% endif %}{{ commit.message | upper_first }}\ 21 | {% endfor %} 22 | {% endfor %}\n 23 | """ 24 | # remove the leading and trailing whitespace from the template 25 | trim = true 26 | # changelog footer 27 | footer = """ 28 | 29 | """ 30 | 31 | [git] 32 | # parse the commits based on https://www.conventionalcommits.org 33 | conventional_commits = true 34 | # filter out the commits that are not conventional 35 | filter_unconventional = true 36 | # process each line of a commit as an 
individual commit 37 | split_commits = false 38 | # regex for preprocessing the commit messages 39 | commit_preprocessors = [ 40 | { pattern = '\((\w+\s)?#([0-9]+)\)', replace = "([#${2}](https://github.com/ZeroAssumptions/aide-de-camp/issues/${2}))"}, 41 | ] 42 | # regex for parsing and grouping commits 43 | commit_parsers = [ 44 | { message = "^feat", group = "Features"}, 45 | { message = "^fix", group = "Bug Fixes"}, 46 | { message = "^doc", group = "Documentation"}, 47 | { message = "^perf", group = "Performance"}, 48 | { message = "^refactor", group = "Refactor"}, 49 | { message = "^style", group = "Styling"}, 50 | { message = "^test", group = "Testing"}, 51 | { message = "^chore\\(release\\): prepare for", skip = true}, 52 | { message = "^chore", group = "Miscellaneous Tasks", skip = true}, 53 | { body = ".*security", group = "Security"}, 54 | ] 55 | # filter out the commits that are not matched by commit parsers 56 | filter_commits = false 57 | # glob pattern for matching git tags 58 | tag_pattern = "v[0-9]*" 59 | # regex for skipping tags 60 | skip_tags = "v0.1.0-beta.1" 61 | # regex for ignoring tags 62 | ignore_tags = "" 63 | # sort the tags chronologically 64 | date_order = false 65 | # sort the commits inside sections by oldest/newest order 66 | sort_commits = "oldest" 67 | -------------------------------------------------------------------------------- /codecov.yml: -------------------------------------------------------------------------------- 1 | codecov: 2 | token: 4fb4f3a3-27d6-4d17-ac96-852e234d1519 3 | notify: 4 | require_ci_to_pass: yes 5 | 6 | ignore: 7 | - "tests/*" 8 | coverage: 9 | range: 50..90 10 | round: down 11 | precision: 3 12 | status: 13 | project: 14 | target: 90 15 | threshold: 3 16 | -------------------------------------------------------------------------------- /contributing.md: -------------------------------------------------------------------------------- 1 | # Contributing to libzetta-rs 2 | 3 | First and foremost, thank 
you! We appreciate that you want to contribute to libzetta-rs, your time is valuable, and your contributions mean a lot to us. 4 | 5 | ## Important! 6 | 7 | By contributing to this project, you: 8 | 9 | * Agree that you have authored 100% of the content 10 | * Agree that you have the necessary rights to the content 11 | * Agree that you have received the necessary permissions from your employer to make the contributions (if applicable) 12 | * Agree that the content you contribute may be provided under the Project license(s) 13 | 14 | ## Getting started 15 | 16 | **What does "contributing" mean?** 17 | 18 | Creating an issue is the simplest form of contributing to a project. But there are many ways to contribute, including the following: 19 | 20 | - Updating or correcting documentation 21 | - Feature requests 22 | - Bug reports 23 | 24 | If you'd like to learn more about contributing in general, the [Guide to Idiomatic Contributing](https://github.com/jonschlinkert/idiomatic-contributing) has a lot of useful information. 25 | 26 | **Showing support for libzetta-rs** 27 | 28 | Please keep in mind that open source software is built by people like you, who spend their free time creating things the rest the community can use. 29 | 30 | Don't have time to contribute? No worries, here are some other ways to show your support for libzetta-rs: 31 | 32 | - star the [project](https://github.com/Inner-Heaven/libzetta-rs) 33 | - tweet your support for libzetta-rs 34 | 35 | ## Issues 36 | 37 | ### Before creating an issue 38 | 39 | Please try to determine if the issue is caused by an underlying library, and if so, create the issue there. Sometimes this is difficult to know. We only ask that you attempt to give a reasonable attempt to find out. Oftentimes the readme will have advice about where to go to create issues. 40 | 41 | Try to follow these guidelines 42 | 43 | - **Avoid creating issues for implementation help**. 
It's much better for discoverability, SEO, and semantics - to keep the issue tracker focused on bugs and feature requests - to ask implementation-related questions on [stackoverflow.com][so] 44 | - **Investigate the issue**: 45 | - **Check the readme** - oftentimes you will find notes about creating issues, and where to go depending on the type of issue. 46 | - Create the issue in the appropriate repository. 47 | 48 | ### Creating an issue 49 | 50 | Please be as descriptive as possible when creating an issue. Give us the information we need to successfully answer your question or address your issue by answering the following in your issue: 51 | 52 | - **version**: please note the version of libzetta-rs are you using 53 | - **extensions, plugins, helpers, etc** (if applicable): please list any extensions you're using 54 | - **error messages**: please paste any error messages into the issue, or a [gist](https://gist.github.com/) 55 | 56 | ### Closing issues 57 | 58 | The original poster or the maintainer's of libzetta-rs may close an issue at any time. Typically, but not exclusively, issues are closed when: 59 | 60 | - The issue is resolved 61 | - The project's maintainers have determined the issue is out of scope 62 | - An issue is clearly a duplicate of another issue, in which case the duplicate issue will be linked. 63 | - A discussion has clearly run its course 64 | 65 | 66 | ## Next steps 67 | 68 | **Tips for creating idiomatic issues** 69 | 70 | Spending just a little extra time to review best practices and brush up on your contributing skills will, at minimum, make your issue easier to read, easier to resolve, and more likely to be found by others who have the same or similar issue in the future. At best, it will open up doors and potential career opportunities by helping you be at your best. 
71 | 72 | The following resources were hand-picked to help you be the most effective contributor you can be: 73 | 74 | - The [Guide to Idiomatic Contributing](https://github.com/jonschlinkert/idiomatic-contributing) is a great place for newcomers to start, but there is also information for experienced contributors there. 75 | - Take some time to learn basic markdown. We can't stress this enough. Don't start pasting code into GitHub issues before you've taken a moment to review this [markdown cheatsheet](https://gist.github.com/jonschlinkert/5854601) 76 | - The GitHub guide to [basic markdown](https://help.github.com/articles/markdown-basics/) is another great markdown resource. 77 | - Learn about [GitHub Flavored Markdown](https://help.github.com/articles/github-flavored-markdown/). And if you want to really go above and beyond, read [mastering markdown](https://guides.github.com/features/mastering-markdown/). 78 | 79 | At the very least, please try to: 80 | 81 | - Use backticks to wrap code. 
This ensures that it retains its formatting and isn't modified when it's rendered by GitHub, and makes the code more readable to others 82 | - When applicable, use syntax highlighting by adding the correct language name after the first "code fence" 83 | 84 | 85 | [so]: http://stackoverflow.com/questions/tagged/libzetta-rs 86 | -------------------------------------------------------------------------------- /flake.lock: -------------------------------------------------------------------------------- 1 | { 2 | "nodes": { 3 | "fenix": { 4 | "inputs": { 5 | "nixpkgs": [ 6 | "nixpkgs" 7 | ], 8 | "rust-analyzer-src": "rust-analyzer-src" 9 | }, 10 | "locked": { 11 | "lastModified": 1683872481, 12 | "narHash": "sha256-BLXcc6oCbv98MGn/MSUTH8C4oKdczk/C5JqZ3ZlGAXU=", 13 | "owner": "nix-community", 14 | "repo": "fenix", 15 | "rev": "dd518e99e2833bb13e25570f88f2e16cdc5f8b4e", 16 | "type": "github" 17 | }, 18 | "original": { 19 | "owner": "nix-community", 20 | "repo": "fenix", 21 | "type": "github" 22 | } 23 | }, 24 | "flake-compat": { 25 | "flake": false, 26 | "locked": { 27 | "lastModified": 1673956053, 28 | "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=", 29 | "owner": "edolstra", 30 | "repo": "flake-compat", 31 | "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9", 32 | "type": "github" 33 | }, 34 | "original": { 35 | "owner": "edolstra", 36 | "repo": "flake-compat", 37 | "type": "github" 38 | } 39 | }, 40 | "flake-utils": { 41 | "inputs": { 42 | "systems": "systems" 43 | }, 44 | "locked": { 45 | "lastModified": 1681202837, 46 | "narHash": "sha256-H+Rh19JDwRtpVPAWp64F+rlEtxUWBAQW28eAi3SRSzg=", 47 | "owner": "numtide", 48 | "repo": "flake-utils", 49 | "rev": "cfacdce06f30d2b68473a46042957675eebb3401", 50 | "type": "github" 51 | }, 52 | "original": { 53 | "owner": "numtide", 54 | "repo": "flake-utils", 55 | "type": "github" 56 | } 57 | }, 58 | "flake-utils_2": { 59 | "locked": { 60 | "lastModified": 1667395993, 61 | "narHash": 
"sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=", 62 | "owner": "numtide", 63 | "repo": "flake-utils", 64 | "rev": "5aed5285a952e0b949eb3ba02c12fa4fcfef535f", 65 | "type": "github" 66 | }, 67 | "original": { 68 | "owner": "numtide", 69 | "repo": "flake-utils", 70 | "type": "github" 71 | } 72 | }, 73 | "gitignore": { 74 | "inputs": { 75 | "nixpkgs": [ 76 | "pre-commit-hooks", 77 | "nixpkgs" 78 | ] 79 | }, 80 | "locked": { 81 | "lastModified": 1660459072, 82 | "narHash": "sha256-8DFJjXG8zqoONA1vXtgeKXy68KdJL5UaXR8NtVMUbx8=", 83 | "owner": "hercules-ci", 84 | "repo": "gitignore.nix", 85 | "rev": "a20de23b925fd8264fd7fad6454652e142fd7f73", 86 | "type": "github" 87 | }, 88 | "original": { 89 | "owner": "hercules-ci", 90 | "repo": "gitignore.nix", 91 | "type": "github" 92 | } 93 | }, 94 | "nixpkgs": { 95 | "locked": { 96 | "lastModified": 1683408522, 97 | "narHash": "sha256-9kcPh6Uxo17a3kK3XCHhcWiV1Yu1kYj22RHiymUhMkU=", 98 | "owner": "nixos", 99 | "repo": "nixpkgs", 100 | "rev": "897876e4c484f1e8f92009fd11b7d988a121a4e7", 101 | "type": "github" 102 | }, 103 | "original": { 104 | "owner": "nixos", 105 | "ref": "nixos-unstable", 106 | "repo": "nixpkgs", 107 | "type": "github" 108 | } 109 | }, 110 | "nixpkgs-stable": { 111 | "locked": { 112 | "lastModified": 1678872516, 113 | "narHash": "sha256-/E1YwtMtFAu2KUQKV/1+KFuReYPANM2Rzehk84VxVoc=", 114 | "owner": "NixOS", 115 | "repo": "nixpkgs", 116 | "rev": "9b8e5abb18324c7fe9f07cb100c3cd4a29cda8b8", 117 | "type": "github" 118 | }, 119 | "original": { 120 | "owner": "NixOS", 121 | "ref": "nixos-22.11", 122 | "repo": "nixpkgs", 123 | "type": "github" 124 | } 125 | }, 126 | "nixpkgs_2": { 127 | "locked": { 128 | "lastModified": 1681303793, 129 | "narHash": "sha256-JEdQHsYuCfRL2PICHlOiH/2ue3DwoxUX7DJ6zZxZXFk=", 130 | "owner": "NixOS", 131 | "repo": "nixpkgs", 132 | "rev": "fe2ecaf706a5907b5e54d979fbde4924d84b65fc", 133 | "type": "github" 134 | }, 135 | "original": { 136 | "owner": "NixOS", 137 | "ref": "nixos-unstable", 138 | 
"repo": "nixpkgs", 139 | "type": "github" 140 | } 141 | }, 142 | "pre-commit-hooks": { 143 | "inputs": { 144 | "flake-compat": "flake-compat", 145 | "flake-utils": "flake-utils_2", 146 | "gitignore": "gitignore", 147 | "nixpkgs": "nixpkgs_2", 148 | "nixpkgs-stable": "nixpkgs-stable" 149 | }, 150 | "locked": { 151 | "lastModified": 1682596858, 152 | "narHash": "sha256-Hf9XVpqaGqe/4oDGr30W8HlsWvJXtMsEPHDqHZA6dDg=", 153 | "owner": "cachix", 154 | "repo": "pre-commit-hooks.nix", 155 | "rev": "fb58866e20af98779017134319b5663b8215d912", 156 | "type": "github" 157 | }, 158 | "original": { 159 | "owner": "cachix", 160 | "repo": "pre-commit-hooks.nix", 161 | "type": "github" 162 | } 163 | }, 164 | "root": { 165 | "inputs": { 166 | "fenix": "fenix", 167 | "flake-utils": "flake-utils", 168 | "nixpkgs": "nixpkgs", 169 | "pre-commit-hooks": "pre-commit-hooks" 170 | } 171 | }, 172 | "rust-analyzer-src": { 173 | "flake": false, 174 | "locked": { 175 | "lastModified": 1683815219, 176 | "narHash": "sha256-dC79Q2Xw8sBGz6a41H15XaT3pHQ6xP4EVY4axdxUb4E=", 177 | "owner": "rust-lang", 178 | "repo": "rust-analyzer", 179 | "rev": "9b3387454d7c70ec768114871682ee2946ec88a8", 180 | "type": "github" 181 | }, 182 | "original": { 183 | "owner": "rust-lang", 184 | "ref": "nightly", 185 | "repo": "rust-analyzer", 186 | "type": "github" 187 | } 188 | }, 189 | "systems": { 190 | "locked": { 191 | "lastModified": 1681028828, 192 | "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", 193 | "owner": "nix-systems", 194 | "repo": "default", 195 | "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e", 196 | "type": "github" 197 | }, 198 | "original": { 199 | "owner": "nix-systems", 200 | "repo": "default", 201 | "type": "github" 202 | } 203 | } 204 | }, 205 | "root": "root", 206 | "version": 7 207 | } 208 | -------------------------------------------------------------------------------- /flake.nix: -------------------------------------------------------------------------------- 1 | { 2 | 
description = "libZetta Development Environment"; 3 | inputs = { 4 | nixpkgs.url = "github:nixos/nixpkgs/nixos-unstable"; 5 | flake-utils.url = "github:numtide/flake-utils"; 6 | fenix = { 7 | url = "github:nix-community/fenix"; 8 | inputs.nixpkgs.follows = "nixpkgs"; 9 | }; 10 | pre-commit-hooks.url = "github:cachix/pre-commit-hooks.nix"; 11 | }; 12 | outputs = 13 | { self 14 | , nixpkgs 15 | , fenix 16 | , flake-utils 17 | , pre-commit-hooks 18 | , ... 19 | }: 20 | flake-utils.lib.eachSystem [ 21 | "x86_64-linux" 22 | "aarch64-linux" 23 | "aarch64-darwin" 24 | ] 25 | (system: 26 | let 27 | overlays = [ fenix.overlays.default ]; 28 | pkgs = import nixpkgs { inherit system overlays; }; 29 | in 30 | with pkgs; { 31 | checks = { 32 | pre-commit-check = pre-commit-hooks.lib.${system}.run { 33 | src = ./.; 34 | hooks = { 35 | nixpkgs-fmt.enable = true; 36 | shellcheck.enable = true; 37 | statix.enable = true; 38 | }; 39 | }; 40 | }; 41 | 42 | devShell = clangStdenv.mkDerivation rec { 43 | inherit (self.checks.${system}.pre-commit-check) shellHook; 44 | name = "libzetta-env"; 45 | nativeBuildInputs = [ 46 | (pkgs.fenix.complete.withComponents [ 47 | "cargo" 48 | "clippy" 49 | "rust-src" 50 | "rustc" 51 | "rustfmt" 52 | ]) 53 | rust-analyzer-nightly 54 | bacon 55 | cargo-cache 56 | cargo-deny 57 | cargo-diet 58 | cargo-sort 59 | cargo-sweep 60 | cargo-wipe 61 | cargo-outdated 62 | cargo-release 63 | git-cliff 64 | cmake 65 | gnumake 66 | openssl.dev 67 | pkg-config 68 | nixpkgs-fmt 69 | zfs.dev 70 | just 71 | vagrant 72 | ]; 73 | PROJECT_ROOT = builtins.toString ./.; 74 | }; 75 | }); 76 | } 77 | 78 | -------------------------------------------------------------------------------- /justfile: -------------------------------------------------------------------------------- 1 | workspace := "~/libzetta-rs" 2 | ubuntu_host := "zetta-ubuntu" 3 | freebsd13_host := "zetta-freebsd13" 4 | freebsd14_host := "zetta-freebsd14" 5 | rsync_exclude := "--exclude .git --exclude .idea 
--exclude target --exclude libzfs_core-sys/target" 6 | 7 | set positional-arguments 8 | 9 | test-freebsd14 args='': 10 | just delete-test-pool-on {{freebsd14_host}} 11 | just copy-code-to {{freebsd14_host}} 12 | -ssh {{freebsd14_host}} "sudo sh -c 'mdconfig -d -u 1; mdconfig -a -s 96m -u1'" 13 | ssh {{freebsd14_host}} '. "$HOME/.cargo/env";cd {{workspace}} && sudo env PATH=$PATH cargo test {{args}}' 14 | 15 | test-freebsd13 args='': 16 | just delete-test-pool-on {{freebsd13_host}} 17 | just copy-code-to {{freebsd13_host}} 18 | -ssh {{freebsd13_host}} "sudo sh -c 'mdconfig -d -u 1; mdconfig -a -s 96m -u1'" 19 | ssh {{freebsd13_host}} '. "$HOME/.cargo/env";cd {{workspace}} && sudo env PATH=$PATH LD_PRELOAD=/usr/local/lib/libzfs_core.so cargo test {{args}}' 20 | 21 | test-ubuntu args='': 22 | just delete-test-pool-on {{ubuntu_host}} 23 | just copy-code-to {{ubuntu_host}} 24 | ssh {{ubuntu_host}} '. "$HOME/.cargo/env";cd {{workspace}} && sudo env PATH=$PATH cargo test {{args}}' 25 | 26 | delete-test-pool-on host: 27 | -ssh {{host}} "sudo sh -c 'zpool list -H -oname | grep test | xargs zpool destroy'" 28 | 29 | copy-code-to host: 30 | rsync -az -e "ssh" {{rsync_exclude}} --progress ./ {{host}}:{{workspace}} 31 | 32 | 33 | -------------------------------------------------------------------------------- /libzetta.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Inner-Heaven/libzetta-rs/f0c5dbd135d740b7c2d9b90e76c4ebbbbf5c32df/libzetta.png -------------------------------------------------------------------------------- /rust-toolchain.toml: -------------------------------------------------------------------------------- 1 | [toolchain] 2 | channel = "stable" 3 | profile = "default" 4 | components = ["rust-src"] -------------------------------------------------------------------------------- /src/fuzzy.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 
| parsers::{Rule, StdoutParser}, 3 | pest::Parser, 4 | zpool::Zpool, 5 | }; 6 | 7 | pub fn fuzzy_target_1(data: &[u8]) { 8 | if let Ok(s) = std::str::from_utf8(data) { 9 | let _: Vec<_> = StdoutParser::parse(Rule::zpools, s) 10 | .map(|pairs| pairs.map(Zpool::from_pest_pair).collect()) 11 | .unwrap(); 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | #![recursion_limit = "256"] 2 | #![deny( 3 | nonstandard_style, 4 | future_incompatible, 5 | clippy::all, 6 | clippy::restriction, 7 | clippy::nursery 8 | )] 9 | #![allow( 10 | clippy::module_name_repetitions, 11 | clippy::multiple_inherent_impl, 12 | clippy::implicit_return, 13 | clippy::missing_inline_in_public_items, 14 | clippy::missing_docs_in_private_items 15 | )] 16 | 17 | //! Rust bindings to libzfs_core and wrapper around `zpool(8)`. 18 | //! 19 | //! This library intends to provide a safe, low-level interface to ZFS operator tools. As such, not 20 | //! much will be sugar coated here. 21 | //! 22 | //! # Overview 23 | //! ## zpool 24 | //! A feature complete wrapper around `zpool(8)` with a somewhat stable API. I can't 25 | //! guarantee that the API won't change at any moment, but I don't see a reason for it change at the 26 | //! moment. 27 | //! 28 | //! Refer to the [zpool module documentation](zpool/index.html) for more information. 29 | //! 30 | //! ## zfs 31 | //! Most of functionality of `libzfs_core` is covered with some gaps filled in by `open3`. 32 | //! 33 | //! Refer to the [zfs module documentation](zfs/index.html) for more information. 34 | //! 35 | //! # Usage 36 | //! 37 | //! This section is currently under contstruction. Meanwhile, look at integration tests for 38 | //! inspiration. 39 | //! 40 | //! # Project Structure 41 | //! ### parsers 42 | //! Module for PEG parsers backed by [Pest](https://pest.rs/). 43 | //! 44 | //! ### zpool 45 | //! 
This module contains everything you need to work with zpools. 46 | 47 | #[macro_use] 48 | extern crate derive_builder; 49 | #[macro_use] 50 | extern crate getset; 51 | #[macro_use] 52 | extern crate lazy_static; 53 | #[macro_use] 54 | extern crate quick_error; 55 | 56 | #[macro_use] 57 | pub extern crate slog; 58 | pub use pest; 59 | 60 | pub extern crate libnv; 61 | 62 | // library modules 63 | pub mod parsers; 64 | pub mod zfs; 65 | pub mod zpool; 66 | 67 | pub mod utils; 68 | 69 | #[cfg(fuzzing)] 70 | pub mod fuzzy; 71 | 72 | const VERSION: &str = env!("CARGO_PKG_VERSION"); 73 | 74 | pub mod log; 75 | pub use log::GlobalLogger; 76 | 77 | pub mod fuckery { 78 | extern "C" { 79 | pub(crate) fn fuckery_make_nvlist() -> *mut zfs_core_sys::nvlist_t; 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /src/log.rs: -------------------------------------------------------------------------------- 1 | use once_cell::sync::OnceCell; 2 | use slog::{Drain, Logger as SlogLogger}; 3 | use slog_stdlog::StdLog; 4 | use std::borrow::Borrow; 5 | use std::ops::Deref; 6 | 7 | static GLOBAL_LOGGER: OnceCell = OnceCell::new(); 8 | 9 | #[derive(Debug, Clone)] 10 | pub struct GlobalLogger { 11 | inner: SlogLogger, 12 | } 13 | 14 | impl Deref for GlobalLogger { 15 | type Target = SlogLogger; 16 | 17 | fn deref(&self) -> &Self::Target { 18 | self.inner.borrow() 19 | } 20 | } 21 | 22 | impl GlobalLogger { 23 | fn new(logger: SlogLogger) -> Self { 24 | GlobalLogger { inner: logger } 25 | } 26 | 27 | /// Get global logger. If you didn't call `Logger::setup` prior calling this then default logger 28 | /// created with `StdLog` as drain. 29 | pub fn get() -> &'static GlobalLogger { 30 | GLOBAL_LOGGER.get_or_init(|| { 31 | let root_logger = SlogLogger::root(StdLog.fuse(), o!()); 32 | GlobalLogger::new(logger_from_root_logger(&root_logger)) 33 | }) 34 | } 35 | 36 | /// Set global logger. Optional. 37 | /// Can only called once. 
Returns Ok(()) if the cell was empty and Err(value) if it was full. 38 | pub fn setup(root_logger: &SlogLogger) -> Result<(), GlobalLogger> { 39 | GLOBAL_LOGGER.set(GlobalLogger::new(logger_from_root_logger(root_logger))) 40 | } 41 | } 42 | 43 | fn logger_from_root_logger(root_logger: &SlogLogger) -> SlogLogger { 44 | root_logger.new(o!("zetta_version" => crate::VERSION)) 45 | } 46 | -------------------------------------------------------------------------------- /src/parsers/fixtures/SIGABRT.PID.84191.TIME.2019-08-21.20.04.09.fuzz: -------------------------------------------------------------------------------- 1 | pool: tank 2 | state: DEGRADED 3 | status: One o0 more devices could not be opened. SufficiWed Jan 20 15r13:59p2010 4 | config: 5 | 6 | NAME STATE READ WRITE CKSUM 7 | tank DEGRADED 1 0 0 8 | mirror-0 DEGRADED 0 0 0 9 | c1t0d0 ONLINE 0 0 0 10 | c1t1d0 UNAVAIL 0 tank DEGRADED 1 0 0 11 | mirror-0 DEGRADED 0 0 0 12 | c1t0d0 ONLINE 0 0 0 13 | c1t1d0 UNAVAIL0 0 tank DEGRADED 1 0 0 14 | mirror-0 DEGRADED 0 0 0 15 | c1t0d0 ONLINE 0 0 0 16 | c1t1d0 UNAVAIL0 999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999 9999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999999 0 0 cann ONLINE (0 0 0 17 |  c1t1d09 UNAVAIL0 0 tank DEGRADED 1 0 0 18 | mirror-0 DEGRADED 0 0 0 -------------------------------------------------------------------------------- /src/parsers/stdout.pest: -------------------------------------------------------------------------------- 1 | whitespace = _{ " " | "\t" } 2 | 3 | digit = _{ '0'..'9' } 4 | digits = { digit ~ (digit | "_")* } 5 | alpha = _{ 'a'..'z' | 'A'..'Z' } 6 | symbol = _{ "!" | "@" | "," | "." 
| ";" | ":" | "/" | "\'" | "\"" | "(" | ")" | "-" | "%" | "\\" } 7 | alpha_num = _{ digit | alpha } 8 | alpha_nums = _{ alpha_num+ } 9 | text = _{ (alpha_num | whitespace |symbol)+ } 10 | path = @{ !(raid_enum | pseudo_vdev_enum) ~ "/"? ~ (name ~ "/"?)+ } 11 | url = @{ ("https" | "http") ~ ":/" ~ path } 12 | state_enum = { "ONLINE" | "OFFLINE" | "UNAVAIL" | "DEGRADED" | "FAULTED" | "AVAIL" | "INUSE" | "REMOVED" } 13 | raid_enum = { "mirror" | "raidz1" | "raidz2" | "raidz3" } 14 | raid_name = ${ raid_enum ~ ("-" ~ digits)? } 15 | pseudo_vdev_enum = { "spare" | "replacing" } 16 | pseudo_vdev_name = ${ pseudo_vdev_enum ~ "-" ~ digits } 17 | name = @{ ("_" | "-" | "."| ":" | alpha_num)+ } 18 | 19 | pool_name = { whitespace* ~ "pool:" ~ whitespace ~ name ~ "\n" } 20 | pool_id = { whitespace* ~ "id:" ~ whitespace ~ digits ~ "\n" } 21 | state = { whitespace* ~ "state:" ~ whitespace ~ state_enum ~ "\n" } 22 | status = { whitespace* ~ "status:" ~ multi_line_text } 23 | action = { whitespace* ~ "action: " ~ multi_line_text } 24 | see = { whitespace* ~ "see:" ~ whitespace ~ url ~ "\n" } 25 | config = { whitespace* ~ "config:" ~ "\n" } 26 | comment = { whitespace* ~ "comment: " ~ text? ~ "\n" } 27 | reason = { text } 28 | error_statistics = { whitespace* ~ digits ~ whitespace* ~ digits ~ whitespace* ~ digits } 29 | 30 | pool_line = { whitespace* ~ name ~ whitespace* ~ state_enum ~ whitespace? ~ error_statistics? ~ whitespace* ~ reason? ~ "\n"? } 31 | raid_line = { whitespace* ~ raid_name ~ whitespace* ~ state_enum ~ whitespace? ~ error_statistics? ~ whitespace* ~ reason? ~ "\n"? } 32 | disk_line = { whitespace* ~ path ~ whitespace* ~ state_enum ~ whitespace? ~ error_statistics? ~ whitespace* ~ reason? ~ "\n"? } 33 | pseudo_vdev_line = { whitespace* ~ pseudo_vdev_name ~ whitespace* ~ state_enum ~ whitespace? ~ error_statistics? ~ whitespace* ~ "\n"? 
} 34 | 35 | scan_line = { whitespace* ~ "scan:" ~ whitespace* ~ multi_line_text } 36 | pool_headers = _{ whitespace* ~ "NAME" ~ whitespace* ~ "STATE" ~ whitespace* ~ "READ" ~ whitespace* ~ "WRITE" ~ whitespace* ~ "CKSUM" ~ "\n" } 37 | no_errors = { "No known data errors" } 38 | errors = { whitespace* ~ "errors:" ~ whitespace* ~ (no_errors | multi_line_text) } 39 | naked_vdev = { disk_line } 40 | raided_vdev = { raid_line ~ (pseudo_vdev_line | disk_line)+} 41 | vdev = _{ raided_vdev | naked_vdev } 42 | vdev_line = _{ vdev ~ "\n"? } 43 | vdevs = { vdev_line+ } 44 | logs = { whitespace* ~ "logs" ~ whitespace* ~ "\n" ~ whitespace* ~ vdevs ~ "\n"?} 45 | caches = { whitespace* ~ "cache" ~ whitespace* ~ "\n" ~ whitespace* ~ disk_line+ ~ "\n"?} 46 | spares = { whitespace* ~ "spares" ~ whitespace* ~ "\n" ~ whitespace* ~ disk_line+ ~ "\n"?} 47 | special = { whitespace* ~ "special" ~ whitespace* ~ "\n" ~ whitespace* ~ vdevs ~ "\n"?} 48 | 49 | zpool = { "\n"? ~ pool_name ~ pool_id? ~ state ~ status? ~ action? ~ comment? ~ see? ~ scan_line? ~ config ~ "\n" ~ pool_headers? ~ pool_line ~ vdevs ~ logs? ~ caches? ~ spares? ~ special? ~ errors? ~ "\n"?} 50 | zpools = _{ zpool* ~ whitespace* } 51 | 52 | text_line = _{ text ~ "\n" } 53 | aligned_text_line = _{ (whitespace{8} | "\t") ~ text ~ "\n" } 54 | multi_line_text = { text_line ~ aligned_text_line{, 5} } 55 | -------------------------------------------------------------------------------- /src/parsers/zfs.pest: -------------------------------------------------------------------------------- 1 | // 2 | // Created by intellij-pest on 2019-09-07 3 | // errors 4 | // Author: andoriyu 5 | // 6 | 7 | whitespace = _{ " "+ | "\t" } 8 | 9 | digit = _{ '0'..'9' } 10 | digits = { digit ~ (digit | "_")* } 11 | alpha = _{ 'a'..'z' | 'A'..'Z' } 12 | symbol = _{ "!" | "@" | "," | "." 
| ";" | ":" | "/" | "\'" | "\"" | "(" | ")" | "-" | "%" } 13 | alpha_num = _{ digit | alpha } 14 | alpha_nums = _{ alpha_num+ } 15 | text = _{ (alpha_num | whitespace |symbol)+ } 16 | path_segment = _{ ("_" | "-" | "." | ":" | alpha_num)+ } 17 | snapshot_segment = _{ "@" ~ path_segment} 18 | bookmark_segment = _{ "#" ~ path_segment} 19 | dataset_name = { path_segment ~ ( "/" ~ path_segment)* ~ ( snapshot_segment | bookmark_segment )? } 20 | dataset_type = { "filesystem" | "snapshot" | "volume" | "bookmark" } 21 | dataset_with_type = { dataset_type ~ whitespace ~ dataset_name } 22 | 23 | 24 | dataset_not_found = { "cannot open '" ~ dataset_name ~ "': dataset does not exist"} 25 | 26 | error = { 27 | dataset_not_found 28 | } 29 | 30 | datasets = { (dataset_name ~ "\n"?)* } 31 | datasets_with_type = { (dataset_with_type ~ "\n"?)* } 32 | -------------------------------------------------------------------------------- /src/parsers/zfs.rs: -------------------------------------------------------------------------------- 1 | use pest_derive::Parser; 2 | 3 | #[derive(Parser)] 4 | #[grammar = "parsers/zfs.pest"] // relative to src 5 | pub struct ZfsParser; 6 | 7 | #[cfg(test)] 8 | mod test { 9 | use super::{Rule, ZfsParser}; 10 | use pest::{consumes_to, parses_to, Parser}; 11 | 12 | #[test] 13 | fn test_parse_filesystem_name_root() { 14 | let line = "z"; 15 | 16 | parses_to! { 17 | parser: ZfsParser, 18 | input: line, 19 | rule: Rule::dataset_name, 20 | tokens: [ 21 | dataset_name(0,1) 22 | ] 23 | } 24 | 25 | let pairs = ZfsParser::parse(Rule::dataset_name, line).unwrap(); 26 | assert_eq!("z", pairs.as_str()); 27 | } 28 | #[test] 29 | fn test_parse_filesystem_name_nested() { 30 | let line = "z/foo/bar"; 31 | 32 | parses_to! 
{ 33 | parser: ZfsParser, 34 | input: line, 35 | rule: Rule::dataset_name, 36 | tokens: [ 37 | dataset_name(0,9) 38 | ] 39 | } 40 | 41 | let pairs = ZfsParser::parse(Rule::dataset_name, line).unwrap(); 42 | assert_eq!("z/foo/bar", pairs.as_str()); 43 | } 44 | #[test] 45 | fn test_parse_filesystem_name_root_snapshot() { 46 | let line = "z@backup-20190707"; 47 | 48 | parses_to! { 49 | parser: ZfsParser, 50 | input: line, 51 | rule: Rule::dataset_name, 52 | tokens: [ 53 | dataset_name(0,17) 54 | ] 55 | } 56 | 57 | let pairs = ZfsParser::parse(Rule::dataset_name, line).unwrap(); 58 | assert_eq!("z@backup-20190707", pairs.as_str()); 59 | } 60 | #[test] 61 | fn test_parse_filesystem_name_nexted_snapshot() { 62 | let line = "z/foo/bar@backup-20190707"; 63 | 64 | parses_to! { 65 | parser: ZfsParser, 66 | input: line, 67 | rule: Rule::dataset_name, 68 | tokens: [ 69 | dataset_name(0,25) 70 | ] 71 | } 72 | 73 | let pairs = ZfsParser::parse(Rule::dataset_name, line).unwrap(); 74 | assert_eq!("z/foo/bar@backup-20190707", pairs.as_str()); 75 | } 76 | 77 | #[test] 78 | fn test_parse_dataset_not_found() { 79 | let line = "cannot open 's/asd/asd': dataset does not exist"; 80 | let mut pairs = ZfsParser::parse(Rule::error, line).unwrap(); 81 | let dataset_not_found_pair = pairs.next().unwrap().into_inner().next().unwrap(); 82 | assert_eq!(Rule::dataset_not_found, dataset_not_found_pair.as_rule()); 83 | let dataset_name_pair = dataset_not_found_pair.into_inner().next().unwrap(); 84 | assert_eq!("s/asd/asd", dataset_name_pair.as_str()); 85 | } 86 | 87 | #[test] 88 | fn test_parse_datasets() { 89 | let lines = "s\ns/s/s/s\ns/d@test"; 90 | let expected = ["s", "s/s/s/s", "s/d@test"]; 91 | 92 | let mut pairs = ZfsParser::parse(Rule::datasets, lines).unwrap(); 93 | let datasets_pairs = pairs.next().unwrap().into_inner(); 94 | assert_eq!(3, datasets_pairs.clone().count()); 95 | 96 | for (idx, pair) in datasets_pairs.enumerate() { 97 | assert_eq!(Rule::dataset_name, pair.as_rule()); 98 | 
assert_eq!(expected[idx], pair.as_str()); 99 | } 100 | } 101 | 102 | #[test] 103 | fn test_parse_datasets_with_type() { 104 | let lines = r#"volume z/iohyve/rancher/disk0 105 | filesystem z/var/mail 106 | snapshot z/var/mail@backup-2019-08-08 107 | bookmark z/var/mail#backup-2019-08-08 108 | "#; 109 | let expected = vec![ 110 | ("volume", "z/iohyve/rancher/disk0"), 111 | ("filesystem", "z/var/mail"), 112 | ("snapshot", "z/var/mail@backup-2019-08-08"), 113 | ("bookmark", "z/var/mail#backup-2019-08-08"), 114 | ]; 115 | 116 | let mut pairs = ZfsParser::parse(Rule::datasets_with_type, lines).unwrap(); 117 | let datasets_pairs = pairs.next().unwrap().into_inner(); 118 | assert_eq!(4, datasets_pairs.clone().count()); 119 | 120 | for (idx, pair) in datasets_pairs.enumerate() { 121 | assert_eq!(Rule::dataset_with_type, pair.as_rule()); 122 | let mut dataset_with_type_pair = pair.into_inner(); 123 | let dataset_type = dataset_with_type_pair.next().unwrap(); 124 | let dataset_name = dataset_with_type_pair.next().unwrap(); 125 | assert_eq!( 126 | expected[idx], 127 | (dataset_type.as_str(), dataset_name.as_str()) 128 | ); 129 | } 130 | } 131 | 132 | #[test] 133 | fn test_issue_126() { 134 | let lines = r#"z/ROOT 135 | z/ROOT/default 136 | z/docker 137 | z/iohyve 138 | z/iohyve/Firmware 139 | z/iohyve/ISO 140 | z/iohyve/ISO/rancheros-v1.4.0.iso 141 | z/iohyve/rancher 142 | z/portshaker 143 | z/portshaker/cache 144 | z/portshaker/cache/andoriyu-local 145 | z/portshaker/cache/freebsd-svn 146 | z/portshaker/cache/vscode-git 147 | z/poudriere 148 | z/poudriere/data 149 | z/poudriere/data/.m 150 | z/poudriere/data/cache 151 | z/poudriere/data/logs 152 | z/poudriere/data/packages 153 | z/poudriere/data/wrkdirs 154 | z/poudriere/jails 155 | z/poudriere/jails/live 156 | z/poudriere/ports 157 | z/poudriere/ports/dev 158 | z/poudriere/ports/prestine 159 | z/poudriere/ports/system 160 | z/tmp 161 | z/usr 162 | z/usr/home 163 | z/usr/ports 164 | z/usr/ports/distfiles 165 | z/usr/src 166 | 
z/var 167 | z/var/ccache 168 | z/var/crash 169 | z/var/log 170 | z/var/mail 171 | z/var/tmp 172 | "#; 173 | 174 | let mut pairs = ZfsParser::parse(Rule::datasets, lines).unwrap(); 175 | let datasets_pairs = pairs.next().unwrap().into_inner(); 176 | assert_eq!(38, datasets_pairs.clone().count()); 177 | } 178 | 179 | #[test] 180 | fn test_issue_261() { 181 | let lines = r#"z@2022-08-13T12:29-04 182 | z/data@2023-08-19T01:06-04 183 | z/data@2023-12-12T18:45-05 184 | z/data/home@2023-08-19T01:06-04 185 | z/data/home@2023-12-12T18:45-05 186 | z/data/home/username@2023-12-12T18:45-05 187 | z/data/home/username/image-2023-09-09T08:50-04@2023-08-19T01:06-04 188 | z/data/root@2023-08-19T01:06-04 189 | z/data/root@2023-12-12T18:45-05 190 | z/data/srv@2023-08-19T01:06-04 191 | z/data/srv@2023-12-12T18:45-05 192 | z/data/usr@2023-08-19T01:06-04 193 | z/data/usr@2023-12-12T18:45-05 194 | z/data/usr/local@2023-08-19T01:06-04 195 | z/data/usr/local@2023-12-12T18:45-05 196 | z/data/var@2023-08-19T01:06-04 197 | z/data/var@2023-12-12T18:45-05 198 | z/data/var/games@2023-08-19T01:06-04 199 | z/data/var/games@2023-12-12T18:45-05 200 | z/data/var/lib@2023-08-19T01:06-04 201 | z/data/var/lib@2023-12-12T18:45-05 202 | z/data/var/lib/AccountsService@2023-08-19T01:06-04 203 | z/data/var/lib/AccountsService@2023-12-12T18:45-05 204 | z/data/var/lib/docker@2023-08-19T01:06-04 205 | z/data/var/lib/docker@2023-12-12T18:45-05 206 | z/data/var/lib/libvirt@2023-08-19T01:06-04 207 | z/data/var/lib/libvirt@2023-12-12T18:45-05 208 | z/data/var/lib/lxc@2023-08-19T01:06-04 209 | z/data/var/lib/lxc@2023-12-12T18:45-05 210 | z/data/var/lib/nfs@2023-08-19T01:06-04 211 | z/data/var/lib/nfs@2023-12-12T18:45-05 212 | z/data/var/log@2023-08-19T01:06-04 213 | z/data/var/log@2023-12-12T18:45-05 214 | z/data/var/snap@2023-08-19T01:06-04 215 | z/data/var/snap@2023-12-12T18:45-05 216 | z/data/var/spool@2023-08-19T01:06-04 217 | z/data/var/spool@2023-12-12T18:45-05 218 | z/data/var/www@2023-08-19T01:06-04 219 | 
/// Parse a float out of `zfs`/`zpool` output, tolerating a trailing `x`.
///
/// Some ratio values (e.g. `compressratio` renders as `1.25x`) carry an `x`
/// suffix while others are plain numbers. The suffix, if present, is removed
/// from `input` in place before parsing.
#[inline(always)]
pub fn parse_float(input: &mut String) -> Result<f64, std::num::ParseFloatError> {
    // `ends_with` inspects only the tail bytes, unlike the previous
    // `chars().last()`, which walked the entire string to reach one char.
    if input.ends_with('x') {
        input.pop();
    }
    input.parse()
}
8 | pub struct DelegatingZfsEngine { 9 | lzc: ZfsLzc, 10 | open3: ZfsOpen3, 11 | } 12 | 13 | impl DelegatingZfsEngine { 14 | pub fn new() -> Result { 15 | let lzc = ZfsLzc::new()?; 16 | let open3 = ZfsOpen3::new(); 17 | Ok(DelegatingZfsEngine { lzc, open3 }) 18 | } 19 | } 20 | 21 | impl ZfsEngine for DelegatingZfsEngine { 22 | fn exists>(&self, name: N) -> Result { 23 | self.lzc.exists(name) 24 | } 25 | 26 | fn create(&self, request: CreateDatasetRequest) -> Result<()> { 27 | self.lzc.create(request) 28 | } 29 | 30 | fn snapshot( 31 | &self, 32 | snapshots: &[PathBuf], 33 | user_properties: Option>, 34 | ) -> Result<()> { 35 | self.lzc.snapshot(snapshots, user_properties) 36 | } 37 | 38 | fn bookmark(&self, bookmarks: &[BookmarkRequest]) -> Result<()> { 39 | self.lzc.bookmark(bookmarks) 40 | } 41 | 42 | fn destroy>(&self, name: N) -> Result<()> { 43 | self.open3.destroy(name) 44 | } 45 | 46 | fn destroy_snapshots(&self, snapshots: &[PathBuf], timing: DestroyTiming) -> Result<()> { 47 | self.lzc.destroy_snapshots(snapshots, timing) 48 | } 49 | 50 | fn destroy_bookmarks(&self, bookmarks: &[PathBuf]) -> Result<()> { 51 | self.lzc.destroy_bookmarks(bookmarks) 52 | } 53 | 54 | fn list>(&self, pool: N) -> Result> { 55 | self.open3.list(pool) 56 | } 57 | 58 | fn list_filesystems>(&self, pool: N) -> Result> { 59 | self.open3.list_filesystems(pool) 60 | } 61 | 62 | fn list_snapshots>(&self, pool: N) -> Result> { 63 | self.open3.list_snapshots(pool) 64 | } 65 | 66 | fn list_bookmarks>(&self, pool: N) -> Result> { 67 | self.open3.list_bookmarks(pool) 68 | } 69 | 70 | fn list_volumes>(&self, pool: N) -> Result> { 71 | self.open3.list_volumes(pool) 72 | } 73 | 74 | fn read_properties>(&self, path: N) -> Result { 75 | self.open3.read_properties(path) 76 | } 77 | 78 | fn send_full, FD: AsRawFd>( 79 | &self, 80 | path: N, 81 | fd: FD, 82 | flags: SendFlags, 83 | ) -> Result<()> { 84 | self.lzc.send_full(path, fd, flags) 85 | } 86 | 87 | fn send_incremental, F: Into, FD: AsRawFd>( 
88 | &self, 89 | path: N, 90 | from: F, 91 | fd: FD, 92 | flags: SendFlags, 93 | ) -> Result<()> { 94 | self.lzc.send_incremental(path, from, fd, flags) 95 | } 96 | 97 | fn run_channel_program>( 98 | &self, 99 | pool: N, 100 | program: &str, 101 | instr_limit: u64, 102 | mem_limit: u64, 103 | sync: bool, 104 | args: libnv::nvpair::NvList, 105 | ) -> Result { 106 | self.lzc 107 | .run_channel_program(pool, program, instr_limit, mem_limit, sync, args) 108 | } 109 | } 110 | -------------------------------------------------------------------------------- /src/zfs/description.rs: -------------------------------------------------------------------------------- 1 | use std::default::Default; 2 | use strum_macros::{AsRefStr, Display, EnumString}; 3 | 4 | #[derive(AsRefStr, EnumString, Display, Eq, PartialEq, Debug, Clone)] 5 | pub enum DatasetKind { 6 | #[strum(serialize = "filesystem")] 7 | Filesystem, 8 | #[strum(serialize = "volume")] 9 | Volume, 10 | #[strum(serialize = "snapshot")] 11 | Snapshot, 12 | } 13 | 14 | impl Default for DatasetKind { 15 | fn default() -> Self { 16 | DatasetKind::Filesystem 17 | } 18 | } 19 | 20 | impl DatasetKind { 21 | pub fn as_c_uint(&self) -> zfs_core_sys::lzc_dataset_type::Type { 22 | match self { 23 | DatasetKind::Filesystem => zfs_core_sys::lzc_dataset_type::LZC_DATSET_TYPE_ZFS, 24 | DatasetKind::Volume => zfs_core_sys::lzc_dataset_type::LZC_DATSET_TYPE_ZVOL, 25 | _ => panic!("Not supported"), 26 | } 27 | } 28 | 29 | pub fn as_nvpair_value(&self) -> &str { 30 | match &self { 31 | DatasetKind::Filesystem => "zfs", 32 | DatasetKind::Volume => "zvol", 33 | _ => panic!("Unsupported dataset kind"), 34 | } 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /src/zfs/errors.rs: -------------------------------------------------------------------------------- 1 | use crate::parsers::zfs::{Rule, ZfsParser}; 2 | use pest::Parser; 3 | use std::{borrow::Cow, collections::HashMap, io, path::PathBuf}; 
4 | 5 | pub type Result = std::result::Result; 6 | pub type ValidationResult = std::result::Result; 7 | 8 | quick_error! { 9 | #[derive(Debug)] 10 | pub enum Error { 11 | /// `zfs not found in the PATH. Open3 specific error. 12 | CmdNotFound {} 13 | LZCInitializationFailed(err: std::io::Error) { 14 | cause(err) 15 | } 16 | NvOpError(err: libnv::NvError) { 17 | cause(err) 18 | from() 19 | } 20 | Io(err: std::io::Error) { 21 | cause(err) 22 | from() 23 | } 24 | Unknown {} 25 | UnknownSoFar(err: String) {} 26 | DatasetNotFound(dataset: PathBuf) {} 27 | ValidationErrors(errors: Vec) { 28 | from() 29 | } 30 | MultiOpError(err: HashMap) { 31 | from() 32 | } 33 | ChanProgInval(err: HashMap) {} 34 | ChanProgRuntime(err: HashMap) {} 35 | Unimplemented {} 36 | } 37 | } 38 | 39 | impl From for Error { 40 | fn from(err: ValidationError) -> Error { 41 | Error::ValidationErrors(vec![err]) 42 | } 43 | } 44 | 45 | impl Error { 46 | pub fn kind(&self) -> ErrorKind { 47 | match self { 48 | Error::CmdNotFound => ErrorKind::CmdNotFound, 49 | Error::LZCInitializationFailed(_) => ErrorKind::LZCInitializationFailed, 50 | Error::NvOpError(_) => ErrorKind::NvOpError, 51 | Error::Io(_) => ErrorKind::Io, 52 | Error::DatasetNotFound(_) => ErrorKind::DatasetNotFound, 53 | Error::Unknown | Error::UnknownSoFar(_) => ErrorKind::Unknown, 54 | Error::ValidationErrors(_) => ErrorKind::ValidationErrors, 55 | Error::MultiOpError(_) => ErrorKind::MultiOpError, 56 | Error::ChanProgInval(_) => ErrorKind::ChanProgInval, 57 | Error::ChanProgRuntime(_) => ErrorKind::ChanProgRuntime, 58 | Error::Unimplemented => ErrorKind::Unimplemented, 59 | } 60 | } 61 | 62 | fn unknown_so_far(stderr: Cow<'_, str>) -> Self { 63 | Error::UnknownSoFar(stderr.into()) 64 | } 65 | 66 | #[allow(clippy::option_unwrap_used)] 67 | #[allow(clippy::wildcard_enum_match_arm)] 68 | pub(crate) fn from_stderr(stderr_raw: &[u8]) -> Self { 69 | let stderr = String::from_utf8_lossy(stderr_raw); 70 | if let Ok(mut pairs) = 
ZfsParser::parse(Rule::error, &stderr) { 71 | // Pest: error > dataset_not_found > dataset_name: "s/asd/asd" 72 | let error_pair = pairs.next().unwrap().into_inner().next().unwrap(); 73 | match error_pair.as_rule() { 74 | Rule::dataset_not_found => { 75 | let dataset_name_pair = error_pair.into_inner().next().unwrap(); 76 | Error::DatasetNotFound(PathBuf::from(dataset_name_pair.as_str())) 77 | } 78 | _ => Self::unknown_so_far(stderr), 79 | } 80 | } else { 81 | Self::unknown_so_far(stderr) 82 | } 83 | } 84 | 85 | pub fn invalid_input() -> Self { 86 | Error::Io(io::Error::from(io::ErrorKind::InvalidInput)) 87 | } 88 | } 89 | 90 | #[derive(Copy, Clone, Eq, PartialEq, Debug)] 91 | pub enum ErrorKind { 92 | CmdNotFound, 93 | LZCInitializationFailed, 94 | NvOpError, 95 | InvalidInput, 96 | Io, 97 | Unknown, 98 | DatasetNotFound, 99 | ValidationErrors, 100 | Unimplemented, 101 | MultiOpError, 102 | ChanProgInval, 103 | ChanProgRuntime, 104 | } 105 | 106 | impl PartialEq for Error { 107 | fn eq(&self, other: &Self) -> bool { 108 | match (self, other) { 109 | (Error::ValidationErrors(l), Error::ValidationErrors(r)) => l == r, 110 | _ => self.kind() == other.kind(), 111 | } 112 | } 113 | } 114 | quick_error! 
{ 115 | #[derive(Debug, Eq, PartialEq)] 116 | pub enum ValidationError { 117 | MultipleZpools(zpools: Vec) {} 118 | NameTooLong(dataset: PathBuf) {} 119 | MissingName(dataset: PathBuf) {} 120 | MissingSnapshotName(dataset: PathBuf) {} 121 | MissingPool(dataset: PathBuf) {} 122 | Unknown(dataset: PathBuf) {} 123 | } 124 | } 125 | -------------------------------------------------------------------------------- /src/zfs/fixtures/bookmark_properties_freebsd.sorted: -------------------------------------------------------------------------------- 1 | z/var/tmp#backup-2019-08-08 createtxg 2967653 - 2 | z/var/tmp#backup-2019-08-08 creation 1565321370 - 3 | z/var/tmp#backup-2019-08-08 guid 12396914211240477066 - 4 | z/var/tmp#backup-2019-08-08 type bookmark - 5 | -------------------------------------------------------------------------------- /src/zfs/fixtures/filesystem_properties_freebsd: -------------------------------------------------------------------------------- 1 | z/usr/home type filesystem - 2 | z/usr/home creation 1493670099 - 3 | z/usr/home used 102563762176 - 4 | z/usr/home available 161379753984 - 5 | z/usr/home referenced 97392148480 - 6 | z/usr/home compressratio 1.25x - 7 | z/usr/home mounted yes - 8 | z/usr/home quota 0 default 9 | z/usr/home reservation 0 default 10 | z/usr/home recordsize 131072 default 11 | z/usr/home mountpoint /usr/home local 12 | z/usr/home sharenfs off local 13 | z/usr/home checksum on default 14 | z/usr/home compression lz4 inherited from z 15 | z/usr/home atime off inherited from z 16 | z/usr/home devices on default 17 | z/usr/home exec on default 18 | z/usr/home setuid on default 19 | z/usr/home readonly off inherited from z 20 | z/usr/home jailed off default 21 | z/usr/home snapdir hidden default 22 | z/usr/home aclmode discard default 23 | z/usr/home aclinherit restricted default 24 | z/usr/home createtxg 46918 - 25 | z/usr/home canmount on default 26 | z/usr/home xattr off temporary 27 | z/usr/home copies 1 default 28 | 
z/usr/home version 5 - 29 | z/usr/home utf8only off - 30 | z/usr/home normalization none - 31 | z/usr/home casesensitivity sensitive - 32 | z/usr/home vscan off default 33 | z/usr/home nbmand off default 34 | z/usr/home sharesmb off default 35 | z/usr/home refquota 0 default 36 | z/usr/home refreservation 0 default 37 | z/usr/home guid 10533576440524459469 - 38 | z/usr/home primarycache all default 39 | z/usr/home secondarycache all default 40 | z/usr/home usedbysnapshots 5171613696 - 41 | z/usr/home usedbydataset 97392148480 - 42 | z/usr/home usedbychildren 0 - 43 | z/usr/home usedbyrefreservation 0 - 44 | z/usr/home logbias latency default 45 | z/usr/home dedup off default 46 | z/usr/home mlslabel - 47 | z/usr/home sync standard default 48 | z/usr/home dnodesize legacy default 49 | z/usr/home refcompressratio 1.23x - 50 | z/usr/home written 35372666880 - 51 | z/usr/home logicalused 125882283520 - 52 | z/usr/home logicalreferenced 117966950912 - 53 | z/usr/home volmode default default 54 | z/usr/home filesystem_limit 18446744073709551615 default 55 | z/usr/home snapshot_limit 18446744073709551615 default 56 | z/usr/home filesystem_count 18446744073709551615 default 57 | z/usr/home snapshot_count 18446744073709551615 default 58 | z/usr/home redundant_metadata all default 59 | -------------------------------------------------------------------------------- /src/zfs/fixtures/filesystem_properties_freebsd.sorted: -------------------------------------------------------------------------------- 1 | z/usr/home aclinherit restricted default 2 | z/usr/home aclmode discard default 3 | z/usr/home atime off inherited from z 4 | z/usr/home available 161379753984 - 5 | z/usr/home canmount on default 6 | z/usr/home casesensitivity sensitive - 7 | z/usr/home checksum on default 8 | z/usr/home compression lz4 inherited from z 9 | z/usr/home compressratio 1.25x - 10 | z/usr/home copies 1 default 11 | z/usr/home createtxg 46918 - 12 | z/usr/home creation 1493670099 - 13 | z/usr/home 
dedup off default 14 | z/usr/home devices on default 15 | z/usr/home dnodesize legacy default 16 | z/usr/home exec on default 17 | z/usr/home filesystem_count 18446744073709551615 default 18 | z/usr/home filesystem_limit 18446744073709551615 default 19 | z/usr/home guid 10533576440524459469 - 20 | z/usr/home jailed off default 21 | z/usr/home logbias latency default 22 | z/usr/home logicalreferenced 117966950912 - 23 | z/usr/home logicalused 125882283520 - 24 | z/usr/home mlslabel - 25 | z/usr/home mounted yes - 26 | z/usr/home mountpoint /usr/home local 27 | z/usr/home nbmand off default 28 | z/usr/home normalization none - 29 | z/usr/home primarycache all default 30 | z/usr/home quota 0 default 31 | z/usr/home readonly off inherited from z 32 | z/usr/home recordsize 131072 default 33 | z/usr/home redundant_metadata all default 34 | z/usr/home refcompressratio 1.23x - 35 | z/usr/home referenced 97392148480 - 36 | z/usr/home refquota 0 default 37 | z/usr/home refreservation 0 default 38 | z/usr/home reservation 0 default 39 | z/usr/home secondarycache all default 40 | z/usr/home setuid on default 41 | z/usr/home sharenfs off local 42 | z/usr/home sharesmb off default 43 | z/usr/home snapdir hidden default 44 | z/usr/home snapshot_count 18446744073709551615 default 45 | z/usr/home snapshot_limit 18446744073709551615 default 46 | z/usr/home sync standard default 47 | z/usr/home type filesystem - 48 | z/usr/home used 102563762176 - 49 | z/usr/home usedbychildren 0 - 50 | z/usr/home usedbydataset 97392148480 - 51 | z/usr/home usedbyrefreservation 0 - 52 | z/usr/home usedbysnapshots 5171613696 - 53 | z/usr/home utf8only off - 54 | z/usr/home version 5 - 55 | z/usr/home volmode default default 56 | z/usr/home vscan off default 57 | z/usr/home written 35372666880 - 58 | z/usr/home xattr off temporary 59 | -------------------------------------------------------------------------------- /src/zfs/fixtures/snapshot_properties_freebsd.sorted: 
-------------------------------------------------------------------------------- 1 | z/usr@backup-2019-11-24 casesensitivity sensitive - 2 | z/usr@backup-2019-11-24 clones - 3 | z/usr@backup-2019-11-24 compressratio 1.00x - 4 | z/usr@backup-2019-11-24 createtxg 3034392 - 5 | z/usr@backup-2019-11-24 creation 1574590597 - 6 | z/usr@backup-2019-11-24 defer_destroy off - 7 | z/usr@backup-2019-11-24 devices on default 8 | z/usr@backup-2019-11-24 exec on default 9 | z/usr@backup-2019-11-24 guid 6033436932844487115 - 10 | z/usr@backup-2019-11-24 logicalreferenced 37376 - 11 | z/usr@backup-2019-11-24 mlslabel - 12 | z/usr@backup-2019-11-24 nbmand off default 13 | z/usr@backup-2019-11-24 normalization none - 14 | z/usr@backup-2019-11-24 primarycache all default 15 | z/usr@backup-2019-11-24 refcompressratio 1.00x - 16 | z/usr@backup-2019-11-24 referenced 90210 - 17 | z/usr@backup-2019-11-24 secondarycache all default 18 | z/usr@backup-2019-11-24 setuid on default 19 | z/usr@backup-2019-11-24 type snapshot - 20 | z/usr@backup-2019-11-24 used 0 - 21 | z/usr@backup-2019-11-24 userrefs 0 - 22 | z/usr@backup-2019-11-24 utf8only off - 23 | z/usr@backup-2019-11-24 version 5 - 24 | z/usr@backup-2019-11-24 volmode default default 25 | z/usr@backup-2019-11-24 written 0 - 26 | z/usr@backup-2019-11-24 xattr on default 27 | -------------------------------------------------------------------------------- /src/zfs/fixtures/volume_properties_freebsd.sorted: -------------------------------------------------------------------------------- 1 | z/iohyve/rancher/disk0 available 175800672256 - 2 | z/iohyve/rancher/disk0 checksum on default 3 | z/iohyve/rancher/disk0 compression lz4 inherited from z 4 | z/iohyve/rancher/disk0 compressratio 1.30x - 5 | z/iohyve/rancher/disk0 copies 1 default 6 | z/iohyve/rancher/disk0 createtxg 2432774 - 7 | z/iohyve/rancher/disk0 creation 1531943675 - 8 | z/iohyve/rancher/disk0 dedup off default 9 | z/iohyve/rancher/disk0 guid 8670277898870184975 - 10 | 
z/iohyve/rancher/disk0 logbias latency default 11 | z/iohyve/rancher/disk0 logicalreferenced 3618547712 - 12 | z/iohyve/rancher/disk0 logicalused 3618551808 - 13 | z/iohyve/rancher/disk0 mlslabel - 14 | z/iohyve/rancher/disk0 primarycache all default 15 | z/iohyve/rancher/disk0 readonly off inherited from z 16 | z/iohyve/rancher/disk0 redundant_metadata all default 17 | z/iohyve/rancher/disk0 refcompressratio 1.30x - 18 | z/iohyve/rancher/disk0 referenced 2781577216 - 19 | z/iohyve/rancher/disk0 refreservation 70871154688 local 20 | z/iohyve/rancher/disk0 reservation 0 default 21 | z/iohyve/rancher/disk0 secondarycache all default 22 | z/iohyve/rancher/disk0 snapshot_count 18446744073709551615 default 23 | z/iohyve/rancher/disk0 snapshot_limit 18446744073709551615 default 24 | z/iohyve/rancher/disk0 sync standard default 25 | z/iohyve/rancher/disk0 type volume - 26 | z/iohyve/rancher/disk0 used 73652740096 - 27 | z/iohyve/rancher/disk0 usedbychildren 0 - 28 | z/iohyve/rancher/disk0 usedbydataset 2781577216 - 29 | z/iohyve/rancher/disk0 usedbyrefreservation 70871146496 - 30 | z/iohyve/rancher/disk0 usedbysnapshots 16384 - 31 | z/iohyve/rancher/disk0 volblocksize 8192 default 32 | z/iohyve/rancher/disk0 volmode dev local 33 | z/iohyve/rancher/disk0 volsize 68719476736 local 34 | z/iohyve/rancher/disk0 written 8192 - 35 | -------------------------------------------------------------------------------- /src/zfs/lzc.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | zfs::{ 3 | BookmarkRequest, Checksum, Compression, Copies, CreateDatasetRequest, DatasetKind, 4 | DestroyTiming, Error, Result, SendFlags, SnapDir, ValidationError, ZfsEngine, 5 | }, 6 | GlobalLogger, 7 | }; 8 | use cstr_argument::CStrArgument; 9 | use libnv::nvpair::NvList; 10 | use slog::Logger; 11 | 12 | use crate::zfs::{ 13 | errors::Error::ValidationErrors, 14 | properties::{AclInheritMode, AclMode, ZfsProp}, 15 | PathExt, 16 | }; 17 | use std::{ 18 
| collections::HashMap, 19 | ffi::CString, 20 | os::unix::io::{AsRawFd, RawFd}, 21 | path::PathBuf, 22 | ptr::null_mut, 23 | }; 24 | use zfs_core_sys as sys; 25 | 26 | #[cfg(target_os = "freebsd")] 27 | const ECHRNG: libc::c_int = libc::ENXIO; 28 | #[cfg(target_os = "linux")] 29 | const ECHRNG: libc::c_int = libc::ECHRNG; 30 | 31 | #[derive(Debug, Clone)] 32 | pub struct ZfsLzc { 33 | logger: Logger, 34 | } 35 | 36 | impl ZfsLzc { 37 | /// Initialize libzfs_core backed ZfsEngine. 38 | /// If root logger is None, then StdLog drain used. 39 | pub fn new() -> Result { 40 | let errno = unsafe { sys::libzfs_core_init() }; 41 | 42 | if errno != 0 { 43 | let io_error = std::io::Error::from_raw_os_error(errno); 44 | return Err(Error::LZCInitializationFailed(io_error)); 45 | } 46 | let logger = GlobalLogger::get().new(o!("zetta_module" => "zfs", "zfs_impl" => "lzc")); 47 | 48 | Ok(ZfsLzc { logger }) 49 | } 50 | 51 | pub fn logger(&self) -> &Logger { 52 | &self.logger 53 | } 54 | 55 | fn send( 56 | &self, 57 | path: PathBuf, 58 | from: Option, 59 | fd: RawFd, 60 | flags: SendFlags, 61 | ) -> Result<()> { 62 | let snapshot = 63 | CString::new(path.to_str().unwrap()).expect("Failed to create CString from path"); 64 | let snapshot_ptr = snapshot.as_ptr(); 65 | let from_cstr = from.map(|f| { 66 | CString::new(f.to_str().unwrap()).expect("Failed to create CString from path") 67 | }); 68 | let fd_raw = fd; 69 | let errno = if let Some(src) = from_cstr { 70 | unsafe { zfs_core_sys::lzc_send(snapshot_ptr, src.as_ptr(), fd_raw, flags.bits) } 71 | } else { 72 | unsafe { zfs_core_sys::lzc_send(snapshot_ptr, std::ptr::null(), fd_raw, flags.bits) } 73 | }; 74 | 75 | match errno { 76 | 0 => Ok(()), 77 | _ => { 78 | let io_error = std::io::Error::from_raw_os_error(errno); 79 | Err(Error::Io(io_error)) 80 | } 81 | } 82 | } 83 | } 84 | 85 | impl ZfsEngine for ZfsLzc { 86 | fn exists>(&self, name: N) -> Result { 87 | let path = name.into(); 88 | let n = path.to_str().expect("Invalid 
Path").into_cstr(); 89 | let ret = unsafe { sys::lzc_exists(n.as_ref().as_ptr()) }; 90 | 91 | if ret == 1 { 92 | Ok(true) 93 | } else { 94 | Ok(false) 95 | } 96 | } 97 | 98 | fn create(&self, request: CreateDatasetRequest) -> Result<()> { 99 | request.validate()?; 100 | 101 | //let mut props = nvpair::NvList::new()?; 102 | let mut props = NvList::default(); 103 | let name_c_string = 104 | CString::new(request.name().to_str().expect("Non UTF-8 name")).expect("NULL in name"); 105 | // LZC wants _everything_ as u64 even booleans. 106 | if let Some(acl_inherit) = request.acl_inherit { 107 | props.insert_u64(AclInheritMode::nv_key(), acl_inherit.as_nv_value())?; 108 | } 109 | if let Some(acl_mode) = request.acl_mode { 110 | props.insert_u64(AclMode::nv_key(), acl_mode.as_nv_value())?; 111 | } 112 | if let Some(atime) = request.atime { 113 | props.insert_u64("atime", bool_to_u64(atime))?; 114 | } 115 | if let Some(checksum) = request.checksum { 116 | props.insert_u64(Checksum::nv_key(), checksum.as_nv_value())?; 117 | } 118 | if let Some(compression) = request.compression { 119 | props.insert_u64(Compression::nv_key(), compression.as_nv_value())?; 120 | } 121 | if let Some(copies) = request.copies() { 122 | props.insert_u64(Copies::nv_key(), copies.as_nv_value())?; 123 | } 124 | if let Some(devices) = request.devices { 125 | props.insert_u64("devices", bool_to_u64(devices))?; 126 | } 127 | if let Some(exec) = request.exec { 128 | props.insert_u64("exec", bool_to_u64(exec))?; 129 | } 130 | // saved fore mount point 131 | if let Some(primary_cache) = request.primary_cache { 132 | props.insert_u64("primarycache", primary_cache.as_nv_value())?; 133 | } 134 | if let Some(quota) = request.quota { 135 | props.insert_u64("quota", quota)?; 136 | } 137 | if let Some(readonly) = request.readonly { 138 | props.insert_u64("readonly", bool_to_u64(readonly))?; 139 | } 140 | if let Some(record_size) = request.record_size { 141 | props.insert_u64("recordsize", record_size)?; 142 | } 143 
| if let Some(ref_quota) = request.ref_quota { 144 | props.insert_u64("refquota", ref_quota)?; 145 | } 146 | if let Some(ref_reservation) = request.ref_reservation { 147 | props.insert_u64("refreservation", ref_reservation)?; 148 | } 149 | if let Some(secondary_cache) = request.secondary_cache { 150 | props.insert_u64("secondarycache", secondary_cache.as_nv_value())?; 151 | } 152 | if let Some(setuid) = request.setuid { 153 | props.insert_u64("setuid", bool_to_u64(setuid))?; 154 | } 155 | if let Some(snap_dir) = request.snap_dir { 156 | props.insert_u64(SnapDir::nv_key(), snap_dir.as_nv_value())?; 157 | } 158 | 159 | if request.kind == DatasetKind::Filesystem 160 | && (request.volume_size.is_some() || request.volume_block_size.is_some()) 161 | { 162 | return Err(Error::invalid_input()); 163 | } 164 | 165 | if request.kind == DatasetKind::Volume && request.volume_size.is_none() { 166 | return Err(Error::invalid_input()); 167 | } 168 | 169 | if let Some(vol_size) = request.volume_size { 170 | props.insert_u64("volsize", vol_size)?; 171 | } 172 | if let Some(vol_block_size) = request.volume_block_size { 173 | props.insert_u64("volblocksize", vol_block_size)?; 174 | } 175 | 176 | if let Some(xattr) = request.xattr { 177 | props.insert("xattr", bool_to_u64(xattr))?; 178 | } 179 | if let Some(user_props) = request.user_properties() { 180 | for (key, value) in user_props { 181 | props.insert_string(key.as_str(), value.as_str())?; 182 | } 183 | } 184 | let errno = unsafe { 185 | zfs_core_sys::lzc_create( 186 | name_c_string.as_ref().as_ptr(), 187 | request.kind().as_c_uint(), 188 | props.as_ptr(), 189 | std::ptr::null_mut(), 190 | 0, 191 | ) 192 | }; 193 | 194 | match errno { 195 | 0 => Ok(()), 196 | _ => { 197 | let io_error = std::io::Error::from_raw_os_error(errno); 198 | Err(Error::Io(io_error)) 199 | } 200 | } 201 | } 202 | 203 | fn snapshot( 204 | &self, 205 | snapshots: &[PathBuf], 206 | user_properties: Option>, 207 | ) -> Result<()> { 208 | let validation_errors: 
Vec = snapshots 209 | .iter() 210 | .map(PathBuf::validate) 211 | .filter_map(Result::err) 212 | .collect(); 213 | if !validation_errors.is_empty() { 214 | return Err(ValidationErrors(validation_errors)); 215 | } 216 | 217 | let mut snapshots_list = NvList::default(); 218 | let mut props = NvList::default(); 219 | for snap in snapshots { 220 | snapshots_list.insert(&*snap.to_string_lossy(), true)?; 221 | } 222 | let mut errors_list_ptr = null_mut(); 223 | if let Some(user_properties) = user_properties { 224 | for (key, value) in user_properties { 225 | props.insert_string(key.as_str(), value.as_str())?; 226 | } 227 | } 228 | let errno = unsafe { 229 | zfs_core_sys::lzc_snapshot( 230 | snapshots_list.as_ptr(), 231 | props.as_ptr(), 232 | &mut errors_list_ptr, 233 | ) 234 | }; 235 | if !errors_list_ptr.is_null() { 236 | let errors = unsafe { NvList::from_ptr(errors_list_ptr) }; 237 | if !errors.is_empty() { 238 | return Err(Error::from(errors.into_hashmap())); 239 | } 240 | } 241 | match errno { 242 | 0 => Ok(()), 243 | _ => { 244 | let io_error = std::io::Error::from_raw_os_error(errno); 245 | Err(Error::Io(io_error)) 246 | } 247 | } 248 | } 249 | 250 | fn bookmark(&self, bookmarks: &[BookmarkRequest]) -> Result<()> { 251 | let validation_errors: Vec = bookmarks 252 | .iter() 253 | .flat_map(|BookmarkRequest { snapshot, bookmark }| vec![snapshot, bookmark]) 254 | .map(PathBuf::validate) 255 | .filter_map(Result::err) 256 | .collect(); 257 | if !validation_errors.is_empty() { 258 | return Err(ValidationErrors(validation_errors)); 259 | } 260 | 261 | let mut bookmarks_list = NvList::default(); 262 | for BookmarkRequest { snapshot, bookmark } in bookmarks { 263 | bookmarks_list.insert( 264 | &*bookmark.to_string_lossy(), 265 | snapshot.to_string_lossy().as_ref(), 266 | )?; 267 | } 268 | 269 | let mut errors_list_ptr = null_mut(); 270 | let errno = 271 | unsafe { zfs_core_sys::lzc_bookmark(bookmarks_list.as_ptr(), &mut errors_list_ptr) }; 272 | if 
!errors_list_ptr.is_null() { 273 | let errors = unsafe { NvList::from_ptr(errors_list_ptr) }; 274 | if !errors.is_empty() { 275 | return Err(Error::from(errors.into_hashmap())); 276 | } 277 | } 278 | match errno { 279 | 0 => Ok(()), 280 | _ => { 281 | let io_error = std::io::Error::from_raw_os_error(errno); 282 | Err(Error::Io(io_error)) 283 | } 284 | } 285 | } 286 | 287 | fn destroy_snapshots(&self, snapshots: &[PathBuf], timing: DestroyTiming) -> Result<()> { 288 | let validation_errors: Vec = snapshots 289 | .iter() 290 | .map(PathBuf::validate) 291 | .filter(Result::is_err) 292 | .map(Result::unwrap_err) 293 | .collect(); 294 | if !validation_errors.is_empty() { 295 | return Err(ValidationErrors(validation_errors)); 296 | } 297 | 298 | let mut snapshots_list = NvList::default(); 299 | 300 | for snap in snapshots { 301 | snapshots_list.insert(&*snap.to_string_lossy(), true)?; 302 | } 303 | 304 | let mut errors_list_ptr = null_mut(); 305 | let errno = unsafe { 306 | zfs_core_sys::lzc_destroy_snaps( 307 | snapshots_list.as_ptr(), 308 | timing.as_c_uint(), 309 | &mut errors_list_ptr, 310 | ) 311 | }; 312 | if !errors_list_ptr.is_null() { 313 | let errors = unsafe { NvList::from_ptr(errors_list_ptr) }; 314 | if !errors.is_empty() { 315 | return Err(Error::from(errors.into_hashmap())); 316 | } 317 | } 318 | match errno { 319 | 0 => Ok(()), 320 | _ => { 321 | let io_error = std::io::Error::from_raw_os_error(errno); 322 | Err(Error::Io(io_error)) 323 | } 324 | } 325 | } 326 | 327 | fn destroy_bookmarks(&self, bookmarks: &[PathBuf]) -> Result<()> { 328 | let validation_errors: Vec = bookmarks 329 | .iter() 330 | .map(PathBuf::validate) 331 | .filter(Result::is_err) 332 | .map(Result::unwrap_err) 333 | .collect(); 334 | if !validation_errors.is_empty() { 335 | return Err(ValidationErrors(validation_errors)); 336 | } 337 | 338 | let mut bookmarks_list = NvList::default(); 339 | 340 | for bookmark in bookmarks { 341 | 
bookmarks_list.insert_boolean(&*bookmark.to_string_lossy())?; 342 | } 343 | 344 | let mut errors_list_ptr = null_mut(); 345 | let errno = unsafe { 346 | zfs_core_sys::lzc_destroy_bookmarks(bookmarks_list.as_ptr(), &mut errors_list_ptr) 347 | }; 348 | if !errors_list_ptr.is_null() { 349 | let errors = unsafe { NvList::from_ptr(errors_list_ptr) }; 350 | if !errors.is_empty() { 351 | return Err(Error::from(errors.into_hashmap())); 352 | } 353 | } 354 | match errno { 355 | 0 => Ok(()), 356 | _ => { 357 | let io_error = std::io::Error::from_raw_os_error(errno); 358 | Err(Error::Io(io_error)) 359 | } 360 | } 361 | } 362 | 363 | fn send_full, FD: AsRawFd>( 364 | &self, 365 | path: N, 366 | fd: FD, 367 | flags: SendFlags, 368 | ) -> Result<()> { 369 | self.send(path.into(), None, fd.as_raw_fd(), flags) 370 | } 371 | 372 | fn send_incremental, F: Into, FD: AsRawFd>( 373 | &self, 374 | path: N, 375 | from: F, 376 | fd: FD, 377 | flags: SendFlags, 378 | ) -> Result<()> { 379 | self.send(path.into(), Some(from.into()), fd.as_raw_fd(), flags) 380 | } 381 | 382 | fn run_channel_program>( 383 | &self, 384 | pool: N, 385 | program: &str, 386 | instr_limit: u64, 387 | mem_limit: u64, 388 | sync: bool, 389 | args: NvList, 390 | ) -> Result { 391 | let pool = pool.into(); 392 | let pool_c_string = pool.to_str().expect("Non UTF-8 pool name").into_cstr(); 393 | let prog_c_string = program.into_cstr(); 394 | 395 | let mut out_nvlist_ptr = null_mut(); 396 | let errno = unsafe { 397 | if sync { 398 | zfs_core_sys::lzc_channel_program( 399 | pool_c_string.as_ref().as_ptr(), 400 | prog_c_string.as_ref().as_ptr(), 401 | instr_limit, 402 | mem_limit, 403 | args.as_ptr(), 404 | &mut out_nvlist_ptr, 405 | ) 406 | } else { 407 | zfs_core_sys::lzc_channel_program_nosync( 408 | pool_c_string.as_ref().as_ptr(), 409 | prog_c_string.as_ref().as_ptr(), 410 | instr_limit, 411 | mem_limit, 412 | args.as_ptr(), 413 | &mut out_nvlist_ptr, 414 | ) 415 | } 416 | }; 417 | match errno { 418 | 0 => Ok(unsafe { 
// This should be mapped to values from nvpair.
/// Convert a boolean into the 0/1 `u64` encoding expected by libzfs_core.
fn bool_to_u64(src: bool) -> u64 {
    // `u64::from(bool)` is guaranteed to be exactly 0 or 1; idiomatic
    // replacement for the manual `if src { 1 } else { 0 }`.
    u64::from(src)
}
39 | RightNow, 40 | /// If a snapshot has user holds or clones, it will be marked for deferred destruction, and 41 | /// will be destroyed when the last hold or clone is removed/destroyed. 42 | Defer, 43 | } 44 | 45 | impl DestroyTiming { 46 | pub fn as_c_uint(&self) -> std::os::raw::c_uint { 47 | match self { 48 | DestroyTiming::Defer => 1, 49 | DestroyTiming::RightNow => 0, 50 | } 51 | } 52 | } 53 | 54 | pub struct BookmarkRequest { 55 | pub snapshot: PathBuf, 56 | pub bookmark: PathBuf, 57 | } 58 | 59 | impl BookmarkRequest { 60 | pub fn new(snapshot: PathBuf, bookmark: PathBuf) -> Self { 61 | BookmarkRequest { snapshot, bookmark } 62 | } 63 | } 64 | 65 | bitflags! { 66 | #[derive(Default)] 67 | pub struct SendFlags: u32 { 68 | const LZC_SEND_FLAG_EMBED_DATA = 1 << 0; 69 | const LZC_SEND_FLAG_LARGE_BLOCK = 1 << 1; 70 | const LZC_SEND_FLAG_COMPRESS = 1 << 2; 71 | const LZC_SEND_FLAG_RAW = 1 << 3; 72 | const LZC_SEND_FLAG_SAVED = 1 << 4; 73 | } 74 | } 75 | pub trait ZfsEngine { 76 | /// Check if a dataset (a filesystem, or a volume, or a snapshot with the given name exists. 77 | /// 78 | /// NOTE: Can't be used to check for existence of bookmarks. 79 | /// * `name` - The dataset name to check. 80 | #[cfg_attr(tarpaulin, skip)] 81 | fn exists>(&self, _name: N) -> Result { 82 | Err(Error::Unimplemented) 83 | } 84 | 85 | /// Create a new dataset. 86 | #[cfg_attr(tarpaulin, skip)] 87 | fn create(&self, _request: CreateDatasetRequest) -> Result<()> { 88 | Err(Error::Unimplemented) 89 | } 90 | 91 | /// Create snapshots as one atomic operation. 92 | #[cfg_attr(tarpaulin, skip)] 93 | fn snapshot( 94 | &self, 95 | _snapshots: &[PathBuf], 96 | _user_properties: Option>, 97 | ) -> Result<()> { 98 | Err(Error::Unimplemented) 99 | } 100 | 101 | /// Create bookmarks as one atomic operation. 
102 | #[cfg_attr(tarpaulin, skip)] 103 | fn bookmark(&self, _snapshots: &[BookmarkRequest]) -> Result<()> { 104 | Err(Error::Unimplemented) 105 | } 106 | 107 | /// Deletes the dataset 108 | /// Deletes the dataset 109 | #[cfg_attr(tarpaulin, skip)] 110 | fn destroy>(&self, _name: N) -> Result<()> { 111 | Err(Error::Unimplemented) 112 | } 113 | 114 | /// Delete snapshots as one atomic operation 115 | #[cfg_attr(tarpaulin, skip)] 116 | fn destroy_snapshots(&self, _snapshots: &[PathBuf], _timing: DestroyTiming) -> Result<()> { 117 | Err(Error::Unimplemented) 118 | } 119 | 120 | /// Delete bookmarks as one atomic operation 121 | #[cfg_attr(tarpaulin, skip)] 122 | fn destroy_bookmarks(&self, _bookmarks: &[PathBuf]) -> Result<()> { 123 | Err(Error::Unimplemented) 124 | } 125 | 126 | #[cfg_attr(tarpaulin, skip)] 127 | fn list>(&self, _pool: N) -> Result> { 128 | Err(Error::Unimplemented) 129 | } 130 | #[cfg_attr(tarpaulin, skip)] 131 | fn list_filesystems>(&self, _pool: N) -> Result> { 132 | Err(Error::Unimplemented) 133 | } 134 | #[cfg_attr(tarpaulin, skip)] 135 | fn list_snapshots>(&self, _pool: N) -> Result> { 136 | Err(Error::Unimplemented) 137 | } 138 | #[cfg_attr(tarpaulin, skip)] 139 | fn list_bookmarks>(&self, _pool: N) -> Result> { 140 | Err(Error::Unimplemented) 141 | } 142 | #[cfg_attr(tarpaulin, skip)] 143 | fn list_volumes>(&self, _pool: N) -> Result> { 144 | Err(Error::Unimplemented) 145 | } 146 | /// Read all properties of filesystem/volume/snapshot/bookmark. 147 | #[cfg_attr(tarpaulin, skip)] 148 | fn read_properties>(&self, _path: N) -> Result { 149 | Err(Error::Unimplemented) 150 | } 151 | 152 | /// Send a full snapshot to a specified file descriptor. 153 | #[cfg_attr(tarpaulin, skip)] 154 | fn send_full, FD: AsRawFd>( 155 | &self, 156 | _path: N, 157 | _fd: FD, 158 | _flags: SendFlags, 159 | ) -> Result<()> { 160 | Err(Error::Unimplemented) 161 | } 162 | 163 | /// Send an incremental snapshot to a specified file descriptor. 
164 | #[cfg_attr(tarpaulin, skip)] 165 | fn send_incremental, F: Into, FD: AsRawFd>( 166 | &self, 167 | _path: N, 168 | _from: F, 169 | _fd: FD, 170 | _flags: SendFlags, 171 | ) -> Result<()> { 172 | Err(Error::Unimplemented) 173 | } 174 | 175 | /// Run a channel program 176 | #[cfg_attr(tarpaulin, skip)] 177 | fn run_channel_program>( 178 | &self, 179 | _pool: N, 180 | _program: &str, 181 | _instr_limit: u64, 182 | _mem_limit: u64, 183 | _sync: bool, 184 | _args: libnv::nvpair::NvList, 185 | ) -> Result { 186 | Err(Error::Unimplemented) 187 | } 188 | } 189 | 190 | #[derive(Default, Builder, Debug, Clone, Getters)] 191 | #[builder(setter(into))] 192 | #[get = "pub"] 193 | /// Consumer friendly builder for NvPair. Use this to create your datasets. Some properties only 194 | /// work on filesystems, some only on volumes. 195 | pub struct CreateDatasetRequest { 196 | /// Name of the dataset. First crumb of path is name of zpool. 197 | name: PathBuf, 198 | /// Filesystem or Volume. 199 | kind: DatasetKind, 200 | /// Optional user defined properties. User property names must conform to the following 201 | /// characteristics: 202 | /// 203 | /// - Contain a colon (':') character to distinguish them from native properties. 204 | /// - Contain lowercase letters, numbers, and the following punctuation characters: ':', 205 | /// '+','.', '_'. 206 | /// - Maximum user property name is 256 characters. 207 | #[builder(default)] 208 | user_properties: Option>, 209 | 210 | // 211 | // the rest is zfs native properties 212 | /// Controls how ACL entries inherited when files and directories created. 213 | #[builder(default)] 214 | acl_inherit: Option, 215 | /// Controls how an ACL entry modified during a `chmod` operation. 216 | #[builder(default)] 217 | acl_mode: Option, 218 | /// Controls whether the access time for files updated when they are read. 219 | #[builder(default)] 220 | atime: Option, 221 | /// Controls whether a file system can be mounted. 
222 | #[builder(default)] 223 | can_mount: CanMount, 224 | /// Controls the checksum used to verify data integrity. 225 | #[builder(default)] 226 | checksum: Option, 227 | /// Enables or disables compression for a dataset. 228 | #[builder(default)] 229 | compression: Option, 230 | /// Sets the number of copies of user data per file system. Available values are 1, 2, or 3. 231 | /// These copies are in addition to any pool-level redundancy. Disk space used by multiple 232 | /// copies of user data charged to the corresponding file and dataset, and counts against 233 | /// quotas and reservations. In addition, the used property updated when multiple copies 234 | /// enabled. Consider setting this property when the file system created because changing this 235 | /// property on an existing file system only affects newly written data. 236 | #[builder(default)] 237 | copies: Option, 238 | /// Controls whether device files in a file system can be opened. 239 | #[builder(default)] 240 | devices: Option, 241 | /// Controls whether programs in a file system allowed to be executed. Also, when set to 242 | /// `false`, `mmap(2)` calls with `PROT_EXEC` disallowed. 243 | #[builder(default)] 244 | exec: Option, 245 | /// Controls the mount point used for this file system. 246 | #[builder(default)] 247 | mount_point: Option, 248 | /// Controls what is cached in the primary cache (ARC). 249 | #[builder(default)] 250 | primary_cache: Option, 251 | /// Limits the amount of disk space a dataset and its descendants can consume. 252 | #[builder(default)] 253 | quota: Option, 254 | /// Controls whether a dataset can be modified. 255 | #[builder(default)] 256 | readonly: Option, 257 | /// Specifies a suggested block size for files in a file system in bytes. The size specified 258 | /// must be a power of two greater than or equal to 512 and less than or equal to 128 KiB. 259 | /// If the large_blocks feature is enabled on the pool, the size may be up to 1 MiB. 
260 | #[builder(default)] 261 | record_size: Option, 262 | /// Sets the amount of disk space a dataset can consume. This property enforces a hard limit on 263 | /// the amount of space used. This hard limit does not include disk space used by descendents, 264 | /// such as snapshots and clones. 265 | #[builder(default)] 266 | ref_quota: Option, 267 | /// Sets the minimum amount of disk space is guaranteed to a dataset, not including 268 | /// descendants, such as snapshots and clones. 269 | #[builder(default)] 270 | ref_reservation: Option, 271 | /// Sets the minimum amount of disk space guaranteed to a dataset and its descendants. 272 | #[builder(default)] 273 | reservation: Option, 274 | /// Controls what is cached in the secondary cache (L2ARC). 275 | #[builder(default)] 276 | secondary_cache: Option, 277 | /// Controls whether the `setuid` bit is honored in a file system. 278 | #[builder(default)] 279 | setuid: Option, 280 | /// Controls whether the .zfs directory is hidden or visible in the root of the file system 281 | #[builder(default)] 282 | snap_dir: Option, 283 | /// For volumes, specifies the logical size of the volume. 284 | #[builder(default)] 285 | volume_size: Option, 286 | /// For volumes, specifies the block size of the volume in bytes. The block size cannot be 287 | /// changed after the volume has been written, so set the block size at volume creation time. 288 | /// The default block size for volumes is 8 KB. Any power of 2 from 512 bytes to 128 KB is 289 | /// valid. 290 | #[builder(default)] 291 | volume_block_size: Option, 292 | /// Indicates whether extended attributes are enabled or disabled. 
293 | #[builder(default)] 294 | xattr: Option, 295 | } 296 | 297 | impl CreateDatasetRequest { 298 | pub fn builder() -> CreateDatasetRequestBuilder { 299 | CreateDatasetRequestBuilder::default() 300 | } 301 | 302 | pub fn validate(&self) -> Result<()> { 303 | let mut errors = Vec::new(); 304 | 305 | if let Err(e) = validators::validate_name(self.name()) { 306 | errors.push(e); 307 | } 308 | 309 | if errors.is_empty() { 310 | Ok(()) 311 | } else { 312 | Err(errors.into()) 313 | } 314 | } 315 | } 316 | 317 | pub(crate) mod validators { 318 | use crate::zfs::{errors::ValidationResult, ValidationError, DATASET_NAME_MAX_LENGTH}; 319 | use std::path::Path; 320 | 321 | pub fn validate_name>(dataset: P) -> ValidationResult { 322 | _validate_name(dataset.as_ref()) 323 | } 324 | 325 | pub fn _validate_name(dataset: &Path) -> ValidationResult { 326 | let name = dataset.to_string_lossy(); 327 | if name.ends_with('/') { 328 | return Err(ValidationError::MissingName(dataset.to_owned())); 329 | } 330 | if dataset.has_root() { 331 | return Err(ValidationError::MissingPool(dataset.to_owned())); 332 | } 333 | dataset 334 | .file_name() 335 | .ok_or_else(|| ValidationError::MissingName(dataset.to_owned())) 336 | .and_then(|name| { 337 | if name.len() > DATASET_NAME_MAX_LENGTH { 338 | return Err(ValidationError::NameTooLong(dataset.to_owned())); 339 | } 340 | Ok(()) 341 | }) 342 | } 343 | } 344 | 345 | #[cfg(test)] 346 | mod test { 347 | use super::{CreateDatasetRequest, DatasetKind, Error, ErrorKind, ValidationError}; 348 | use std::path::PathBuf; 349 | 350 | #[test] 351 | fn test_error_ds_not_found() { 352 | let stderr = b"cannot open 's/asd/asd': dataset does not exist"; 353 | 354 | let err = Error::from_stderr(stderr); 355 | assert_eq!(Error::DatasetNotFound(PathBuf::from("s/asd/asd")), err); 356 | assert_eq!(ErrorKind::DatasetNotFound, err.kind()); 357 | } 358 | 359 | #[test] 360 | fn test_error_rubbish() { 361 | let stderr = b"there is no way there is an error like this"; 362 | 
let stderr_string = String::from_utf8_lossy(stderr).to_string(); 363 | 364 | let err = Error::from_stderr(stderr); 365 | assert_eq!(Error::UnknownSoFar(stderr_string), err); 366 | assert_eq!(ErrorKind::Unknown, err.kind()); 367 | } 368 | 369 | #[test] 370 | fn test_name_validator() { 371 | let path = PathBuf::from("z/asd/"); 372 | let request = CreateDatasetRequest::builder() 373 | .name(path.clone()) 374 | .kind(DatasetKind::Filesystem) 375 | .build() 376 | .unwrap(); 377 | 378 | let result = request.validate().unwrap_err(); 379 | let expected = Error::from(vec![ValidationError::MissingName(path.clone())]); 380 | assert_eq!(expected, result); 381 | 382 | let path = PathBuf::from("z/asd/jnmgyfklueiodyfryvopvyfidvdgxqxsesjmqeoevdgmzsqmesuqzqoxhjfltmsvltdyiilgkvklinlfhaanfqisdazjpfmwttnuosdfijickudhwegburxsoesvunamysaigtagymxcyfeyqiqphtalmbkskrjdndbbcjqiiwucsxzezqmvpzmkylrojumtvatfvrpfkxubfujyioyylmffvrvtfetnzghkwaqzxkqmialkaaekotuhgiivwvbsoqqa"); 383 | let request = CreateDatasetRequest::builder() 384 | .name(path.clone()) 385 | .kind(DatasetKind::Filesystem) 386 | .build() 387 | .unwrap(); 388 | 389 | let result = request.validate().unwrap_err(); 390 | let expected = Error::from(vec![ValidationError::NameTooLong(path.clone())]); 391 | assert_eq!(expected, result); 392 | } 393 | } 394 | -------------------------------------------------------------------------------- /src/zfs/pathext.rs: -------------------------------------------------------------------------------- 1 | use crate::zfs::ValidationResult; 2 | use std::path::Path; 3 | 4 | pub trait PathExt { 5 | fn get_pool(&self) -> Option; 6 | fn get_snapshot(&self) -> Option; 7 | fn get_bookmark(&self) -> Option; 8 | 9 | fn is_snapshot(&self) -> bool { 10 | self.get_snapshot().is_some() 11 | } 12 | fn is_bookmark(&self) -> bool { 13 | self.get_bookmark().is_some() 14 | } 15 | fn is_volume_or_dataset(&self) -> bool { 16 | !self.is_bookmark() && !self.is_snapshot() 17 | } 18 | 19 | fn is_valid(&self) -> bool { 20 | if 
let Ok(()) = self.validate() { 21 | true 22 | } else { 23 | false 24 | } 25 | } 26 | 27 | fn validate(&self) -> ValidationResult; 28 | } 29 | 30 | impl PathExt for Path { 31 | fn get_pool(&self) -> Option { 32 | if self.has_root() || self.components().count() < 2 { 33 | return None; 34 | } 35 | if let Some(root) = self.iter().next() { 36 | Some(root.to_string_lossy().to_string()) 37 | } else { 38 | None 39 | } 40 | } 41 | 42 | fn get_snapshot(&self) -> Option { 43 | if let Some(last) = self.file_name() { 44 | let as_str = last.to_string_lossy(); 45 | if as_str.contains('@') { 46 | return as_str.rsplit('@').next().map(String::from); 47 | } 48 | } 49 | None 50 | } 51 | 52 | fn get_bookmark(&self) -> Option { 53 | if let Some(last) = self.file_name() { 54 | let as_str = last.to_string_lossy(); 55 | if as_str.contains('#') { 56 | return as_str.rsplit('#').next().map(String::from); 57 | } 58 | } 59 | None 60 | } 61 | 62 | fn validate(&self) -> ValidationResult { 63 | crate::zfs::validators::validate_name(self) 64 | } 65 | } 66 | 67 | impl> PathExt for P { 68 | fn get_pool(&self) -> Option { 69 | self.as_ref().get_pool() 70 | } 71 | 72 | fn get_snapshot(&self) -> Option { 73 | self.as_ref().get_snapshot() 74 | } 75 | 76 | fn get_bookmark(&self) -> Option { 77 | self.as_ref().get_bookmark() 78 | } 79 | 80 | fn validate(&self) -> ValidationResult { 81 | self.as_ref().validate() 82 | } 83 | } 84 | 85 | #[cfg(test)] 86 | mod test { 87 | use super::PathExt; 88 | use std::path::PathBuf; 89 | 90 | #[test] 91 | fn valid_dataset_no_bookmarks_or_snapshots() { 92 | let path = PathBuf::from("tank/usr/home"); 93 | 94 | assert_eq!(Some(String::from("tank")), path.get_pool()); 95 | assert!(!path.is_snapshot()); 96 | assert!(!path.is_bookmark()); 97 | assert_eq!(None, path.get_snapshot()); 98 | assert_eq!(None, path.get_bookmark()); 99 | assert!(path.is_volume_or_dataset()); 100 | assert!(path.is_valid()); 101 | } 102 | 103 | #[test] 104 | fn not_valid_just_dataset() { 105 | let path = 
PathBuf::from("/usr/home");
        // Absolute path: no pool component, so it never validates.
        assert_eq!(None, path.get_pool());
        assert!(!path.is_snapshot());
        assert!(!path.is_bookmark());
        assert_eq!(None, path.get_snapshot());
        assert_eq!(None, path.get_bookmark());
        assert!(path.is_volume_or_dataset());
        assert!(!path.is_valid());
    }

    #[test]
    fn valid_snapshot() {
        let path = PathBuf::from("tank/usr/home@snap");

        assert_eq!(Some(String::from("tank")), path.get_pool());
        assert!(path.is_snapshot());
        assert!(!path.is_bookmark());
        assert_eq!(Some(String::from("snap")), path.get_snapshot());
        assert_eq!(None, path.get_bookmark());
        assert!(!path.is_volume_or_dataset());
        assert!(path.is_valid());
    }
    #[test]
    fn valid_bookmark() {
        let path = PathBuf::from("tank/usr/home#bookmark");

        assert_eq!(Some(String::from("tank")), path.get_pool());
        assert!(!path.is_snapshot());
        assert!(path.is_bookmark());
        assert_eq!(None, path.get_snapshot());
        assert_eq!(Some(String::from("bookmark")), path.get_bookmark());
        assert!(!path.is_volume_or_dataset());
        assert!(path.is_valid());
    }

    // '@' only counts when it appears in the final path component.
    #[test]
    fn at_in_wrong_place() {
        let path = PathBuf::from("tank/usr@wat/home");
        assert!(!path.is_snapshot());
    }

    // Same rule for '#'.
    #[test]
    fn pound_in_wrong_place() {
        let path = PathBuf::from("tank/usr#wat/home");
        assert!(!path.is_bookmark());
    }
}
--------------------------------------------------------------------------------
/src/zpool/description.rs:
--------------------------------------------------------------------------------
//! If anyone has a better name for this module - hit me up. This module is where the consumer
//! friendly representation of Zpool is defined. This is where pest's
//! [Pairs](../../../pest/iterators/struct.Pair.html) are turned into [Zpool](struct.Zpool.html).
4 | use std::{path::PathBuf, str::FromStr}; 5 | 6 | use pest::iterators::{Pair, Pairs}; 7 | 8 | use crate::{ 9 | parsers::Rule, 10 | zpool::{ 11 | vdev::{ErrorStatistics, Vdev, VdevType}, 12 | CreateZpoolRequest, Disk, Health, 13 | }, 14 | }; 15 | 16 | /// The reason why zpool is in this state. Right now it's just a wrapper around `String`, but in the 17 | /// future there _might_ be a more machine friendly format. 18 | #[derive(Clone, Debug, Eq, PartialEq)] 19 | pub enum Reason { 20 | /// Not yet classified reason. 21 | Other(String), 22 | } 23 | /// Consumer friendly Zpool representation. It has generic health status information, structure of 24 | /// vdevs, devices used to create said vdevs as well as error statistics. 25 | #[derive(Getters, Builder, Debug, Eq, PartialEq, Clone)] 26 | #[builder(setter(into))] 27 | #[get = "pub"] 28 | pub struct Zpool { 29 | /// Name of the pool 30 | name: String, 31 | /// UID of the pool. Only visible during import 32 | #[builder(default)] 33 | id: Option, 34 | /// Current Health status of the pool. 35 | health: Health, 36 | /// List of VDEVs 37 | vdevs: Vec, 38 | /// List of cache devices. 39 | #[builder(default)] 40 | caches: Vec, 41 | /// ZFS Intent Log (ZIL) devices. 42 | #[builder(default)] 43 | logs: Vec, 44 | /// Spare devices. 45 | #[builder(default)] 46 | spares: Vec, 47 | /// Special vdevs. 48 | #[builder(default)] 49 | special: Vec, 50 | /// Value of action field whatever it is. 51 | #[builder(default)] 52 | action: Option, 53 | /// Errors? 54 | #[builder(default)] 55 | errors: Option, 56 | /// Reason why this Zpool is not healthy. 57 | #[builder(default)] 58 | reason: Option, 59 | /// Error statistics 60 | #[builder(default)] 61 | error_statistics: ErrorStatistics, 62 | } 63 | 64 | impl Zpool { 65 | /// Create a builder - the preferred way to create a structure. 
66 | pub fn builder() -> ZpoolBuilder { 67 | ZpoolBuilder::default() 68 | } 69 | 70 | #[allow(clippy::option_unwrap_used, clippy::wildcard_enum_match_arm)] 71 | pub(crate) fn from_pest_pair(pair: Pair<'_, Rule>) -> Zpool { 72 | debug_assert!(pair.as_rule() == Rule::zpool); 73 | let pairs = pair.into_inner(); 74 | let mut zpool = ZpoolBuilder::default(); 75 | for pair in pairs { 76 | match pair.as_rule() { 77 | Rule::pool_name => { 78 | zpool.name(get_string_from_pair(pair)); 79 | } 80 | Rule::pool_id => { 81 | zpool.id(Some(get_u64_from_pair(pair))); 82 | } 83 | Rule::state => { 84 | zpool.health(get_health_from_pair(pair)); 85 | } 86 | Rule::action => { 87 | zpool.action(Some(get_string_from_pair(pair))); 88 | } 89 | Rule::errors => { 90 | zpool.errors(get_error_from_pair(pair)); 91 | } 92 | Rule::vdevs => { 93 | zpool.vdevs(get_vdevs_from_pair(pair)); 94 | } 95 | Rule::pool_line => { 96 | set_stats_and_reason_from_pool_line(pair, &mut zpool); 97 | } 98 | Rule::logs => { 99 | zpool.logs(get_logs_from_pair(pair)); 100 | } 101 | Rule::caches => { 102 | zpool.caches(get_caches_from_pair(pair)); 103 | } 104 | Rule::spares => { 105 | zpool.spares(get_spares_from_pair(pair)); 106 | } 107 | Rule::special => { 108 | zpool.special(get_special_from_pair(pair)); 109 | } 110 | Rule::config | Rule::status | Rule::see | Rule::pool_headers | Rule::comment => {} 111 | Rule::scan_line => {} 112 | _ => unreachable!(), 113 | } 114 | } 115 | zpool.build().expect("Can't build zpool out of pair. 
Please report at: https://github.com/Inner-Heaven/libzetta-rs") 116 | } 117 | } 118 | 119 | impl PartialEq for Zpool { 120 | fn eq(&self, other: &CreateZpoolRequest) -> bool { 121 | &self.logs == other.logs() 122 | && &self.name == other.name() 123 | && &self.caches == other.caches() 124 | && &self.vdevs == other.vdevs() 125 | && &self.spares == other.spares() 126 | } 127 | } 128 | 129 | impl PartialEq for CreateZpoolRequest { 130 | fn eq(&self, other: &Zpool) -> bool { 131 | other == self 132 | } 133 | } 134 | 135 | #[inline] 136 | #[allow( 137 | clippy::option_unwrap_used, 138 | clippy::result_unwrap_used, 139 | clippy::wildcard_enum_match_arm 140 | )] 141 | fn get_error_statistics_from_pair(pair: Pair<'_, Rule>) -> ErrorStatistics { 142 | debug_assert_eq!(Rule::error_statistics, pair.as_rule()); 143 | let mut inner = pair.into_inner(); 144 | ErrorStatistics { 145 | read: inner 146 | .next() 147 | .unwrap() 148 | .as_span() 149 | .as_str() 150 | .parse() 151 | .unwrap_or(std::u64::MAX), 152 | write: inner 153 | .next() 154 | .unwrap() 155 | .as_span() 156 | .as_str() 157 | .parse() 158 | .unwrap_or(std::u64::MAX), 159 | checksum: inner 160 | .next() 161 | .unwrap() 162 | .as_span() 163 | .as_str() 164 | .parse() 165 | .unwrap_or(std::u64::MAX), 166 | } 167 | } 168 | 169 | #[inline] 170 | #[allow(clippy::option_unwrap_used, clippy::wildcard_enum_match_arm)] 171 | fn set_stats_and_reason_from_pool_line(pool_line: Pair<'_, Rule>, zpool: &mut ZpoolBuilder) { 172 | debug_assert_eq!(pool_line.as_rule(), Rule::pool_line); 173 | 174 | for pair in pool_line.into_inner() { 175 | match pair.as_rule() { 176 | Rule::reason => { 177 | zpool.reason(Some(Reason::Other(String::from(pair.as_span().as_str())))); 178 | } 179 | Rule::error_statistics => { 180 | zpool.error_statistics(get_error_statistics_from_pair(pair)); 181 | } 182 | _ => { /* no-op */ } 183 | }; 184 | } 185 | } 186 | 187 | #[inline] 188 | fn get_vdev_type(raid_name: Pair<'_, Rule>) -> VdevType { 189 | let 
raid_enum = raid_name 190 | .into_inner() 191 | .next() 192 | .expect("Failed to parse raid_enum"); 193 | debug_assert!(raid_enum.as_rule() == Rule::raid_enum); 194 | VdevType::from_str(raid_enum.as_str()).expect("Failed to parse raid type") 195 | } 196 | 197 | #[inline] 198 | fn get_path_from_path(path: Option>) -> PathBuf { 199 | let path = path.expect("Missing path from disk line"); 200 | debug_assert!(path.as_rule() == Rule::path); 201 | PathBuf::from(path.as_span().as_str()) 202 | } 203 | 204 | #[inline] 205 | fn get_health_from_health(health: Option>) -> Health { 206 | let health = health.expect("Missing health from disk line"); 207 | debug_assert!(health.as_rule() == Rule::state_enum); 208 | Health::try_from_str(Some(health.as_span().as_str())).expect("Failed to parse Health") 209 | } 210 | 211 | #[inline] 212 | fn get_disk_from_disk_line(disk_line: Pair<'_, Rule>) -> Disk { 213 | debug_assert!(disk_line.as_rule() == Rule::disk_line); 214 | 215 | let mut inner = disk_line.into_inner(); 216 | 217 | let path = get_path_from_path(inner.next()); 218 | let health = get_health_from_health(inner.next()); 219 | 220 | let (error_statics, reason) = get_stats_and_reason_from_pairs(inner); 221 | Disk::builder() 222 | .path(path) 223 | .health(health) 224 | .error_statistics(error_statics) 225 | .reason(reason) 226 | .build() 227 | .expect("Failed to build disk") 228 | } 229 | 230 | #[inline] 231 | #[allow(clippy::option_unwrap_used, clippy::wildcard_enum_match_arm)] 232 | fn get_stats_and_reason_from_pairs(pairs: Pairs<'_, Rule>) -> (ErrorStatistics, Option) { 233 | let mut stats = None; 234 | let mut reason = None; 235 | for pair in pairs { 236 | match pair.as_rule() { 237 | Rule::error_statistics => stats = Some(get_error_statistics_from_pair(pair)), 238 | Rule::reason => reason = Some(Reason::Other(String::from(pair.as_span().as_str()))), 239 | _ => { 240 | unreachable!(); 241 | } 242 | } 243 | } 244 | (stats.unwrap_or_default(), reason) 245 | } 246 | 247 | #[inline] 
#[allow(clippy::option_unwrap_used, clippy::wildcard_enum_match_arm)]
fn get_vdevs_from_pair(pair: Pair<'_, Rule>) -> Vec<Vdev> {
    debug_assert!(pair.as_rule() == Rule::vdevs);

    pair.into_inner()
        .map(|vdev| match vdev.as_rule() {
            // A bare disk becomes a single-disk vdev.
            Rule::naked_vdev => {
                let disk_line = vdev.into_inner().next().unwrap();

                let disk = get_disk_from_disk_line(disk_line);

                Vdev::builder()
                    .kind(VdevType::SingleDisk)
                    .health(disk.health().clone())
                    .reason(None)
                    .disks(vec![disk])
                    .build()
                    .expect("Failed to build Vdev")
            }
            // A raid header line followed by its member disk lines.
            Rule::raided_vdev => {
                let mut inner = vdev.into_inner();
                let raid_line = inner.next().unwrap();
                debug_assert!(raid_line.as_rule() == Rule::raid_line);
                let mut raid_line = raid_line.into_inner();
                let raid_name = raid_line.next().unwrap();

                let health = get_health_from_health(raid_line.next());

                let disks = inner
                    .filter(|line| line.as_rule() != Rule::pseudo_vdev_line)
                    .map(get_disk_from_disk_line)
                    .collect();

                let (error_statics, reason) = get_stats_and_reason_from_pairs(raid_line);

                Vdev::builder()
                    .kind(get_vdev_type(raid_name))
                    .health(health)
                    .disks(disks)
                    .error_statistics(error_statics)
                    .reason(reason)
                    .build()
                    .expect("Failed to build vdev")
            }
            _ => {
                unreachable!();
            }
        })
        .collect()
}

/// Parse the pool state token into a `Health`.
#[inline]
fn get_health_from_pair(pair: Pair<'_, Rule>) -> Health {
    let health = get_string_from_pair(pair);
    Health::try_from_str(Some(&health)).expect("Failed to unwrap health")
}

/// Parse a pair's single value as `u64` (e.g. pool id).
#[inline]
fn get_u64_from_pair(pair: Pair<'_, Rule>) -> u64 {
    get_value_from_pair(pair)
        .as_str()
        .parse()
        .expect("Failed to unwrap u64")
}

/// Extract a pair's single value as an owned `String`.
#[inline]
fn get_string_from_pair(pair: Pair<'_, Rule>) -> String {
String::from(get_value_from_pair(pair).as_str()) 316 | } 317 | 318 | #[inline] 319 | fn get_value_from_pair(pair: Pair<'_, Rule>) -> Pair<'_, Rule> { 320 | let mut pairs = pair.into_inner(); 321 | pairs.next().expect("Failed to unwrap value") 322 | } 323 | 324 | #[inline] 325 | #[allow(clippy::option_unwrap_used, clippy::wildcard_enum_match_arm)] 326 | fn get_error_from_pair(pair: Pair<'_, Rule>) -> Option { 327 | let mut pairs = pair.into_inner(); 328 | let error_pair = pairs.next().expect("Failed to unwrap error"); 329 | match error_pair.as_rule() { 330 | Rule::no_errors => None, 331 | _ => Some(String::from(error_pair.as_str())), 332 | } 333 | } 334 | 335 | #[inline] 336 | fn get_logs_from_pair(pair: Pair<'_, Rule>) -> Vec { 337 | debug_assert!(pair.as_rule() == Rule::logs); 338 | if let Some(vdevs) = pair.into_inner().next() { 339 | get_vdevs_from_pair(vdevs) 340 | } else { 341 | Vec::new() 342 | } 343 | } 344 | 345 | #[inline] 346 | fn get_special_from_pair(pair: Pair<'_, Rule>) -> Vec { 347 | debug_assert!(pair.as_rule() == Rule::special); 348 | if let Some(vdevs) = pair.into_inner().next() { 349 | get_vdevs_from_pair(vdevs) 350 | } else { 351 | Vec::new() 352 | } 353 | } 354 | 355 | #[inline] 356 | fn get_caches_from_pair(pair: Pair<'_, Rule>) -> Vec { 357 | debug_assert!(pair.as_rule() == Rule::caches); 358 | pair.into_inner().map(get_disk_from_disk_line).collect() 359 | } 360 | #[inline] 361 | fn get_spares_from_pair(pair: Pair<'_, Rule>) -> Vec { 362 | debug_assert!(pair.as_rule() == Rule::spares); 363 | pair.into_inner().map(get_disk_from_disk_line).collect() 364 | } 365 | 366 | // This module can have better tests. 
Issue #65
#[cfg(test)]
mod test {
    use std::path::PathBuf;

    use crate::zpool::{CreateVdevRequest, Disk, Health, Vdev, VdevType};

    use super::{CreateZpoolRequest, Zpool};

    // A parsed pool and the request that would create it compare equal.
    #[test]
    fn test_eq_zpool() {
        let request = CreateZpoolRequest::builder()
            .name("wat")
            .zil(CreateVdevRequest::SingleDisk(PathBuf::from("hd0")))
            .cache(PathBuf::from("hd1"))
            .build()
            .unwrap();
        let zpool = Zpool::builder()
            .name("wat")
            .health(Health::Online)
            .caches(vec![Disk::builder()
                .path("hd1")
                .health(Health::Online)
                .build()
                .unwrap()])
            .logs(vec![Vdev::builder()
                .kind(VdevType::SingleDisk)
                .health(Health::Online)
                .disks(vec![Disk::builder()
                    .path("hd0")
                    .health(Health::Online)
                    .build()
                    .unwrap()])
                .build()
                .unwrap()])
            .vdevs(vec![])
            .build()
            .unwrap();

        assert_eq!(request, zpool);
    }

    // Differing log devices make the comparison fail.
    #[test]
    fn test_ne_zpool() {
        let request = CreateZpoolRequest::builder()
            .name("wat")
            .zil(CreateVdevRequest::SingleDisk(PathBuf::from("hd0")))
            .build()
            .unwrap();
        let zpool = Zpool::builder()
            .name("wat")
            .health(Health::Online)
            .vdevs(vec![])
            .build()
            .unwrap();
        assert_ne!(request, zpool);
    }
}
--------------------------------------------------------------------------------
/src/zpool/fixtures/import_with_empty_comment:
--------------------------------------------------------------------------------
pool: t2
id: 5333885354421686613
state: ONLINE
action: The pool can be imported using its name or numeric identifier.
5 | comment: 6 | config: 7 | 8 | t2 ONLINE 9 | sdc ONLINE 10 | -------------------------------------------------------------------------------- /src/zpool/fixtures/status_with_block_device_nested: -------------------------------------------------------------------------------- 1 | pool: eden 2 | state: ONLINE 3 | scan: scrub repaired 1M in 15:03:34 with 0 errors on Sat Mar 4 01:12:20 2023 4 | config: 5 | 6 | NAME STATE READ WRITE CKSUM 7 | eden ONLINE 0 0 0 8 | raidz2-0 ONLINE 0 0 0 9 | /dev/diskid/DISK-ZCT2K2R6 ONLINE 0 0 0 10 | /dev/diskid/DISK-ZCT2QVET ONLINE 0 0 0 11 | /dev/diskid/DISK-WSD6B5L6 ONLINE 0 0 0 12 | /dev/diskid/DISK-ZCT2QWL9 ONLINE 0 0 0 13 | /dev/diskid/DISK-ZCT2QXEL ONLINE 0 0 0 14 | /dev/diskid/DISK-ZCT2RH0W ONLINE 0 0 0 15 | -------------------------------------------------------------------------------- /src/zpool/open3.rs: -------------------------------------------------------------------------------- 1 | //! Open3 implementation of [`ZpoolEngine`](trait.ZpoolEngine.html). 2 | //! 3 | //! Easy way - [`ZpoolOpen3::default()`](struct.ZpoolOpen3.html#impl-Default). 4 | //! It will look for `ZPOOL_CMD` in current 5 | //! environment and fall back to `zpool` in `PATH`. 6 | //! 7 | //! Another way to specify is to use `ZpoolOpen3::new("/path/to/my/zpool")`. 8 | //! 9 | //! ### Usage 10 | //! ```rust,no_run 11 | //! use libzetta::zpool::{ZpoolEngine, ZpoolOpen3}; 12 | //! let engine = ZpoolOpen3::default(); 13 | //! 14 | //! // Check that pool with name z exists. 15 | //! assert!(engine.exists("z").unwrap()); 16 | //! 17 | //! let remote = ZpoolOpen3::with_cmd("zpool.sh"); 18 | //! 19 | //! assert!(engine.exists("z").unwrap()); 20 | //! ``` 21 | //! 22 | //! It's called [open3](https://docs.ruby-lang.org/en/2.0.0/Open3.html) because it opens `stdin`, `stdout`, `stderr`. 

use std::{
    env,
    ffi::{OsStr, OsString},
    path::PathBuf,
    process::{Command, Output, Stdio},
};

use crate::{
    parsers::{Rule, StdoutParser},
    zpool::description::Zpool,
    GlobalLogger,
};
use pest::Parser;
use slog::Logger;

use super::{
    CreateMode, CreateVdevRequest, CreateZpoolRequest, DestroyMode, ExportMode, OfflineMode,
    OnlineMode, PropPair, ZpoolEngine, ZpoolError, ZpoolProperties, ZpoolResult,
};

lazy_static! {
    // Comma-separated list of zpool properties requested from `zpool get`.
    // 171 is presumably the exact length of the assembled string — TODO confirm.
    static ref ZPOOL_PROP_ARG: OsString = {
        let mut arg = OsString::with_capacity(171);
        arg.push("alloc,cap,comment,dedupratio,expandsize,fragmentation,free,");
        arg.push("freeing,guid,health,size,leaked,altroot,readonly,autoexpand,");
        arg.push("autoreplace,bootfs,cachefile,dedupditto,delegation,failmode");
        arg
    };
}
/// Open3 implementation of [`ZpoolEngine`](../trait.ZpoolEngine.html). You can use
/// `ZpoolOpen3::default` to create it.
pub struct ZpoolOpen3 {
    // Binary invoked for every zpool operation (e.g. "zpool" or an absolute path).
    cmd_name: OsString,
    // Structured logger; every executed command is logged through it.
    logger: Logger,
}

impl Default for ZpoolOpen3 {
    /// Uses `log` crate as drain for `Slog`. Tries to use `ZPOOL_CMD` from environment if variable
    /// is missing then it uses `zpool` from `$PATH`.
    fn default() -> ZpoolOpen3 {
        let cmd_name = match env::var_os("ZPOOL_CMD") {
            Some(val) => val,
            None => "zpool".into(),
        };

        let logger =
            GlobalLogger::get().new(o!("zetta_module" => "zpool", "zpool_impl" => "open3"));
        ZpoolOpen3 { cmd_name, logger }
    }
}
impl ZpoolOpen3 {
    /// Create new using supplied path as zpool cmd using "log" as backend for
    /// logging.
77 | pub fn with_cmd>(cmd_name: I) -> ZpoolOpen3 { 78 | let mut z = ZpoolOpen3::default(); 79 | z.cmd_name = cmd_name.into(); 80 | z 81 | } 82 | 83 | fn zpool(&self) -> Command { 84 | Command::new(&self.cmd_name) 85 | } 86 | 87 | #[allow(dead_code)] 88 | /// Force disable logging by using `/dev/null` as drain. 89 | fn zpool_mute(&self) -> Command { 90 | let mut z = self.zpool(); 91 | z.stdout(Stdio::null()); 92 | z.stderr(Stdio::null()); 93 | z 94 | } 95 | 96 | fn zpools_from_import(&self, out: Output) -> ZpoolResult> { 97 | if out.status.success() { 98 | let stdout: String = String::from_utf8_lossy(&out.stdout).into(); 99 | StdoutParser::parse(Rule::zpools, stdout.as_ref()) 100 | .map_err(|_| ZpoolError::ParseError) 101 | .map(|pairs| pairs.map(Zpool::from_pest_pair).collect()) 102 | } else { 103 | if out.stderr.is_empty() && out.stdout.is_empty() { 104 | return Ok(Vec::new()); 105 | } 106 | Err(ZpoolError::from_stderr(&out.stderr)) 107 | } 108 | } 109 | } 110 | 111 | #[derive(Default, Builder, Debug, Clone, Getters)] 112 | #[builder(setter(into))] 113 | #[get = "pub"] 114 | pub struct StatusOptions { 115 | #[builder(default)] 116 | full_paths: bool, 117 | #[builder(default)] 118 | resolve_links: bool, 119 | } 120 | 121 | impl ZpoolEngine for ZpoolOpen3 { 122 | fn exists>(&self, name: N) -> ZpoolResult { 123 | let mut z = self.zpool_mute(); 124 | z.arg("list").arg(name.as_ref()); 125 | debug!(self.logger, "executing"; "cmd" => format_args!("{:?}", z)); 126 | let status = z.status()?; 127 | Ok(status.success()) 128 | } 129 | 130 | fn create(&self, request: CreateZpoolRequest) -> ZpoolResult<()> { 131 | if !request.is_suitable_for_create() { 132 | return Err(ZpoolError::InvalidTopology); 133 | } 134 | let mut z = self.zpool(); 135 | z.arg("create"); 136 | if request.create_mode() == &CreateMode::Force { 137 | z.arg("-f"); 138 | } 139 | if let Some(props) = request.props().clone() { 140 | for arg in props.into_args() { 141 | z.arg("-o"); 142 | z.arg(arg); 143 | } 144 
| } 145 | if let Some(mount) = request.mount().clone() { 146 | z.arg("-m"); 147 | z.arg(mount); 148 | } 149 | if let Some(altroot) = request.altroot().clone() { 150 | z.arg("-R"); 151 | z.arg(altroot); 152 | } 153 | z.arg(request.name()); 154 | z.args(request.into_args()); 155 | debug!(self.logger, "executing"; "cmd" => format_args!("{:?}", z)); 156 | let out = z.output()?; 157 | if out.status.success() { 158 | Ok(()) 159 | } else { 160 | Err(ZpoolError::from_stderr(&out.stderr)) 161 | } 162 | } 163 | 164 | fn destroy>(&self, name: N, mode: DestroyMode) -> ZpoolResult<()> { 165 | let mut z = self.zpool_mute(); 166 | z.arg("destroy"); 167 | if let DestroyMode::Force = mode { 168 | z.arg("-f"); 169 | } 170 | z.arg(name.as_ref()); 171 | debug!(self.logger, "executing"; "cmd" => format_args!("{:?}", z)); 172 | z.status().map(|_| Ok(()))? 173 | } 174 | 175 | fn read_properties>(&self, name: N) -> ZpoolResult { 176 | let mut z = self.zpool(); 177 | z.args(&["list", "-p", "-H", "-o"]); 178 | z.arg(&*ZPOOL_PROP_ARG); 179 | z.arg(name.as_ref()); 180 | debug!(self.logger, "executing"; "cmd" => format_args!("{:?}", z)); 181 | let out = z.output()?; 182 | if out.status.success() { 183 | ZpoolProperties::try_from_stdout(&out.stdout) 184 | } else { 185 | Err(ZpoolError::from_stderr(&out.stderr)) 186 | } 187 | } 188 | 189 | fn set_property, P: PropPair>( 190 | &self, 191 | name: N, 192 | key: &str, 193 | value: &P, 194 | ) -> ZpoolResult<()> { 195 | let mut z = self.zpool(); 196 | z.arg("set"); 197 | z.arg(OsString::from(PropPair::to_pair(value, key))); 198 | z.arg(name.as_ref()); 199 | debug!(self.logger, "executing"; "cmd" => format_args!("{:?}", z)); 200 | let out = z.output()?; 201 | if out.status.success() { 202 | Ok(()) 203 | } else { 204 | Err(ZpoolError::from_stderr(&out.stderr)) 205 | } 206 | } 207 | 208 | fn export>(&self, name: N, mode: ExportMode) -> ZpoolResult<()> { 209 | let mut z = self.zpool(); 210 | z.arg("export"); 211 | if let ExportMode::Force = mode { 212 | 
z.arg("-f"); 213 | } 214 | z.arg(name.as_ref()); 215 | debug!(self.logger, "executing"; "cmd" => format_args!("{:?}", z)); 216 | let out = z.output()?; 217 | if out.status.success() { 218 | Ok(()) 219 | } else { 220 | Err(ZpoolError::from_stderr(&out.stderr)) 221 | } 222 | } 223 | 224 | fn available(&self) -> ZpoolResult> { 225 | let mut z = self.zpool(); 226 | z.arg("import"); 227 | debug!(self.logger, "executing"; "cmd" => format_args!("{:?}", z)); 228 | let out = z.output()?; 229 | self.zpools_from_import(out) 230 | } 231 | 232 | fn available_in_dir(&self, dir: PathBuf) -> ZpoolResult> { 233 | let mut z = self.zpool(); 234 | z.arg("import"); 235 | z.arg("-d"); 236 | z.arg(dir); 237 | debug!(self.logger, "executing"; "cmd" => format_args!("{:?}", z)); 238 | let out = z.output()?; 239 | self.zpools_from_import(out) 240 | } 241 | 242 | fn import>(&self, name: N) -> Result<(), ZpoolError> { 243 | let mut z = self.zpool(); 244 | z.arg("import"); 245 | z.arg(name.as_ref()); 246 | debug!(self.logger, "executing"; "cmd" => format_args!("{:?}", z)); 247 | let out = z.output()?; 248 | if out.status.success() { 249 | Ok(()) 250 | } else { 251 | Err(ZpoolError::from_stderr(&out.stderr)) 252 | } 253 | } 254 | 255 | fn import_from_dir>(&self, name: N, dir: PathBuf) -> ZpoolResult<()> { 256 | let mut z = self.zpool(); 257 | z.arg("import"); 258 | z.arg("-d"); 259 | z.arg(dir); 260 | z.arg(name.as_ref()); 261 | debug!(self.logger, "executing"; "cmd" => format_args!("{:?}", z)); 262 | let out = z.output()?; 263 | if out.status.success() { 264 | Ok(()) 265 | } else { 266 | Err(ZpoolError::from_stderr(&out.stderr)) 267 | } 268 | } 269 | 270 | fn status>(&self, name: N, opts: StatusOptions) -> ZpoolResult { 271 | let mut z = self.zpool(); 272 | z.arg("status"); 273 | z.arg("-p"); 274 | if opts.full_paths { 275 | z.arg("-P"); 276 | } 277 | if opts.resolve_links { 278 | z.arg("-L"); 279 | } 280 | z.arg(name.as_ref()); 281 | debug!(self.logger, "executing"; "cmd" => 
format_args!("{:?}", z)); 282 | let out = z.output()?; 283 | let zpools = self 284 | .zpools_from_import(out) 285 | .expect("Failed to unwrap zpool from status check"); 286 | if zpools.is_empty() { 287 | return Err(ZpoolError::PoolNotFound); 288 | } 289 | let zpool = zpools.into_iter().next().expect("Can't build zpool out of pair. Please report at: https://github.com/Inner-Heaven/libzetta-rs"); 290 | if zpool.name().as_str() != name.as_ref() { 291 | unreachable!(); 292 | } 293 | Ok(zpool) 294 | } 295 | 296 | fn status_all(&self, opts: StatusOptions) -> ZpoolResult> { 297 | let mut z = self.zpool(); 298 | z.arg("status"); 299 | if opts.full_paths { 300 | z.arg("-P"); 301 | } 302 | if opts.resolve_links { 303 | z.arg("-L"); 304 | } 305 | debug!(self.logger, "executing"; "cmd" => format_args!("{:?}", z)); 306 | let out = z.output()?; 307 | self.zpools_from_import(out) 308 | } 309 | 310 | fn scrub>(&self, name: N) -> ZpoolResult<()> { 311 | let mut z = self.zpool(); 312 | z.arg("scrub"); 313 | z.arg(name.as_ref()); 314 | debug!(self.logger, "executing"; "cmd" => format_args!("{:?}", z)); 315 | let out = z.output()?; 316 | if out.status.success() { 317 | Ok(()) 318 | } else { 319 | Err(ZpoolError::from_stderr(&out.stderr)) 320 | } 321 | } 322 | 323 | fn pause_scrub>(&self, name: N) -> ZpoolResult<()> { 324 | let mut z = self.zpool(); 325 | z.arg("scrub"); 326 | z.arg("-p"); 327 | z.arg(name.as_ref()); 328 | debug!(self.logger, "executing"; "cmd" => format_args!("{:?}", z)); 329 | let out = z.output()?; 330 | if out.status.success() { 331 | Ok(()) 332 | } else { 333 | Err(ZpoolError::from_stderr(&out.stderr)) 334 | } 335 | } 336 | 337 | fn stop_scrub>(&self, name: N) -> ZpoolResult<()> { 338 | let mut z = self.zpool(); 339 | z.arg("scrub"); 340 | z.arg("-s"); 341 | z.arg(name.as_ref()); 342 | debug!(self.logger, "executing"; "cmd" => format_args!("{:?}", z)); 343 | let out = z.output()?; 344 | if out.status.success() { 345 | Ok(()) 346 | } else { 347 | 
Err(ZpoolError::from_stderr(&out.stderr)) 348 | } 349 | } 350 | 351 | fn take_offline, D: AsRef>( 352 | &self, 353 | name: N, 354 | device: D, 355 | mode: OfflineMode, 356 | ) -> ZpoolResult<()> { 357 | let mut z = self.zpool(); 358 | z.arg("offline"); 359 | if mode == OfflineMode::UntilReboot { 360 | z.arg("-t"); 361 | } 362 | z.arg(name.as_ref()); 363 | z.arg(device.as_ref()); 364 | debug!(self.logger, "executing"; "cmd" => format_args!("{:?}", z)); 365 | let out = z.output()?; 366 | if out.status.success() { 367 | Ok(()) 368 | } else { 369 | Err(ZpoolError::from_stderr(&out.stderr)) 370 | } 371 | } 372 | 373 | fn bring_online, D: AsRef>( 374 | &self, 375 | name: N, 376 | device: D, 377 | mode: OnlineMode, 378 | ) -> ZpoolResult<()> { 379 | let mut z = self.zpool(); 380 | z.arg("online"); 381 | if mode == OnlineMode::Expand { 382 | z.arg("-e"); 383 | } 384 | z.arg(name.as_ref()); 385 | z.arg(device.as_ref()); 386 | debug!(self.logger, "executing"; "cmd" => format_args!("{:?}", z)); 387 | let out = z.output()?; 388 | if out.status.success() { 389 | Ok(()) 390 | } else { 391 | Err(ZpoolError::from_stderr(&out.stderr)) 392 | } 393 | } 394 | 395 | fn attach, D: AsRef>( 396 | &self, 397 | name: N, 398 | device: D, 399 | new_device: D, 400 | ) -> ZpoolResult<()> { 401 | let mut z = self.zpool(); 402 | z.arg("attach"); 403 | z.arg(name.as_ref()); 404 | z.arg(device.as_ref()); 405 | z.arg(new_device.as_ref()); 406 | debug!(self.logger, "executing"; "cmd" => format_args!("{:?}", z)); 407 | let out = z.output()?; 408 | if out.status.success() { 409 | Ok(()) 410 | } else { 411 | Err(ZpoolError::from_stderr(&out.stderr)) 412 | } 413 | } 414 | 415 | fn detach, D: AsRef>(&self, name: N, device: D) -> ZpoolResult<()> { 416 | let mut z = self.zpool(); 417 | z.arg("detach"); 418 | z.arg(name.as_ref()); 419 | z.arg(device.as_ref()); 420 | debug!(self.logger, "executing"; "cmd" => format_args!("{:?}", z)); 421 | let out = z.output()?; 422 | if out.status.success() { 423 | Ok(()) 
424 | } else { 425 | Err(ZpoolError::from_stderr(&out.stderr)) 426 | } 427 | } 428 | 429 | fn add_vdev>( 430 | &self, 431 | name: N, 432 | new_vdev: CreateVdevRequest, 433 | add_mode: CreateMode, 434 | ) -> Result<(), ZpoolError> { 435 | let mut z = self.zpool(); 436 | z.arg("add"); 437 | if add_mode == CreateMode::Force { 438 | z.arg("-f"); 439 | } 440 | z.arg(name.as_ref()); 441 | z.args(new_vdev.into_args()); 442 | debug!(self.logger, "executing"; "cmd" => format_args!("{:?}", z)); 443 | let out = z.output()?; 444 | if out.status.success() { 445 | Ok(()) 446 | } else { 447 | Err(ZpoolError::from_stderr(&out.stderr)) 448 | } 449 | } 450 | 451 | fn add_zil>( 452 | &self, 453 | name: N, 454 | new_zil: CreateVdevRequest, 455 | add_mode: CreateMode, 456 | ) -> Result<(), ZpoolError> { 457 | let mut z = self.zpool(); 458 | z.arg("add"); 459 | if add_mode == CreateMode::Force { 460 | z.arg("-f"); 461 | } 462 | z.arg(name.as_ref()); 463 | z.arg("log"); 464 | z.args(new_zil.into_args()); 465 | debug!(self.logger, "executing"; "cmd" => format_args!("{:?}", z)); 466 | let out = z.output()?; 467 | if out.status.success() { 468 | Ok(()) 469 | } else { 470 | Err(ZpoolError::from_stderr(&out.stderr)) 471 | } 472 | } 473 | 474 | fn add_cache, D: AsRef>( 475 | &self, 476 | name: N, 477 | new_cache: D, 478 | add_mode: CreateMode, 479 | ) -> Result<(), ZpoolError> { 480 | let mut z = self.zpool(); 481 | z.arg("add"); 482 | if add_mode == CreateMode::Force { 483 | z.arg("-f"); 484 | } 485 | z.arg(name.as_ref()); 486 | z.arg("cache"); 487 | z.arg(new_cache.as_ref()); 488 | debug!(self.logger, "executing"; "cmd" => format_args!("{:?}", z)); 489 | let out = z.output()?; 490 | if out.status.success() { 491 | Ok(()) 492 | } else { 493 | Err(ZpoolError::from_stderr(&out.stderr)) 494 | } 495 | } 496 | 497 | fn add_spare, D: AsRef>( 498 | &self, 499 | name: N, 500 | new_spare: D, 501 | add_mode: CreateMode, 502 | ) -> Result<(), ZpoolError> { 503 | let mut z = self.zpool(); 504 | 
z.arg("add"); 505 | if add_mode == CreateMode::Force { 506 | z.arg("-f"); 507 | } 508 | z.arg(name.as_ref()); 509 | z.arg("spare"); 510 | z.arg(new_spare.as_ref()); 511 | debug!(self.logger, "executing"; "cmd" => format_args!("{:?}", z)); 512 | let out = z.output()?; 513 | if out.status.success() { 514 | Ok(()) 515 | } else { 516 | Err(ZpoolError::from_stderr(&out.stderr)) 517 | } 518 | } 519 | 520 | fn replace_disk, D: AsRef, O: AsRef>( 521 | &self, 522 | name: N, 523 | old_disk: D, 524 | new_disk: O, 525 | ) -> Result<(), ZpoolError> { 526 | let mut z = self.zpool(); 527 | z.arg("replace"); 528 | z.arg(name.as_ref()); 529 | z.arg(old_disk.as_ref()); 530 | z.arg(new_disk.as_ref()); 531 | debug!(self.logger, "executing"; "cmd" => format_args!("{:?}", z)); 532 | let out = z.output()?; 533 | if out.status.success() { 534 | Ok(()) 535 | } else { 536 | Err(ZpoolError::from_stderr(&out.stderr)) 537 | } 538 | } 539 | 540 | fn remove, D: AsRef>(&self, name: N, device: D) -> ZpoolResult<()> { 541 | let mut z = self.zpool(); 542 | z.arg("remove"); 543 | z.arg(name.as_ref()); 544 | z.arg(device.as_ref()); 545 | debug!(self.logger, "executing"; "cmd" => format_args!("{:?}", z)); 546 | let out = z.output()?; 547 | if out.status.success() { 548 | Ok(()) 549 | } else { 550 | Err(ZpoolError::from_stderr(&out.stderr)) 551 | } 552 | } 553 | } 554 | 555 | #[cfg(test)] 556 | mod test { 557 | use std::assert_eq; 558 | 559 | use super::*; 560 | fn parse_zpools(stdout: &str) -> Result, ZpoolError> { 561 | StdoutParser::parse(Rule::zpools, stdout.as_ref()) 562 | .map_err(|_| ZpoolError::ParseError) 563 | .map(|pairs| pairs.map(Zpool::from_pest_pair).collect()) 564 | } 565 | #[test] 566 | fn correctly_parses_vdevs() { 567 | let stdout = include_str!("fixtures/status_with_block_device_nested"); 568 | let zpools: Vec = parse_zpools(stdout).unwrap(); 569 | let drives = &zpools[0] 570 | .vdevs() 571 | .iter() 572 | .flat_map(|vdev| vdev.disks().iter()) 573 | .map(|drive| 
drive.path().display().to_string()) 574 | .collect::>(); 575 | 576 | let expected: Vec = [ 577 | "/dev/diskid/DISK-ZCT2K2R6", 578 | "/dev/diskid/DISK-ZCT2QVET", 579 | "/dev/diskid/DISK-WSD6B5L6", 580 | "/dev/diskid/DISK-ZCT2QWL9", 581 | "/dev/diskid/DISK-ZCT2QXEL", 582 | "/dev/diskid/DISK-ZCT2RH0W", 583 | ] 584 | .iter() 585 | .map(|d| d.to_string()) 586 | .collect(); 587 | assert_eq!(&expected, drives); 588 | } 589 | 590 | #[test] 591 | fn correctly_parse_import_with_empty_comment() { 592 | let stdout = include_str!("fixtures/import_with_empty_comment"); 593 | let zpools = parse_zpools(stdout).unwrap(); 594 | assert_eq!("t2", zpools[0].name()); 595 | assert_eq!(5333885354421686613 as u64, zpools[0].id().unwrap()); 596 | } 597 | } 598 | -------------------------------------------------------------------------------- /src/zpool/topology.rs: -------------------------------------------------------------------------------- 1 | //! Structure representing what zpool consist of. This structure is used in zpool creation and when 2 | //! new drives are attached. 3 | //! 4 | //! ### Examples 5 | //! 6 | //! Let's create simple topology: 2 drives in mirror, no l2arc, no zil. 7 | //! 8 | //! ```rust 9 | //! use libzetta::zpool::{CreateVdevRequest, CreateZpoolRequest}; 10 | //! use std::path::PathBuf; 11 | //! 12 | //! let drives = vec![PathBuf::from("sd0"), PathBuf::from("sd1")]; 13 | //! let topo = CreateZpoolRequest::builder() 14 | //! .name(String::from("tank")) 15 | //! .vdevs(vec![CreateVdevRequest::Mirror(drives)]) 16 | //! .build() 17 | //! .unwrap(); 18 | //! ``` 19 | //! Overkill example: 2 drives in mirror and a single drive, zil on double 20 | //! mirror and 2 l2rc. 21 | //! 22 | //! ```rust, norun 23 | //! use libzetta::zpool::{CreateZpoolRequest, CreateVdevRequest}; 24 | //! use std::path::PathBuf; 25 | //! 26 | //! let zil_drives = vec![PathBuf::from("hd0"), PathBuf::from("hd1")]; 27 | //! 
let mirror_drives = vec![PathBuf::from("hd2"), PathBuf::from("hd3")]; 28 | //! let cache_drives = vec![PathBuf::from("hd4"), PathBuf::from("hd5")]; 29 | //! let topo = CreateZpoolRequest::builder() 30 | //! .name("tank") 31 | //! .vdevs(vec![CreateVdevRequest::Mirror(mirror_drives)]) 32 | //! .cache("/tmp/sparse.file".into()) 33 | //! .vdev(CreateVdevRequest::SingleDisk(PathBuf::from("hd6"))) 34 | //! .caches(cache_drives) 35 | //! .zil(CreateVdevRequest::Mirror(zil_drives)) 36 | //! .altroot(PathBuf::from("/mnt")) 37 | //! .mount(PathBuf::from("/mnt")) 38 | //! .build() 39 | //! .unwrap(); 40 | //! ``` 41 | 42 | use std::{ffi::OsString, path::PathBuf}; 43 | 44 | use crate::zpool::{properties::ZpoolPropertiesWrite, vdev::CreateVdevRequest, CreateMode}; 45 | #[derive(Default, Builder, Debug, Clone, Getters, PartialEq, Eq)] 46 | #[builder(setter(into))] 47 | #[get = "pub"] 48 | /// Consumer friendly representation of zpool structure. 49 | pub struct CreateZpoolRequest { 50 | /// Name to give new zpool 51 | name: String, 52 | /// Properties if new zpool 53 | #[builder(default)] 54 | props: Option, 55 | /// Altroot for zpool 56 | #[builder(default)] 57 | altroot: Option, 58 | /// Mount mount point for zpool 59 | #[builder(default)] 60 | mount: Option, 61 | /// Use `-f` or not; 62 | #[builder(default)] 63 | create_mode: CreateMode, 64 | /// Devices used to store data 65 | #[builder(default)] 66 | vdevs: Vec, 67 | /// Adding a cache vdev to a pool will add the storage of the cache to the 68 | /// [L2ARC](https://www.freebsd.org/doc/handbook/zfs-term.html#zfs-term-l2arc). Cache devices 69 | /// cannot be mirrored. Since a cache device only stores additional copies 70 | /// of existing data, there is no risk of data loss. 
71 | #[builder(default)] 72 | caches: Vec, 73 | /// ZFS Log Devices, also known as ZFS Intent Log ([ZIL](https://www.freebsd.org/doc/handbook/zfs-term.html#zfs-term-zil)) move the intent log from the regular 74 | /// pool devices to a dedicated device, typically an SSD. Having a dedicated 75 | /// log device can significantly improve the performance of applications 76 | /// with a high volume of *synchronous* writes, especially databases. 77 | /// Log devices can be mirrored, but RAID-Z is not supported. 78 | /// If multiple log devices are used, writes will be load balanced across 79 | /// them 80 | #[builder(default)] 81 | logs: Vec, 82 | /// The hot spares feature enables you to identify disks that could be used to replace a failed 83 | /// or faulted device in one or more storage pools. Designating a device as a hot spare means 84 | /// that the device is not an active device in the pool, but if an active device in the pool 85 | /// fails, the hot spare automatically replaces the failed device. 86 | #[builder(default)] 87 | spares: Vec, 88 | /// Special vdevs store internal ZFS metadata, deduplication tables, and optionally small blocks as defined by tunables. 89 | /// See `zfsconcepts(7)` for more information. 90 | #[builder(default)] 91 | special: Vec, 92 | } 93 | 94 | impl CreateZpoolRequest { 95 | /// A preferred way to create this. 96 | pub fn builder() -> CreateZpoolRequestBuilder { 97 | CreateZpoolRequestBuilder::default() 98 | } 99 | 100 | /// Verify that given topology can be used to update existing pool. 101 | pub fn is_suitable_for_update(&self) -> bool { 102 | let valid_vdevs = self.vdevs.iter().all(CreateVdevRequest::is_valid); 103 | if !valid_vdevs { 104 | return false; 105 | } 106 | 107 | let valid_logs = self.logs.iter().all(CreateVdevRequest::is_valid); 108 | if !valid_logs { 109 | return false; 110 | } 111 | true 112 | } 113 | 114 | /// Verify that given topology can be used to create new zpool. 
115 | /// 116 | /// That means it as at least one valid vdev and all optional devices are 117 | /// valid if present. 118 | pub fn is_suitable_for_create(&self) -> bool { 119 | if self.vdevs.is_empty() { 120 | return false; 121 | } 122 | self.is_suitable_for_update() 123 | } 124 | 125 | /// Make CreateZpoolRequest usable as arg for [`Command`](https://doc.rust-lang.org/std/process/struct.Command.html). 126 | pub(crate) fn into_args(self) -> Vec { 127 | let mut ret: Vec = Vec::with_capacity(13); 128 | 129 | let vdevs = self 130 | .vdevs 131 | .into_iter() 132 | .flat_map(CreateVdevRequest::into_args); 133 | ret.extend(vdevs); 134 | 135 | if !self.logs.is_empty() { 136 | let log_vdevs = self.logs.into_iter().flat_map(CreateVdevRequest::into_args); 137 | ret.push("log".into()); 138 | ret.extend(log_vdevs); 139 | } 140 | 141 | if !self.caches.is_empty() { 142 | let caches = self.caches.into_iter().map(PathBuf::into_os_string); 143 | ret.push("cache".into()); 144 | ret.extend(caches); 145 | } 146 | 147 | if !self.spares.is_empty() { 148 | let spares = self.spares.into_iter().map(PathBuf::into_os_string); 149 | ret.push("spare".into()); 150 | ret.extend(spares); 151 | } 152 | ret 153 | } 154 | } 155 | 156 | impl CreateZpoolRequestBuilder { 157 | /// Add vdev to request. 158 | /// 159 | /// * `vdev` - [CreateVdevRequest](struct.CreateVdevRequest.html) for vdev. 160 | pub fn vdev(&mut self, vdev: CreateVdevRequest) -> &mut CreateZpoolRequestBuilder { 161 | match self.vdevs { 162 | Some(ref mut vec) => vec.push(vdev), 163 | None => { 164 | self.vdevs = Some(Vec::new()); 165 | return self.vdev(vdev); 166 | } 167 | } 168 | self 169 | } 170 | 171 | /// Add cache device to request. 172 | /// 173 | /// * `disk` - path to file or name of block device in `/dev/`. Some ZFS implementations forbid 174 | /// using files as cache. 
175 | pub fn cache(&mut self, disk: PathBuf) -> &mut CreateZpoolRequestBuilder { 176 | match self.caches { 177 | Some(ref mut vec) => vec.push(disk), 178 | None => { 179 | self.caches = Some(Vec::new()); 180 | return self.cache(disk); 181 | } 182 | } 183 | self 184 | } 185 | 186 | /// Add Vdev that will be used as ZFS Intent Log to request. 187 | /// 188 | /// * `vdev` - [CreateVdevRequest](struct.CreateVdevRequest.html) for ZIL device. 189 | pub fn zil(&mut self, log: CreateVdevRequest) -> &mut CreateZpoolRequestBuilder { 190 | match self.logs { 191 | Some(ref mut vec) => vec.push(log), 192 | None => { 193 | self.logs = Some(Vec::with_capacity(1)); 194 | return self.zil(log); 195 | } 196 | } 197 | self 198 | } 199 | 200 | /// Add spare disk that will be used to replace failed device in zpool. 201 | /// 202 | /// * `disk` - path to file or name of block device in `/dev/`. 203 | pub fn spare(&mut self, disk: PathBuf) -> &mut CreateZpoolRequestBuilder { 204 | match self.spares { 205 | Some(ref mut vec) => vec.push(disk), 206 | None => { 207 | self.spares = Some(Vec::new()); 208 | return self.spare(disk); 209 | } 210 | } 211 | self 212 | } 213 | } 214 | 215 | #[cfg(test)] 216 | mod test { 217 | use std::{fs::File, path::PathBuf}; 218 | 219 | use tempdir::TempDir; 220 | 221 | use super::*; 222 | 223 | fn get_disks(num: usize, path: &PathBuf) -> Vec { 224 | (0..num).map(|_| path.clone()).collect() 225 | } 226 | 227 | fn args_from_slice(args: &[&str]) -> Vec { 228 | args.to_vec().into_iter().map(OsString::from).collect() 229 | } 230 | 231 | #[test] 232 | fn test_validators() { 233 | let tmp_dir = TempDir::new("zpool-tests").unwrap(); 234 | let file_path = tmp_dir.path().join("block-device"); 235 | let _valid_file = File::create(file_path.clone()).unwrap(); 236 | 237 | // Zpool with one valid mirror 238 | let topo = CreateZpoolRequestBuilder::default() 239 | .name("tank") 240 | .vdevs(vec![CreateVdevRequest::Mirror(get_disks(2, &file_path))]) 241 | .build() 242 | 
.unwrap(); 243 | 244 | assert!(topo.is_suitable_for_create()); 245 | 246 | // Zpool with invalid mirror 247 | let topo = CreateZpoolRequestBuilder::default() 248 | .name("tank") 249 | .vdevs(vec![CreateVdevRequest::Mirror(get_disks(1, &file_path))]) 250 | .build() 251 | .unwrap(); 252 | 253 | assert!(!topo.is_suitable_for_create()); 254 | 255 | // Zpool with valid cache and valid vdev 256 | let topo = CreateZpoolRequestBuilder::default() 257 | .name("tank") 258 | .vdevs(vec![CreateVdevRequest::Mirror(get_disks(2, &file_path))]) 259 | .caches(get_disks(2, &file_path)) 260 | .build() 261 | .unwrap(); 262 | 263 | assert!(topo.is_suitable_for_create()); 264 | 265 | // Just add L2ARC to zpool 266 | let topo = CreateZpoolRequestBuilder::default() 267 | .name("tank") 268 | .cache(file_path) 269 | .build() 270 | .unwrap(); 271 | 272 | assert!(topo.is_suitable_for_update()); 273 | assert!(!topo.is_suitable_for_create()); 274 | } 275 | 276 | #[test] 277 | fn test_builder() { 278 | let result = CreateZpoolRequest::builder().build(); 279 | assert!(result.is_err()); 280 | } 281 | 282 | #[test] 283 | fn test_args() { 284 | let tmp_dir = TempDir::new("zpool-tests").unwrap(); 285 | let file_path = tmp_dir.path().join("block-device"); 286 | let path = file_path.to_str().unwrap(); 287 | let _valid_file = File::create(file_path.clone()).unwrap(); 288 | let naked_vdev = CreateVdevRequest::SingleDisk(file_path.clone()); 289 | 290 | // Just add L2ARC to zpool 291 | let topo = CreateZpoolRequestBuilder::default() 292 | .name("tank") 293 | .cache(file_path.clone()) 294 | .build() 295 | .unwrap(); 296 | 297 | let result: Vec = topo.into_args(); 298 | let expected = args_from_slice(&["cache", path]); 299 | 300 | assert_eq!(expected, result); 301 | 302 | // Zpool with mirror as ZIL and two vdevs 303 | let topo = CreateZpoolRequestBuilder::default() 304 | .name("tank") 305 | .vdev(naked_vdev.clone()) 306 | .vdev(naked_vdev.clone()) 307 | .zil(CreateVdevRequest::Mirror(get_disks(2, 
&file_path))) 308 | .build() 309 | .unwrap(); 310 | 311 | let result = topo.into_args(); 312 | let expected = args_from_slice(&[path, path, "log", "mirror", path, path]); 313 | assert_eq!(expected, result); 314 | 315 | // Zraid 316 | let topo = CreateZpoolRequestBuilder::default() 317 | .name("tank") 318 | .vdev(CreateVdevRequest::RaidZ(get_disks(3, &file_path))) 319 | .build() 320 | .unwrap(); 321 | 322 | let result = topo.into_args(); 323 | let expected = args_from_slice(&["raidz", path, path, path]); 324 | assert_eq!(expected, result); 325 | 326 | // Zraid 2 327 | let topo = CreateZpoolRequestBuilder::default() 328 | .name("tank") 329 | .vdev(CreateVdevRequest::RaidZ2(get_disks(5, &file_path))) 330 | .build() 331 | .unwrap(); 332 | 333 | let result = topo.into_args(); 334 | let expected = args_from_slice(&["raidz2", path, path, path, path, path]); 335 | assert_eq!(expected, result); 336 | 337 | // Zraid 3 338 | let topo = CreateZpoolRequestBuilder::default() 339 | .name("tank") 340 | .vdev(CreateVdevRequest::RaidZ3(get_disks(8, &file_path))) 341 | .build() 342 | .unwrap(); 343 | 344 | let result = topo.into_args(); 345 | let expected = args_from_slice(&["raidz3", path, path, path, path, path, path, path, path]); 346 | assert_eq!(expected, result); 347 | } 348 | } 349 | -------------------------------------------------------------------------------- /src/zpool/vdev.rs: -------------------------------------------------------------------------------- 1 | //! Consumer friendly structure representing vdev. 2 | //! 3 | //! Everything that goes into vdev is in this module. 4 | //! 5 | //! ### Examples 6 | //! 7 | //! ##### Create a mirror with 2 disks 8 | //! 9 | //! ```rust 10 | //! use libzetta::zpool::CreateVdevRequest; 11 | //! use std::path::PathBuf; 12 | //! 13 | //! // Create an `Vec` with two disks 14 | //! let drives = vec![PathBuf::from("nvd0p4.eli"), PathBuf::from("nvd1p4.eli")]; 15 | //! let vdev = CreateVdevRequest::Mirror(drives); 16 | //! ``` 17 | //! 
##### Create a single disk vdev with sparse file 18 | //! 19 | //! ```rust 20 | //! use libzetta::zpool::CreateVdevRequest; 21 | //! use std::path::PathBuf; 22 | //! // (file needs to exist prior) 23 | //! let path = PathBuf::from("/tmp/sparseFile0"); 24 | //! let vdev = CreateVdevRequest::SingleDisk(path); 25 | //! ``` 26 | 27 | use std::{ 28 | default::Default, 29 | ffi::OsString, 30 | path::{Path, PathBuf}, 31 | str::FromStr, 32 | }; 33 | 34 | use crate::zpool::{Health, Reason, ZpoolError}; 35 | 36 | /// Error statistics. 37 | /// 38 | /// NOTE: Due to imperfections of our world number of errors limited to [`std::u64::MAX`](https://doc.rust-lang.org/std/u64/constant.MAX.html). 39 | #[derive(Debug, Clone, Eq, PartialEq)] 40 | pub struct ErrorStatistics { 41 | /// I/O errors that occurred while issuing a read request 42 | pub read: u64, 43 | /// I/O errors that occurred while issuing a write request 44 | pub write: u64, 45 | /// Checksum errors, meaning the device returned corrupted data as the 46 | /// result of a read request 47 | pub checksum: u64, 48 | } 49 | 50 | impl Default for ErrorStatistics { 51 | fn default() -> ErrorStatistics { 52 | ErrorStatistics { 53 | read: 0, 54 | write: 0, 55 | checksum: 0, 56 | } 57 | } 58 | } 59 | 60 | /// Basic building block of vdev. 61 | /// 62 | /// It can be backed by a entire block device, a partition or a file. This particular structure 63 | /// represents backing of existing vdev. If disk is part of active zpool then it will also 64 | /// have error counts. 65 | #[derive(Debug, Clone, Getters, Eq, Builder)] 66 | #[builder(setter(into))] 67 | #[get = "pub"] 68 | pub struct Disk { 69 | /// Path to a backing device or file. If path is relative, then it's 70 | /// relative to `/dev/`. 71 | path: PathBuf, 72 | /// Current health of this specific device. 73 | health: Health, 74 | /// Reason why device is in this state. 
75 | #[builder(default)] 76 | reason: Option, 77 | /// How many read, write and checksum errors device encountered since last 78 | /// reset. 79 | #[builder(default)] 80 | error_statistics: ErrorStatistics, 81 | } 82 | 83 | impl Disk { 84 | pub fn builder() -> DiskBuilder { 85 | DiskBuilder::default() 86 | } 87 | } 88 | 89 | /// Equal if path is the same. 90 | impl PartialEq for Disk { 91 | fn eq(&self, other: &Disk) -> bool { 92 | self.path == other.path 93 | } 94 | } 95 | 96 | impl PartialEq for Disk { 97 | fn eq(&self, other: &Path) -> bool { 98 | self.path.as_path() == other 99 | } 100 | } 101 | 102 | impl PartialEq for Disk { 103 | fn eq(&self, other: &PathBuf) -> bool { 104 | &self.path == other 105 | } 106 | } 107 | 108 | impl PartialEq for PathBuf { 109 | fn eq(&self, other: &Disk) -> bool { 110 | other == self 111 | } 112 | } 113 | 114 | impl PartialEq for Path { 115 | fn eq(&self, other: &Disk) -> bool { 116 | other == self 117 | } 118 | } 119 | 120 | /// A [type](https://www.freebsd.org/doc/handbook/zfs-term.html) of Vdev. 121 | #[derive(Debug, Clone, PartialEq, Eq)] 122 | pub enum VdevType { 123 | /// Just a single disk or file. 124 | SingleDisk, 125 | /// A mirror of multiple vdevs 126 | Mirror, 127 | /// ZFS implements [RAID-Z](https://blogs.oracle.com/ahl/what-is-raid-z), a 128 | /// variation on standard RAID-5 that offers better distribution of 129 | /// parity and eliminates the “RAID-5 write hole”. 130 | RaidZ, 131 | /// The same as RAID-Z, but with 2 parity drives. 132 | RaidZ2, 133 | /// The same as RAID-Z, but with 3 parity drives. 
134 | RaidZ3, 135 | } 136 | 137 | impl FromStr for VdevType { 138 | type Err = ZpoolError; 139 | 140 | fn from_str(source: &str) -> Result { 141 | match source { 142 | "mirror" => Ok(VdevType::Mirror), 143 | "raidz1" => Ok(VdevType::RaidZ), 144 | "raidz2" => Ok(VdevType::RaidZ2), 145 | "raidz3" => Ok(VdevType::RaidZ3), 146 | n => Err(ZpoolError::UnknownRaidType(String::from(n))), 147 | } 148 | } 149 | } 150 | 151 | /// Consumer friendly wrapper to configure vdev to zpol. 152 | #[derive(Debug, Clone, PartialEq, Eq)] 153 | pub enum CreateVdevRequest { 154 | /// The most basic type of vdev is a standard block device. This can be an 155 | /// entire disk or a partition. In addition to disks, ZFS pools can be 156 | /// backed by regular files, this is especially useful for testing and 157 | /// experimentation. Use the full path to the file as the device path in 158 | /// zpool create. All vdevs must be at least 64MB or 128 MB in size 159 | /// depending on implementation. 160 | SingleDisk(PathBuf), 161 | /// A mirror of multiple disks. A mirror vdev will only hold as much data as 162 | /// its smallest member. A mirror vdev can withstand the failure of all 163 | /// but one of its members without losing any data. 164 | Mirror(Vec), 165 | /// ZFS implements [RAID-Z](https://blogs.oracle.com/ahl/what-is-raid-z), a 166 | /// variation on standard RAID-5 that offers better distribution of 167 | /// parity and eliminates the “RAID-5 write hole”. 168 | RaidZ(Vec), 169 | /// The same as RAID-Z, but with 2 parity drives. 170 | RaidZ2(Vec), 171 | /// The same as RAID-Z, but with 3 parity drives. 172 | RaidZ3(Vec), 173 | } 174 | 175 | impl CreateVdevRequest { 176 | #[inline] 177 | fn is_valid_raid(disks: &[PathBuf], min_disks: usize) -> bool { 178 | if disks.len() < min_disks { 179 | return false; 180 | } 181 | true 182 | } 183 | 184 | /// Check if given CreateVdevRequest is valid. 185 | /// 186 | /// For SingleDisk it means that what ever it points to exists. 
187 | /// 188 | /// For Mirror it checks that it's at least two valid disks. 189 | /// 190 | /// For RaidZ it checks that it's at least three valid disk. And so goes on. 191 | /// This gives false negative results in RAIDZ2 and RAIDZ3. This is 192 | /// intentional. 193 | /// possible makes no sense. 194 | pub fn is_valid(&self) -> bool { 195 | match *self { 196 | CreateVdevRequest::SingleDisk(ref _disk) => true, 197 | CreateVdevRequest::Mirror(ref disks) => CreateVdevRequest::is_valid_raid(disks, 2), 198 | CreateVdevRequest::RaidZ(ref disks) => CreateVdevRequest::is_valid_raid(disks, 3), 199 | CreateVdevRequest::RaidZ2(ref disks) => CreateVdevRequest::is_valid_raid(disks, 5), 200 | CreateVdevRequest::RaidZ3(ref disks) => CreateVdevRequest::is_valid_raid(disks, 8), 201 | } 202 | } 203 | 204 | #[inline] 205 | fn conv_to_args>(vdev_type: T, disks: Vec) -> Vec { 206 | let mut ret = Vec::with_capacity(disks.len()); 207 | ret.push(vdev_type.into()); 208 | for disk in disks { 209 | ret.push(disk.into_os_string()); 210 | } 211 | ret 212 | } 213 | 214 | /// Make turn CreateVdevRequest into list of arguments. 
215 | pub fn into_args(self) -> Vec { 216 | match self { 217 | CreateVdevRequest::SingleDisk(disk) => vec![disk.into_os_string()], 218 | CreateVdevRequest::Mirror(disks) => CreateVdevRequest::conv_to_args("mirror", disks), 219 | CreateVdevRequest::RaidZ(disks) => CreateVdevRequest::conv_to_args("raidz", disks), 220 | CreateVdevRequest::RaidZ2(disks) => CreateVdevRequest::conv_to_args("raidz2", disks), 221 | CreateVdevRequest::RaidZ3(disks) => CreateVdevRequest::conv_to_args("raidz3", disks), 222 | } 223 | } 224 | 225 | /// Short-cut to CreateVdevRequest::SingleDisk(disk) 226 | pub fn disk>(value: O) -> CreateVdevRequest { 227 | CreateVdevRequest::SingleDisk(value.into()) 228 | } 229 | 230 | /// Get kind 231 | pub fn kind(&self) -> VdevType { 232 | match self { 233 | CreateVdevRequest::SingleDisk(_) => VdevType::SingleDisk, 234 | CreateVdevRequest::Mirror(_) => VdevType::Mirror, 235 | CreateVdevRequest::RaidZ(_) => VdevType::RaidZ, 236 | CreateVdevRequest::RaidZ2(_) => VdevType::RaidZ2, 237 | CreateVdevRequest::RaidZ3(_) => VdevType::RaidZ3, 238 | } 239 | } 240 | } 241 | 242 | impl PartialEq for CreateVdevRequest { 243 | fn eq(&self, other: &Vdev) -> bool { 244 | other == self 245 | } 246 | } 247 | 248 | /// Basic zpool building block. 249 | /// 250 | /// A pool is made up of one or more vdevs, which themselves can be a single 251 | /// disk or a group of disks, in the case of a RAID transform. When multiple 252 | /// vdevs are used, ZFS spreads data across the vdevs to increase performance 253 | /// and maximize usable space. 254 | #[derive(Debug, Clone, Getters, Builder, Eq)] 255 | #[get = "pub"] 256 | pub struct Vdev { 257 | /// Type of Vdev 258 | kind: VdevType, 259 | /// Current Health of Vdev 260 | health: Health, 261 | /// Reason why vdev is in this state 262 | #[builder(default)] 263 | reason: Option, 264 | /// Backing devices for this vdev 265 | disks: Vec, 266 | /// How many read, write and checksum errors device encountered since last 267 | /// reset. 
268 | #[builder(default)] 269 | error_statistics: ErrorStatistics, 270 | } 271 | 272 | impl Vdev { 273 | /// Create a builder - a referred way of creating Vdev structure. 274 | pub fn builder() -> VdevBuilder { 275 | VdevBuilder::default() 276 | } 277 | } 278 | /// Vdevs are equal of their type and backing disks are equal. 279 | impl PartialEq for Vdev { 280 | fn eq(&self, other: &Vdev) -> bool { 281 | self.kind() == other.kind() && self.disks() == other.disks() 282 | } 283 | } 284 | 285 | impl PartialEq for Vdev { 286 | fn eq(&self, other: &CreateVdevRequest) -> bool { 287 | self.kind() == &other.kind() && { 288 | match other { 289 | CreateVdevRequest::SingleDisk(ref d) => { 290 | self.disks().first().map(Disk::path) == Some(d) 291 | } 292 | CreateVdevRequest::Mirror(ref disks) => self.disks() == disks, 293 | CreateVdevRequest::RaidZ(ref disks) => self.disks() == disks, 294 | CreateVdevRequest::RaidZ2(ref disks) => self.disks() == disks, 295 | CreateVdevRequest::RaidZ3(ref disks) => self.disks() == disks, 296 | } 297 | } 298 | } 299 | } 300 | 301 | #[cfg(test)] 302 | mod test { 303 | use std::fs::File; 304 | 305 | use tempdir::TempDir; 306 | 307 | use super::*; 308 | 309 | fn get_disks(num: usize, path: &PathBuf) -> Vec { 310 | (0..num).map(|_| path.clone()).collect() 311 | } 312 | 313 | #[test] 314 | fn test_raid_validation_naked() { 315 | let tmp_dir = TempDir::new("zpool-tests").unwrap(); 316 | let file_path = tmp_dir.path().join("block-device"); 317 | 318 | let vdev = CreateVdevRequest::SingleDisk(file_path); 319 | assert!(vdev.is_valid()); 320 | } 321 | 322 | #[test] 323 | fn test_raid_validation_mirror() { 324 | let tmp_dir = TempDir::new("zpool-tests").unwrap(); 325 | let file_path = tmp_dir.path().join("block-device"); 326 | let _valid_file = File::create(file_path.clone()).unwrap(); 327 | 328 | let vdev = CreateVdevRequest::Mirror(get_disks(2, &file_path)); 329 | assert!(vdev.is_valid()); 330 | 331 | let bad = CreateVdevRequest::Mirror(get_disks(1, 
&file_path)); 332 | assert!(!bad.is_valid()); 333 | 334 | let also_bad = CreateVdevRequest::Mirror(get_disks(0, &file_path)); 335 | assert!(!also_bad.is_valid()); 336 | } 337 | 338 | #[test] 339 | fn test_raid_validation_raidz() { 340 | let tmp_dir = TempDir::new("zpool-tests").unwrap(); 341 | let file_path = tmp_dir.path().join("block-device"); 342 | let _valid_file = File::create(file_path.clone()).unwrap(); 343 | 344 | let vdev = CreateVdevRequest::RaidZ(get_disks(3, &file_path)); 345 | assert!(vdev.is_valid()); 346 | 347 | let also_vdev = CreateVdevRequest::RaidZ(get_disks(5, &file_path)); 348 | assert!(also_vdev.is_valid()); 349 | 350 | let bad = CreateVdevRequest::RaidZ(get_disks(2, &file_path)); 351 | assert!(!bad.is_valid()); 352 | 353 | let also_bad = CreateVdevRequest::RaidZ(get_disks(1, &file_path)); 354 | assert!(!also_bad.is_valid()); 355 | } 356 | 357 | #[test] 358 | fn test_raid_validation_raidz2() { 359 | let tmp_dir = TempDir::new("zpool-tests").unwrap(); 360 | let file_path = tmp_dir.path().join("block-device"); 361 | let _valid_file = File::create(file_path.clone()).unwrap(); 362 | 363 | let vdev = CreateVdevRequest::RaidZ2(get_disks(5, &file_path)); 364 | assert!(vdev.is_valid()); 365 | 366 | let also_vdev = CreateVdevRequest::RaidZ2(get_disks(8, &file_path)); 367 | assert!(also_vdev.is_valid()); 368 | 369 | let bad = CreateVdevRequest::RaidZ2(get_disks(3, &file_path)); 370 | assert!(!bad.is_valid()); 371 | 372 | let also_bad = CreateVdevRequest::RaidZ2(get_disks(1, &file_path)); 373 | assert!(!also_bad.is_valid()); 374 | } 375 | 376 | #[test] 377 | fn test_raid_validation_raidz3() { 378 | let tmp_dir = TempDir::new("zpool-tests").unwrap(); 379 | let file_path = tmp_dir.path().join("block-device"); 380 | let _valid_file = File::create(file_path.clone()).unwrap(); 381 | 382 | let vdev = CreateVdevRequest::RaidZ3(get_disks(8, &file_path)); 383 | assert!(vdev.is_valid()); 384 | 385 | let also_vdev = CreateVdevRequest::RaidZ3(get_disks(10, 
&file_path)); 386 | assert!(also_vdev.is_valid()); 387 | 388 | let bad = CreateVdevRequest::RaidZ3(get_disks(3, &file_path)); 389 | assert!(!bad.is_valid()); 390 | 391 | let also_bad = CreateVdevRequest::RaidZ3(get_disks(0, &file_path)); 392 | assert!(!also_bad.is_valid()); 393 | } 394 | 395 | #[test] 396 | fn test_vdev_to_arg_naked() { 397 | let tmp_dir = TempDir::new("zpool-tests").unwrap(); 398 | let file_path = tmp_dir.path().join("block-device"); 399 | let _valid_file = File::create(file_path.clone()).unwrap(); 400 | 401 | let vdev = CreateVdevRequest::SingleDisk(file_path.clone()); 402 | 403 | let args = vdev.into_args(); 404 | assert_eq!(vec![file_path], args); 405 | } 406 | #[test] 407 | fn test_vdev_to_arg_mirror() { 408 | let tmp_dir = TempDir::new("zpool-tests").unwrap(); 409 | let file_path = tmp_dir.path().join("block-device"); 410 | let _valid_file = File::create(file_path.clone()).unwrap(); 411 | 412 | let vdev = CreateVdevRequest::Mirror(get_disks(2, &file_path)); 413 | 414 | let args = vdev.into_args(); 415 | let expected: Vec = vec![ 416 | "mirror".into(), 417 | file_path.clone().into(), 418 | file_path.clone().into(), 419 | ]; 420 | assert_eq!(expected, args); 421 | } 422 | 423 | #[test] 424 | fn test_vdev_to_arg_raidz() { 425 | let tmp_dir = TempDir::new("zpool-tests").unwrap(); 426 | let file_path = tmp_dir.path().join("block-device"); 427 | let _valid_file = File::create(file_path.clone()).unwrap(); 428 | 429 | let vdev = CreateVdevRequest::RaidZ(get_disks(3, &file_path)); 430 | 431 | let args = vdev.into_args(); 432 | assert_eq!(4, args.len()); 433 | assert_eq!(OsString::from("raidz"), args[0]); 434 | } 435 | 436 | #[test] 437 | fn test_vdev_to_arg_raidz2() { 438 | let tmp_dir = TempDir::new("zpool-tests").unwrap(); 439 | let file_path = tmp_dir.path().join("block-device"); 440 | let _valid_file = File::create(file_path.clone()).unwrap(); 441 | 442 | let vdev = CreateVdevRequest::RaidZ2(get_disks(5, &file_path)); 443 | 444 | let args = 
vdev.into_args(); 445 | assert_eq!(6, args.len()); 446 | assert_eq!(OsString::from("raidz2"), args[0]); 447 | } 448 | #[test] 449 | fn test_vdev_to_arg_raidz3() { 450 | let tmp_dir = TempDir::new("zpool-tests").unwrap(); 451 | let file_path = tmp_dir.path().join("block-device"); 452 | let _valid_file = File::create(file_path.clone()).unwrap(); 453 | 454 | let vdev = CreateVdevRequest::RaidZ3(get_disks(8, &file_path)); 455 | 456 | let args = vdev.into_args(); 457 | assert_eq!(9, args.len()); 458 | assert_eq!(OsString::from("raidz3"), args[0]); 459 | } 460 | 461 | #[test] 462 | fn short_versions_disk() { 463 | let name = "wat"; 464 | let path = PathBuf::from(&name); 465 | let disk = CreateVdevRequest::SingleDisk(path.clone()); 466 | let disk_left = CreateVdevRequest::SingleDisk(path.clone()); 467 | 468 | assert_eq!(disk_left, disk); 469 | 470 | assert_eq!(disk_left, CreateVdevRequest::disk(name)); 471 | } 472 | 473 | #[test] 474 | fn test_path_eq_disk() { 475 | let path = PathBuf::from("wat"); 476 | let disk = Disk::builder() 477 | .path("wat") 478 | .health(Health::Online) 479 | .build() 480 | .unwrap(); 481 | assert_eq!(path, disk); 482 | assert_eq!(path.as_path(), &disk); 483 | assert_eq!(disk, path); 484 | assert_eq!(&disk, path.as_path()); 485 | } 486 | 487 | #[test] 488 | fn test_path_ne_disk() { 489 | let path = PathBuf::from("wat"); 490 | let disk = Disk::builder() 491 | .path("notwat") 492 | .health(Health::Online) 493 | .build() 494 | .unwrap(); 495 | assert_ne!(path, disk); 496 | assert_ne!(path.as_path(), &disk); 497 | assert_ne!(disk, path); 498 | assert_ne!(&disk, path.as_path()); 499 | } 500 | 501 | #[test] 502 | fn test_vdev_eq_vdev() { 503 | let disk = Disk::builder() 504 | .path("notwat") 505 | .health(Health::Online) 506 | .build() 507 | .unwrap(); 508 | 509 | let left = Vdev::builder() 510 | .kind(VdevType::SingleDisk) 511 | .health(Health::Online) 512 | .disks(vec![disk.clone()]) 513 | .build() 514 | .unwrap(); 515 | assert_eq!(left, 
left.clone()); 516 | } 517 | 518 | #[test] 519 | fn test_vdev_ne_vdev() { 520 | let disk = Disk::builder() 521 | .path("notwat") 522 | .health(Health::Online) 523 | .build() 524 | .unwrap(); 525 | 526 | let left = Vdev::builder() 527 | .kind(VdevType::SingleDisk) 528 | .health(Health::Online) 529 | .disks(vec![disk.clone()]) 530 | .build() 531 | .unwrap(); 532 | 533 | let right = Vdev::builder() 534 | .kind(VdevType::RaidZ) 535 | .health(Health::Online) 536 | .disks(vec![disk.clone()]) 537 | .build() 538 | .unwrap(); 539 | 540 | assert_ne!(left, right); 541 | 542 | let disk2 = Disk::builder() 543 | .path("wat") 544 | .health(Health::Online) 545 | .build() 546 | .unwrap(); 547 | let right = Vdev::builder() 548 | .kind(VdevType::RaidZ) 549 | .health(Health::Online) 550 | .disks(vec![disk2]) 551 | .build() 552 | .unwrap(); 553 | 554 | assert_ne!(left, right); 555 | } 556 | } 557 | -------------------------------------------------------------------------------- /tests/test_misc.rs: -------------------------------------------------------------------------------- 1 | use libzetta::GlobalLogger; 2 | use slog::{o, Drain, Logger}; 3 | use slog_stdlog::StdLog; 4 | 5 | #[test] 6 | fn test_not_default_logger() { 7 | let root = Logger::root(StdLog.fuse(), o!("wat" => "wat")); 8 | GlobalLogger::setup(&root).unwrap(); 9 | let pairs = GlobalLogger::get().list(); 10 | let expected = String::from("(zetta_version, wat)"); 11 | let actual = format!("{:?}", pairs); 12 | assert_eq!(expected, actual); 13 | } 14 | -------------------------------------------------------------------------------- /tests/test_misc2.rs: -------------------------------------------------------------------------------- 1 | use libzetta::GlobalLogger; 2 | 3 | #[test] 4 | fn test_default_logger() { 5 | let logger = GlobalLogger::get(); 6 | let pairs = logger.list(); 7 | let expected = String::from("(zetta_version)"); 8 | let actual = format!("{:?}", pairs); 9 | assert_eq!(expected, actual); 10 | } 11 | 
-------------------------------------------------------------------------------- /tests/test_zfs.rs: -------------------------------------------------------------------------------- 1 | #![allow(clippy::mutex_atomic)] 2 | #[macro_use] 3 | extern crate lazy_static; 4 | 5 | use std::{ 6 | fs::{self, DirBuilder}, 7 | panic, 8 | path::{Path, PathBuf}, 9 | sync::Mutex, 10 | }; 11 | 12 | use cavity::{fill, Bytes, WriteMode}; 13 | use rand::Rng; 14 | 15 | use libzetta::{ 16 | slog::*, 17 | zfs::{ 18 | BookmarkRequest, Copies, CreateDatasetRequest, DatasetKind, Error, Properties, SendFlags, 19 | SnapDir, ZfsEngine, ZfsLzc, 20 | }, 21 | zpool::{CreateVdevRequest, CreateZpoolRequest, ZpoolEngine, ZpoolOpen3}, 22 | }; 23 | 24 | use libzetta::{ 25 | zfs::{DelegatingZfsEngine, DestroyTiming}, 26 | zpool::CreateMode, 27 | }; 28 | 29 | static ONE_MB_IN_BYTES: u64 = 1024 * 1024; 30 | 31 | static ZPOOL_NAME_PREFIX: &str = "tests-zfs-"; 32 | lazy_static! { 33 | static ref INITIALIZED: Mutex = Mutex::new(false); 34 | static ref SHARED_ZPOOL: String = { 35 | let name = get_zpool_name(); 36 | setup_zpool(&name); 37 | name 38 | }; 39 | } 40 | fn get_zpool_name() -> String { 41 | let mut rng = rand::thread_rng(); 42 | let suffix = rng.gen::(); 43 | let name = format!("{}-{}", ZPOOL_NAME_PREFIX, suffix); 44 | name 45 | } 46 | fn get_dataset_name() -> String { 47 | let mut rng = rand::thread_rng(); 48 | let name = rng.gen::(); 49 | let name = format!("{}", name); 50 | name 51 | } 52 | 53 | fn setup_zpool(name: &str) { 54 | let data = INITIALIZED.lock().unwrap(); 55 | 56 | if !*data { 57 | // Create vdevs if they're missing 58 | let vdev_dir = Path::new("/vdevs/zfs"); 59 | setup_vdev(vdev_dir.join("vdev0"), &Bytes::MegaBytes(64 + 10)); 60 | let zpool = ZpoolOpen3::default(); 61 | let topo = CreateZpoolRequest::builder() 62 | .name(name) 63 | .vdev(CreateVdevRequest::SingleDisk("/vdevs/zfs/vdev0".into())) 64 | .create_mode(CreateMode::Force) 65 | .build() 66 | .unwrap(); 67 | 
zpool.create(topo).unwrap(); 68 | } 69 | } 70 | fn setup_vdev>(path: P, bytes: &Bytes) -> PathBuf { 71 | let path = path.as_ref(); 72 | 73 | let parent = path.parent().unwrap(); 74 | DirBuilder::new().recursive(true).create(parent).unwrap(); 75 | 76 | if path.exists() { 77 | let meta = fs::metadata(&path).unwrap(); 78 | assert!(meta.is_file()); 79 | assert!(!meta.permissions().readonly()); 80 | if (meta.len() as usize) < bytes.as_bytes() { 81 | let _ = fs::remove_file(&path); 82 | setup_vdev(path, bytes) 83 | } else { 84 | path.into() 85 | } 86 | } else { 87 | let mut f = fs::File::create(path).unwrap(); 88 | fill(bytes.clone(), None, WriteMode::FlushOnce, &mut f).unwrap(); 89 | path.into() 90 | } 91 | } 92 | // Only used for debugging 93 | #[allow(dead_code)] 94 | fn get_logger() -> Option { 95 | let plain = slog_term::PlainSyncDecorator::new(std::io::stdout()); 96 | Some(Logger::root( 97 | slog_term::FullFormat::new(plain) 98 | .use_original_order() 99 | .build() 100 | .fuse(), 101 | o!(), 102 | )) 103 | } 104 | 105 | #[test] 106 | fn exists_on_fake() { 107 | let zpool = SHARED_ZPOOL.clone(); 108 | let fake_dataset = format!("{}/very/fake/dataset", zpool); 109 | 110 | let zfs = ZfsLzc::new().expect("Failed to initialize ZfsLzc"); 111 | 112 | let result = zfs.exists(fake_dataset).unwrap(); 113 | 114 | assert!(!result); 115 | } 116 | 117 | #[test] 118 | fn create_dumb() { 119 | let zpool = SHARED_ZPOOL.clone(); 120 | let dataset_path = PathBuf::from(format!("{}/{}", zpool, get_dataset_name())); 121 | 122 | let zfs = ZfsLzc::new().expect("Failed to initialize ZfsLzc"); 123 | 124 | let request = CreateDatasetRequest::builder() 125 | .name(dataset_path.clone()) 126 | .user_properties(std::collections::HashMap::new()) 127 | .kind(DatasetKind::Filesystem) 128 | .copies(Copies::Three) 129 | .build() 130 | .unwrap(); 131 | 132 | zfs.create(request).expect("Failed to create dataset"); 133 | 134 | let res = zfs.exists(dataset_path.to_str().unwrap()).unwrap(); 135 | 
assert!(res); 136 | } 137 | 138 | #[test] 139 | fn easy_invalid_zfs() { 140 | let zpool = SHARED_ZPOOL.clone(); 141 | let dataset_path = PathBuf::from(format!("{}/{}", zpool, get_dataset_name())); 142 | 143 | let zfs = ZfsLzc::new().expect("Failed to initialize ZfsLzc"); 144 | 145 | let request = CreateDatasetRequest::builder() 146 | .name(dataset_path.clone()) 147 | .user_properties(std::collections::HashMap::new()) 148 | .kind(DatasetKind::Filesystem) 149 | .volume_size(2) 150 | .build() 151 | .unwrap(); 152 | 153 | let res = zfs.create(request).unwrap_err(); 154 | assert_eq!(Error::invalid_input(), res); 155 | 156 | let request = CreateDatasetRequest::builder() 157 | .name(dataset_path.clone()) 158 | .user_properties(std::collections::HashMap::new()) 159 | .kind(DatasetKind::Filesystem) 160 | .volume_block_size(2) 161 | .build() 162 | .unwrap(); 163 | 164 | let res = zfs.create(request).unwrap_err(); 165 | assert_eq!(Error::invalid_input(), res); 166 | 167 | let request = CreateDatasetRequest::builder() 168 | .name(dataset_path.clone()) 169 | .user_properties(std::collections::HashMap::new()) 170 | .kind(DatasetKind::Filesystem) 171 | .volume_size(2) 172 | .volume_block_size(2) 173 | .build() 174 | .unwrap(); 175 | 176 | let res = zfs.create(request).unwrap_err(); 177 | assert_eq!(Error::invalid_input(), res); 178 | 179 | let request = CreateDatasetRequest::builder() 180 | .name(dataset_path) 181 | .user_properties(std::collections::HashMap::new()) 182 | .kind(DatasetKind::Volume) 183 | .build() 184 | .unwrap(); 185 | 186 | let res = zfs.create(request).unwrap_err(); 187 | assert_eq!(Error::invalid_input(), res); 188 | } 189 | 190 | #[test] 191 | fn create_and_destroy() { 192 | let zpool = SHARED_ZPOOL.clone(); 193 | let dataset_path = PathBuf::from(format!("{}/{}", zpool, get_dataset_name())); 194 | 195 | let zfs = DelegatingZfsEngine::new().expect("Failed to initialize ZfsLzc"); 196 | let request = CreateDatasetRequest::builder() 197 | 
.name(dataset_path.clone()) 198 | .user_properties(std::collections::HashMap::new()) 199 | .kind(DatasetKind::Filesystem) 200 | .build() 201 | .unwrap(); 202 | 203 | zfs.create(request).expect("Failed to create the dataset"); 204 | 205 | let res = zfs.exists(dataset_path.to_str().unwrap()).unwrap(); 206 | assert!(res); 207 | 208 | zfs.destroy(dataset_path.clone()).unwrap(); 209 | let res = zfs.exists(dataset_path.to_str().unwrap()).unwrap(); 210 | assert!(!res); 211 | } 212 | 213 | #[test] 214 | fn create_and_list() { 215 | let zpool = SHARED_ZPOOL.clone(); 216 | let zfs = DelegatingZfsEngine::new().expect("Failed to initialize ZfsLzc"); 217 | let root = PathBuf::from(format!("{}/{}", zpool, get_dataset_name())); 218 | let mut expected_filesystems = vec![root.clone()]; 219 | let mut expected_volumes = Vec::with_capacity(2); 220 | let request = CreateDatasetRequest::builder() 221 | .name(root.clone()) 222 | .kind(DatasetKind::Filesystem) 223 | .build() 224 | .unwrap(); 225 | zfs.create(request) 226 | .expect("Failed to create a root dataset"); 227 | 228 | for idx in 0..2 { 229 | let mut path = root.clone(); 230 | path.push(format!("{}", idx)); 231 | expected_filesystems.push(path.clone()); 232 | let request = CreateDatasetRequest::builder() 233 | .name(path) 234 | .kind(DatasetKind::Filesystem) 235 | .build() 236 | .unwrap(); 237 | zfs.create(request).expect("Failed to create a dataset"); 238 | } 239 | let datasets = zfs.list_filesystems(root.clone()).unwrap(); 240 | assert_eq!(3, datasets.len()); 241 | assert_eq!(expected_filesystems, datasets); 242 | 243 | for idx in 2..4 { 244 | let mut path = root.clone(); 245 | path.push(format!("{}", idx)); 246 | expected_volumes.push(path.clone()); 247 | let request = CreateDatasetRequest::builder() 248 | .name(path) 249 | .kind(DatasetKind::Volume) 250 | .volume_size(ONE_MB_IN_BYTES) 251 | .build() 252 | .unwrap(); 253 | zfs.create(request).expect("Failed to create a dataset"); 254 | } 255 | let datasets = 
zfs.list_volumes(root.clone()).unwrap(); 256 | assert_eq!(2, datasets.len()); 257 | assert_eq!(expected_volumes, datasets); 258 | let expected: Vec<(DatasetKind, PathBuf)> = expected_filesystems 259 | .into_iter() 260 | .map(|e| (DatasetKind::Filesystem, e)) 261 | .chain( 262 | expected_volumes 263 | .into_iter() 264 | .map(|e| (DatasetKind::Volume, e)), 265 | ) 266 | .collect(); 267 | let datasets = zfs.list(root).unwrap(); 268 | assert_eq!(5, datasets.len()); 269 | assert_eq!(expected, datasets); 270 | } 271 | 272 | #[test] 273 | fn easy_snapshot_and_bookmark() { 274 | let zpool = SHARED_ZPOOL.clone(); 275 | let zfs = DelegatingZfsEngine::new().expect("Failed to initialize ZfsLzc"); 276 | let root_name = get_dataset_name(); 277 | let root = PathBuf::from(format!("{}/{}", zpool, &root_name)); 278 | let request = CreateDatasetRequest::builder() 279 | .name(root.clone()) 280 | .kind(DatasetKind::Filesystem) 281 | .build() 282 | .unwrap(); 283 | zfs.create(request) 284 | .expect("Failed to create a root dataset"); 285 | let expected_snapshots = vec![PathBuf::from(format!("{}/{}@snap-1", zpool, &root_name))]; 286 | 287 | zfs.snapshot(&expected_snapshots, None) 288 | .expect("Failed to create snapshots"); 289 | 290 | let snapshots = zfs 291 | .list_snapshots(root.clone()) 292 | .expect("failed to list snapshots"); 293 | assert_eq!(expected_snapshots, snapshots); 294 | assert_eq!(Ok(true), zfs.exists(expected_snapshots[0].clone())); 295 | 296 | let expected_bookmarks = vec![PathBuf::from(format!("{}/{}#snap-1", zpool, &root_name))]; 297 | 298 | let bookmark_requests: Vec = expected_snapshots 299 | .iter() 300 | .zip(expected_bookmarks.iter()) 301 | .map(|(snapshot, bookmark)| BookmarkRequest::new(snapshot.clone(), bookmark.clone())) 302 | .collect(); 303 | zfs.bookmark(&bookmark_requests) 304 | .expect("Failed to create bookmarks"); 305 | 306 | let bookmarks = zfs 307 | .list_bookmarks(root.clone()) 308 | .expect("failed to list bookmarks"); 309 | 
assert_eq!(expected_bookmarks, bookmarks); 310 | 311 | zfs.destroy_snapshots(&expected_snapshots, DestroyTiming::RightNow) 312 | .unwrap(); 313 | assert_eq!(Ok(false), zfs.exists(expected_snapshots[0].clone())); 314 | 315 | zfs.destroy_bookmarks(&expected_bookmarks).unwrap(); 316 | let bookmarks = zfs.list_bookmarks(root).expect("failed to list bookmarks"); 317 | assert!(bookmarks.is_empty()) 318 | } 319 | 320 | #[test] 321 | fn read_properties_of_filesystem() { 322 | let zpool = SHARED_ZPOOL.clone(); 323 | let zfs = DelegatingZfsEngine::new().expect("Failed to initialize ZfsLzc"); 324 | let root_name = get_dataset_name(); 325 | let root = PathBuf::from(format!("{}/{}", zpool, &root_name)); 326 | let request = CreateDatasetRequest::builder() 327 | .name(root.clone()) 328 | .kind(DatasetKind::Filesystem) 329 | .copies(Copies::Two) 330 | .snap_dir(SnapDir::Visible) 331 | .build() 332 | .unwrap(); 333 | zfs.create(request) 334 | .expect("Failed to create a root dataset"); 335 | if let Properties::Filesystem(properties) = zfs.read_properties(&root).unwrap() { 336 | assert_eq!(&SnapDir::Visible, properties.snap_dir()); 337 | assert_eq!(&Copies::Two, properties.copies()); 338 | } else { 339 | panic!("Read not fs properties"); 340 | } 341 | } 342 | 343 | #[test] 344 | #[cfg(target_os = "freebsd")] 345 | fn read_properties_of_snapshot_and_bookmark_blessed_os() { 346 | let zpool = SHARED_ZPOOL.clone(); 347 | let zfs = DelegatingZfsEngine::new().expect("Failed to initialize ZfsLzc"); 348 | let root_name = get_dataset_name(); 349 | let root = PathBuf::from(format!("{}/{}", zpool, &root_name)); 350 | let request = CreateDatasetRequest::builder() 351 | .name(root) 352 | .kind(DatasetKind::Filesystem) 353 | .copies(Copies::Two) 354 | .snap_dir(SnapDir::Visible) 355 | .build() 356 | .unwrap(); 357 | zfs.create(request) 358 | .expect("Failed to create a root dataset"); 359 | 360 | let snapshot_name = format!("{}/{}@properties", zpool, &root_name); 361 | 362 | 
zfs.snapshot(&[PathBuf::from(&snapshot_name)], None) 363 | .expect("Failed to create snapshots"); 364 | 365 | if let Properties::Snapshot(properties) = zfs.read_properties(&snapshot_name).unwrap() { 366 | assert_eq!(&None, properties.clones()); 367 | assert!(properties.volume_mode().is_none()); 368 | 369 | let bookmark_name = format!("{}/{}#properties", zpool, &root_name); 370 | let bookmark_request = 371 | BookmarkRequest::new(PathBuf::from(&snapshot_name), PathBuf::from(&bookmark_name)); 372 | zfs.bookmark(&[bookmark_request]) 373 | .expect("Failed to create snapshots"); 374 | 375 | if let Properties::Bookmark(properties_bookmark) = 376 | zfs.read_properties(&bookmark_name).unwrap() 377 | { 378 | assert_eq!(properties.create_txg(), properties_bookmark.create_txg()); 379 | assert_eq!(properties.creation(), properties_bookmark.creation()); 380 | } else { 381 | panic!("Read wrong properties"); 382 | } 383 | } else { 384 | panic!("Read wrong properties"); 385 | } 386 | } 387 | #[test] 388 | fn read_properties_of_snapshot() { 389 | let zpool = SHARED_ZPOOL.clone(); 390 | let zfs = DelegatingZfsEngine::new().expect("Failed to initialize ZfsLzc"); 391 | let root_name = get_dataset_name(); 392 | let root = PathBuf::from(format!("{}/{}", zpool, &root_name)); 393 | let request = CreateDatasetRequest::builder() 394 | .name(root) 395 | .kind(DatasetKind::Filesystem) 396 | .copies(Copies::Two) 397 | .snap_dir(SnapDir::Visible) 398 | .build() 399 | .unwrap(); 400 | zfs.create(request) 401 | .expect("Failed to create a root dataset"); 402 | 403 | let snapshot_name = format!("{}/{}@properties", zpool, &root_name); 404 | 405 | zfs.snapshot(&[PathBuf::from(&snapshot_name)], None) 406 | .expect("Failed to create snapshots"); 407 | 408 | if let Properties::Snapshot(properties) = zfs.read_properties(&snapshot_name).unwrap() { 409 | assert_eq!(&None, properties.clones()); 410 | 411 | let bookmark_name = format!("{}/{}#properties", zpool, &root_name); 412 | let bookmark_request = 413 | 
BookmarkRequest::new(PathBuf::from(&snapshot_name), PathBuf::from(&bookmark_name)); 414 | zfs.bookmark(&[bookmark_request]) 415 | .expect("Failed to create snapshots"); 416 | 417 | if let Properties::Bookmark(properties_bookmark) = 418 | zfs.read_properties(&bookmark_name).unwrap() 419 | { 420 | assert_eq!(properties.create_txg(), properties_bookmark.create_txg()); 421 | assert_eq!(properties.creation(), properties_bookmark.creation()); 422 | } else { 423 | panic!("Read wrong properties"); 424 | } 425 | } else { 426 | panic!("Read wrong properties"); 427 | } 428 | } 429 | #[test] 430 | fn read_properties_of_volume() { 431 | let zpool = SHARED_ZPOOL.clone(); 432 | let zfs = DelegatingZfsEngine::new().expect("Failed to initialize ZfsLzc"); 433 | let root_name = get_dataset_name(); 434 | let root = PathBuf::from(format!("{}/{}", zpool, &root_name)); 435 | let request = CreateDatasetRequest::builder() 436 | .name(root.clone()) 437 | .kind(DatasetKind::Volume) 438 | .volume_size(ONE_MB_IN_BYTES) 439 | .build() 440 | .unwrap(); 441 | zfs.create(request) 442 | .expect("Failed to create a root dataset"); 443 | 444 | if let Properties::Volume(properties) = zfs.read_properties(&root).unwrap() { 445 | assert_eq!(&root, properties.name()); 446 | } else { 447 | panic!("Read not fs properties"); 448 | } 449 | } 450 | #[test] 451 | fn send_snapshot() { 452 | let zpool = SHARED_ZPOOL.clone(); 453 | let zfs = DelegatingZfsEngine::new().expect("Failed to initialize ZfsLzc"); 454 | let root_name = get_dataset_name(); 455 | let root = PathBuf::from(format!("{}/{}", zpool, &root_name)); 456 | let request = CreateDatasetRequest::builder() 457 | .name(root) 458 | .kind(DatasetKind::Volume) 459 | .volume_size(ONE_MB_IN_BYTES) 460 | .build() 461 | .unwrap(); 462 | zfs.create(request) 463 | .expect("Failed to create a root dataset"); 464 | 465 | let snapshot_name = format!("{}/{}@tosend", zpool, &root_name); 466 | let snapshot = PathBuf::from(&snapshot_name); 467 | 468 | 
zfs.snapshot(&[PathBuf::from(&snapshot_name)], None) 469 | .expect("Failed to create snapshots"); 470 | 471 | let tmpfile = tempfile::tempfile().unwrap(); 472 | 473 | zfs.send_full(snapshot, tmpfile, SendFlags::empty()) 474 | .unwrap(); 475 | } 476 | #[test] 477 | fn send_snapshot_incremental() { 478 | let zpool = SHARED_ZPOOL.clone(); 479 | let zfs = DelegatingZfsEngine::new().expect("Failed to initialize ZfsLzc"); 480 | let root_name = get_dataset_name(); 481 | let root = PathBuf::from(format!("{}/{}", zpool, &root_name)); 482 | let request = CreateDatasetRequest::builder() 483 | .name(root) 484 | .kind(DatasetKind::Volume) 485 | .volume_size(ONE_MB_IN_BYTES) 486 | .build() 487 | .unwrap(); 488 | zfs.create(request) 489 | .expect("Failed to create a root dataset"); 490 | 491 | let src_snapshot_name = format!("{}/{}@first", zpool, &root_name); 492 | let src_snapshot = PathBuf::from(&src_snapshot_name); 493 | zfs.snapshot(&[PathBuf::from(&src_snapshot_name)], None) 494 | .expect("Failed to create snapshots"); 495 | 496 | let snapshot_name = format!("{}/{}@tosend", zpool, &root_name); 497 | let snapshot = PathBuf::from(&snapshot_name); 498 | zfs.snapshot(&[PathBuf::from(&snapshot_name)], None) 499 | .expect("Failed to create snapshots"); 500 | 501 | let tmpfile = tempfile::tempfile().unwrap(); 502 | 503 | zfs.send_incremental(snapshot, src_snapshot, tmpfile, SendFlags::empty()) 504 | .unwrap(); 505 | } 506 | --------------------------------------------------------------------------------