├── .gitignore ├── LICENSE.md ├── README.md ├── a_test.go ├── common.c ├── common.go ├── common.h ├── destroy_test.go ├── go.mod ├── sendrecv.go ├── sort.go ├── zfs.c ├── zfs.go ├── zfs.h ├── zfs_test.go ├── zpool.c ├── zpool.go ├── zpool.h ├── zpool_test.go ├── zpool_vdev.c └── zpool_vdev.go /.gitignore: -------------------------------------------------------------------------------- 1 | .gitconfig 2 | *.sublime-* 3 | go-libzfs.test 4 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | Copyright (c) 2015, Faruk Kasumovic 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are met: 6 | 7 | * Redistributions of source code must retain the above copyright notice, this 8 | list of conditions and the following disclaimer. 9 | 10 | * Redistributions in binary form must reproduce the above copyright notice, 11 | this list of conditions and the following disclaimer in the documentation 12 | and/or other materials provided with the distribution. 13 | 14 | * Neither the name of go-libzfs nor the names of its 15 | contributors may be used to endorse or promote products derived from 16 | this software without specific prior written permission. 17 | 18 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 19 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 21 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 22 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 24 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 25 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 26 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Introduction 2 | 3 | **go-libzfs** currently implements basic manipulation of ZFS pools and data sets. The plan is to add more in further development, improve the documentation with more examples, and add more tests. _go-libzfs_ uses the libzfs C library and does not wrap the OpenZFS CLI tools. The goal is to make it easy to use and manipulate OpenZFS from within Go, mapping the libzfs C library onto a Go-style package that respects common golang practice. 4 | 5 | ## Note 6 | This golang package is used and tested only on Linux. 7 | 8 | - The version tagged v0.1 is the latest that is compatible with ZFS On Linux version 0.6.5.x 9 | - The version tagged v0.2 is the latest that is compatible with ZFS On Linux version 0.7.x 10 | 11 | [![GoDoc](https://godoc.org/github.com/bicomsystems/go-libzfs?status.svg)](https://godoc.org/github.com/bicomsystems/go-libzfs) 12 | 13 | ## Main features 14 | 15 | - Creating, destroying, importing and exporting pools. 16 | - Reading and modifying pool properties. 17 | - Creating, destroying and renaming of filesystem datasets and volumes. 18 | - Creating, destroying and rollback of snapshots. 19 | - Cloning datasets and volumes.
20 | - Reading and modifying dataset and volume properties. 21 | - Sending and receiving snapshot streams. 22 | 23 | 24 | ## Requirements 25 | 26 | - OpenZFS on Linux and libzfs with development headers installed. 27 | - Developed using go1.9 28 | 29 | ## Installing 30 | 31 | ```sh 32 | go get github.com/bicomsystems/go-libzfs 33 | ``` 34 | 35 | ## Testing 36 | 37 | ```sh 38 | # On command line shell run 39 | cd $GOPATH/src/github.com/bicomsystems/go-libzfs 40 | go test 41 | ``` 42 | 43 | ## Usage example 44 | 45 | ```go 46 | // Create a map to represent ZFS dataset properties. This is equivalent to the 47 | // list of properties you can get from the ZFS CLI tool, plus some more used 48 | // internally by libzfs. 49 | props := make(map[Prop]Property) 50 | 51 | // Here we create a (block) volume 1 GiB in size. Size is just a ZFS dataset 52 | // property, and properties are passed as a map of strings. So you have to 53 | // either specify the size as a base-10 number in a string, or use the strconv 54 | // package or similar to convert a numeric type into a base-10 string. 55 | strSize := "1073741824" 56 | 57 | props[DatasetPropVolsize] = Property{Value: strSize} 58 | // In addition, explicitly choose some more properties to be set. 59 | props[DatasetPropVolblocksize] = Property{Value: "4096"} 60 | props[DatasetPropReservation] = Property{Value: strSize} 61 | 62 | // Let's create the desired volume 63 | d, err := DatasetCreate("TESTPOOL/VOLUME1", DatasetTypeVolume, props) 64 | if err != nil { 65 | 	println(err.Error()) 66 | 	return 67 | } 68 | // The dataset has to be closed for memory cleanup 69 | defer d.Close() 70 | 71 | println("Created zfs volume TESTPOOL/VOLUME1") 72 | ``` 73 | 74 | ## Special thanks to 75 | 76 | - [Bicom Systems](http://www.bicomsystems.com) for supporting this little project and in that way making it possible. 77 | - [OpenZFS](http://open-zfs.org) as the main ZFS software collective.
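78 | 
79 | ## More examples 80 | 
81 | A minimal sketch of opening an existing dataset and reading one of its properties, assuming the `TESTPOOL/VOLUME1` volume created above exists (the names here are illustrative): 82 | 
83 | ```go 84 | // Open the dataset together with all of its children. 85 | d, err := DatasetOpen("TESTPOOL/VOLUME1") 86 | if err != nil { 87 | 	println(err.Error()) 88 | 	return 89 | } 90 | // Handles must be closed to release the memory held by libzfs. 91 | defer d.Close() 92 | 
93 | // Properties are loaded on open and keyed by the Prop constants. 94 | if volsize, ok := d.Properties[DatasetPropVolsize]; ok { 95 | 	println("volsize:", volsize.Value) 96 | } 97 | ```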
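98 | 
99 | Snapshot sending is sketched below, writing the stream into a plain file; this again assumes the volume above exists, and the snapshot name, output path and default `SendFlags{}` are illustrative choices: 100 | 
101 | ```go 102 | // Take a snapshot, then stream it; the file can later be fed to Dataset.Receive. 103 | snap, err := DatasetSnapshot("TESTPOOL/VOLUME1@backup", false, make(map[Prop]Property)) 104 | if err != nil { 105 | 	println(err.Error()) 106 | 	return 107 | } 108 | defer snap.Close() 109 | 
110 | out, err := os.Create("/tmp/volume1-backup.zstream") 111 | if err != nil { 112 | 	println(err.Error()) 113 | 	return 114 | } 115 | defer out.Close() 116 | 
117 | if err = snap.Send(out, SendFlags{}); err != nil { 118 | 	println(err.Error()) 119 | } 120 | ```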
-------------------------------------------------------------------------------- /a_test.go: -------------------------------------------------------------------------------- 1 | package zfs_test 2 | 3 | import ( 4 | 	"testing" 5 | ) 6 | 7 | /* ------------------------------------------------------------------------- */ 8 | // TESTS ARE INTERDEPENDENT AND MUST RUN IN THE ORDER GIVEN 9 | 10 | func Test(t *testing.T) { 11 | 	zpoolTestPoolCreate(t) 12 | 	zpoolTestPoolVDevTree(t) 13 | 	zpoolTestExport(t) 14 | 	zpoolTestPoolImportSearch(t) 15 | 	zpoolTestImport(t) 16 | 	zpoolTestInitialization(t) 17 | 	zpoolTestExportForce(t) 18 | 	zpoolTestImportByGUID(t) 19 | 	zpoolTestPoolProp(t) 20 | 	zpoolTestPoolStatusAndState(t) 21 | 	zpoolTestPoolOpenAll(t) 22 | 	zpoolTestFailPoolOpen(t) 23 | 24 | 	zfsTestDatasetCreate(t) 25 | 	zfsTestDatasetOpen(t) 26 | 	// zfsTestMountPointConcurrency(t) 27 | 	// time.Sleep(15 * time.Second) 28 | 29 | 	zfsTestResumeTokenUnpack(t) 30 | 31 | 	zfsTestDatasetSnapshot(t) 32 | 	zfsTestSendSize(t) 33 | 	zfsTestDatasetOpenAll(t) 34 | 	zfsTestDatasetSetProperty(t) 35 | 	zfsTestDatasetHoldRelease(t) 36 | 37 | 	zfsTestDoubleFreeOnDestroy(t) 38 | 	zfsTestDatasetDestroy(t) 39 | 40 | 	zpoolTestPoolDestroy(t) 41 | 42 | 	cleanupVDisks() 43 | } 44 | -------------------------------------------------------------------------------- /common.c: -------------------------------------------------------------------------------- 1 | #include <libzfs.h> 2 | #include <string.h> 3 | #include <stdlib.h> 4 | #include <unistd.h> 5 | 6 | #include "common.h" 7 | 8 | libzfs_handle_ptr libzfsHandle; 9 | 10 | int go_libzfs_init() { 11 | 	libzfsHandle = libzfs_init(); 12 | 	return 0; 13 | } 14 | 15 | int libzfs_last_error() { 16 | 	return libzfs_errno(libzfsHandle); 17 | } 18 | 19 | const char *libzfs_last_error_str() { 20 | 	return libzfs_error_description(libzfsHandle); 21 | } 22 | 23 | int libzfs_clear_last_error() { 24 | 	zfs_standard_error(libzfsHandle, EZFS_SUCCESS, "success"); 25 | 	return 0; 26 | } 27 | 28 | property_list_t *new_property_list() { 29 | 	property_list_t *r = malloc(sizeof(property_list_t)); 30 | 	memset(r, 0, sizeof(property_list_t)); 31 | 	return r; 32 | } 33 | 34 | void free_properties(property_list_t *root) { 35 | 	property_list_t *tmp = 0; 36 | 	while(root) { 37 | 		tmp = root->pnext; 38 | 		free(root); 39 | 		root = tmp; 40 | 	} 41 | } 42 | 43 | nvlist_ptr new_property_nvlist() { 44 | 	nvlist_ptr props = NULL; 45 | 	int r = nvlist_alloc(&props, NV_UNIQUE_NAME, 0); 46 | 	if ( r != 0 ) { 47 | 		return NULL; 48 | 	} 49 | 	return props; 50 | } 51 | 52 | int property_nvlist_add(nvlist_ptr list, const char *prop, const char *value) { 53 | 	return nvlist_add_string(list, prop, value); 54 | } 55 | 56 | int redirect_libzfs_stdout(int to) { 57 | 	int save, res; 58 | 	save = dup(STDOUT_FILENO); 59 | 	if (save < 0) { 60 | 		return save; 61 | 	} 62 | 	res = dup2(to, STDOUT_FILENO); 63 | 	if (res < 0) { 64 | 		return res; 65 | 	} 66 | 	return save; 67 | } 68 | 69 | int restore_libzfs_stdout(int saved) { 70 | 	int res; 71 | 	fflush(stdout); 72 | 	res = dup2(saved, STDOUT_FILENO); 73 | 	if (res < 0) { 74 | 		return res; 75 | 	} 76 | 	close(saved); 77 | 	return res; 78 | } 79 | -------------------------------------------------------------------------------- /common.go: -------------------------------------------------------------------------------- 1 | // Package zfs implements basic manipulation of ZFS pools and data sets. 2 | // It uses the libzfs C library instead of the CLI zfs tools, with the goal 3 | // of making it easy to use and manipulate OpenZFS from within a Go project. 4 | // 5 | // TODO: Adding to the pool.
(Add the given vdevs to the pool) 6 | // TODO: Scan for pools. 7 | // 8 | // 9 | package zfs 10 | 11 | /* 12 | #cgo CFLAGS: -I /usr/include/libzfs -I /usr/include/libspl -DHAVE_IOCTL_IN_SYS_IOCTL_H -D_GNU_SOURCE 13 | #cgo LDFLAGS: -lzfs -lzpool -lnvpair 14 | 15 | #include <libzfs.h> 16 | #include <stdlib.h> 17 | #include "common.h" 18 | #include "zpool.h" 19 | #include "zfs.h" 20 | */ 21 | import "C" 22 | 23 | import ( 24 | 	"errors" 25 | 	"sync" 26 | ) 27 | 28 | // VDevType type of device in the pool 29 | type VDevType string 30 | 31 | func init() { 32 | 	C.go_libzfs_init() 33 | 	return 34 | } 35 | 36 | // Types of Virtual Devices 37 | const ( 38 | 	VDevTypeRoot VDevType = "root" // VDevTypeRoot root device in ZFS pool 39 | 	VDevTypeMirror = "mirror" // VDevTypeMirror mirror device in ZFS pool 40 | 	VDevTypeReplacing = "replacing" // VDevTypeReplacing replacing 41 | 	VDevTypeRaidz = "raidz" // VDevTypeRaidz RAIDZ device 42 | 	VDevTypeDisk = "disk" // VDevTypeDisk device is disk 43 | 	VDevTypeFile = "file" // VDevTypeFile device is file 44 | 	VDevTypeMissing = "missing" // VDevTypeMissing missing device 45 | 	VDevTypeHole = "hole" // VDevTypeHole hole 46 | 	VDevTypeSpare = "spare" // VDevTypeSpare spare device 47 | 	VDevTypeLog = "log" // VDevTypeLog ZIL device 48 | 	VDevTypeL2cache = "l2cache" // VDevTypeL2cache cache device (disk) 49 | ) 50 | 51 | // Prop type to enumerate all different properties supported by ZFS 52 | type Prop int 53 | 54 | // PoolStatus type representing status of the pool 55 | type PoolStatus int 56 | 57 | // PoolState type representing pool state 58 | type PoolState uint64 59 | 60 | // VDevState - vdev states type 61 | type VDevState uint64 62 | 63 | // VDevAux - vdev aux states 64 | type VDevAux uint64 65 | 66 | // Property ZFS pool or dataset property value 67 | type Property struct { 68 | 	Value string 69 | 	Source string 70 | } 71 | 72 | var Global struct { 73 | 	Mtx sync.Mutex 74 | } 75 | 76 | // Pool status 77 | const ( 78 | 	/* 79 | 	 * The following correspond to faults as defined in the (fault.fs.zfs.*) 80 | 	 * event namespace. Each is associated with a corresponding message ID. 81 | 	 */ 82 | 	PoolStatusCorruptCache PoolStatus = iota /* corrupt /kernel/drv/zpool.cache */ 83 | 	PoolStatusMissingDevR /* missing device with replicas */ 84 | 	PoolStatusMissingDevNr /* missing device with no replicas */ 85 | 	PoolStatusCorruptLabelR /* bad device label with replicas */ 86 | 	PoolStatusCorruptLabelNr /* bad device label with no replicas */ 87 | 	PoolStatusBadGUIDSum /* sum of device guids didn't match */ 88 | 	PoolStatusCorruptPool /* pool metadata is corrupted */ 89 | 	PoolStatusCorruptData /* data errors in user (meta)data */ 90 | 	PoolStatusFailingDev /* device experiencing errors */ 91 | 	PoolStatusVersionNewer /* newer on-disk version */ 92 | 	PoolStatusHostidMismatch /* last accessed by another system */ 93 | 	PoolStatusHosidActive /* currently active on another system */ 94 | 	PoolStatusHostidRequired /* multihost=on and hostid=0 */ 95 | 	PoolStatusIoFailureWait /* failed I/O, failmode 'wait' */ 96 | 	PoolStatusIoFailureContinue /* failed I/O, failmode 'continue' */ 97 | 	PoolStatusIOFailureMap /* failed MMP, failmode not 'panic' */ 98 | 	PoolStatusBadLog /* cannot read log chain(s) */ 99 | 	PoolStatusErrata /* informational errata available */ 100 | 101 | 	/* 102 | 	 * If the pool has unsupported features but can still be opened in 103 | 	 * read-only mode, its status is ZPOOL_STATUS_UNSUP_FEAT_WRITE.
If the 104 | 	 * pool has unsupported features but cannot be opened at all, its 105 | 	 * status is ZPOOL_STATUS_UNSUP_FEAT_READ. 106 | 	 */ 107 | 	PoolStatusUnsupFeatRead /* unsupported features for read */ 108 | 	PoolStatusUnsupFeatWrite /* unsupported features for write */ 109 | 110 | 	/* 111 | 	 * These faults have no corresponding message ID. At the time we are 112 | 	 * checking the status, the original reason for the FMA fault (I/O or 113 | 	 * checksum errors) has been lost. 114 | 	 */ 115 | 	PoolStatusFaultedDevR /* faulted device with replicas */ 116 | 	PoolStatusFaultedDevNr /* faulted device with no replicas */ 117 | 118 | 	/* 119 | 	 * The following are not faults per se, but still an error possibly 120 | 	 * requiring administrative attention. There is no corresponding 121 | 	 * message ID. 122 | 	 */ 123 | 	PoolStatusVersionOlder /* older legacy on-disk version */ 124 | 	PoolStatusFeatDisabled /* supported features are disabled */ 125 | 	PoolStatusResilvering /* device being resilvered */ 126 | 	PoolStatusOfflineDev /* device offline */ 127 | 	PoolStatusRemovedDev /* removed device */ 128 | 129 | 	/* 130 | 	 * Finally, the following indicates a healthy pool. 131 | 	 */ 132 | 	PoolStatusOk 133 | ) 134 | 135 | // Possible ZFS pool states 136 | const ( 137 | 	PoolStateActive PoolState = iota /* In active use */ 138 | 	PoolStateExported /* Explicitly exported */ 139 | 	PoolStateDestroyed /* Explicitly destroyed */ 140 | 	PoolStateSpare /* Reserved for hot spare use */ 141 | 	PoolStateL2cache /* Level 2 ARC device */ 142 | 	PoolStateUninitialized /* Internal spa_t state */ 143 | 	PoolStateUnavail /* Internal libzfs state */ 144 | 	PoolStatePotentiallyActive /* Internal libzfs state */ 145 | ) 146 | 147 | // Pool properties. Enumerates available ZFS pool properties. Use these to 148 | // read or set a specific pool property. 149 | const ( 150 | 	PoolPropCont Prop = iota - 2 151 | 	PoolPropInval 152 | 	PoolPropName 153 | 	PoolPropSize 154 | 	PoolPropCapacity 155 | 	PoolPropAltroot 156 | 	PoolPropHealth 157 | 	PoolPropGUID 158 | 	PoolPropVersion 159 | 	PoolPropBootfs 160 | 	PoolPropDelegation 161 | 	PoolPropAutoreplace 162 | 	PoolPropCachefile 163 | 	PoolPropFailuremode 164 | 	PoolPropListsnaps 165 | 	PoolPropAutoexpand 166 | 	PoolPropDedupditto 167 | 	PoolPropDedupratio 168 | 	PoolPropFree 169 | 	PoolPropAllocated 170 | 	PoolPropReadonly 171 | 	PoolPropAshift 172 | 	PoolPropComment 173 | 	PoolPropExpandsz 174 | 	PoolPropFreeing 175 | 	PoolPropFragmentaion 176 | 	PoolPropLeaked 177 | 	PoolPropMaxBlockSize 178 | 	PoolPropTName 179 | 	PoolPropMaxNodeSize 180 | 	PoolPropMultiHost 181 | 	PoolPropCheckpoint 182 | 	PoolPropLoadGuid 183 | 	PoolPropAutotrim 184 | 	PoolNumProps 185 | ) 186 | 187 | /* 188 |  * Dataset properties are identified by these constants and must be added to 189 |  * the end of this list to ensure that external consumers are not affected 190 |  * by the change. If you make any changes to this list, be sure to update 191 |  * the property table in module/zcommon/zfs_prop.c.
*/ 193 | const ( 194 | 	DatasetPropCont Prop = iota - 2 195 | 	DatasetPropBad 196 | 	DatasetPropType 197 | 	DatasetPropCreation 198 | 	DatasetPropUsed 199 | 	DatasetPropAvailable 200 | 	DatasetPropReferenced 201 | 	DatasetPropCompressratio 202 | 	DatasetPropMounted 203 | 	DatasetPropOrigin 204 | 	DatasetPropQuota 205 | 	DatasetPropReservation 206 | 	DatasetPropVolsize 207 | 	DatasetPropVolblocksize 208 | 	DatasetPropRecordsize 209 | 	DatasetPropMountpoint 210 | 	DatasetPropSharenfs 211 | 	DatasetPropChecksum 212 | 	DatasetPropCompression 213 | 	DatasetPropAtime 214 | 	DatasetPropDevices 215 | 	DatasetPropExec 216 | 	DatasetPropSetuid 217 | 	DatasetPropReadonly 218 | 	DatasetPropZoned 219 | 	DatasetPropSnapdir 220 | 	DatasetPropPrivate /* not exposed to user, temporary */ 221 | 	DatasetPropAclinherit 222 | 	DatasetPropCreateTXG /* not exposed to the user */ 223 | 	DatasetPropName /* not exposed to the user */ 224 | 	DatasetPropCanmount 225 | 	DatasetPropIscsioptions /* not exposed to the user */ 226 | 	DatasetPropXattr 227 | 	DatasetPropNumclones /* not exposed to the user */ 228 | 	DatasetPropCopies 229 | 	DatasetPropVersion 230 | 	DatasetPropUtf8only 231 | 	DatasetPropNormalize 232 | 	DatasetPropCase 233 | 	DatasetPropVscan 234 | 	DatasetPropNbmand 235 | 	DatasetPropSharesmb 236 | 	DatasetPropRefquota 237 | 	DatasetPropRefreservation 238 | 	DatasetPropGUID 239 | 	DatasetPropPrimarycache 240 | 	DatasetPropSecondarycache 241 | 	DatasetPropUsedsnap 242 | 	DatasetPropUsedds 243 | 	DatasetPropUsedchild 244 | 	DatasetPropUsedrefreserv 245 | 	DatasetPropUseraccounting /* not exposed to the user */ 246 | 	DatasetPropStmfShareinfo /* not exposed to the user */ 247 | 	DatasetPropDeferDestroy 248 | 	DatasetPropUserrefs 249 | 	DatasetPropLogbias 250 | 	DatasetPropUnique /* not exposed to the user */ 251 | 	DatasetPropObjsetid /* not exposed to the user */ 252 | 	DatasetPropDedup 253 | 	DatasetPropMlslabel 254 | 	DatasetPropSync 255 | 	DatasetPropDnodeSize 256 | 	DatasetPropRefratio 257 | 	DatasetPropWritten 258 | 	DatasetPropClones 259 | 	DatasetPropLogicalused 260 | 	DatasetPropLogicalreferenced 261 | 	DatasetPropInconsistent /* not exposed to the user */ 262 | 	DatasetPropVolmode 263 | 	DatasetPropFilesystemLimit 264 | 	DatasetPropSnapshotLimit 265 | 	DatasetPropFilesystemCount 266 | 	DatasetPropSnapshotCount 267 | 	DatasetPropSnapdev 268 | 	DatasetPropAcltype 269 | 	DatasetPropSelinuxContext 270 | 	DatasetPropSelinuxFsContext 271 | 	DatasetPropSelinuxDefContext 272 | 	DatasetPropSelinuxRootContext 273 | 	DatasetPropRelatime 274 | 	DatasetPropRedundantMetadata 275 | 	DatasetPropOverlay 276 | 	DatasetPropPrevSnap 277 | 	DatasetPropReceiveResumeToken 278 | 	DatasetPropEncryption 279 | 	DatasetPropKeyLocation 280 | 	DatasetPropKeyFormat 281 | 	DatasetPropPBKDF2Salt 282 | 	DatasetPropPBKDF2Iters 283 | 	DatasetPropEncryptionRoot 284 | 	DatasetPropKeyGUID 285 | 	DatasetPropKeyStatus 286 | 	DatasetPropRemapTXG /* not exposed to the user */ 287 | 	DatasetNumProps 288 | ) 289 | 290 | // LastError gets the last underlying libzfs error description, if any 291 | func LastError() (err error) { 292 | 	return errors.New(C.GoString(C.libzfs_last_error_str())) 293 | } 294 | 295 | // ClearLastError forcibly clears any last error set by the underlying libzfs 296 | func ClearLastError() (err error) { 297 | 	err = LastError() 298 | 	C.libzfs_clear_last_error() 299 | 	return 300 | } 301 | 302 | func booleanT(b bool) (r C.boolean_t) { 303 | 	if b { 304 | 		return 1 305 | 	} 306 | 	return 0 307 | } 308 | 309 | // ZFS errors 310 | const ( 311 | 	ESuccess = 0 /* no error -- success */ 312 | 	ENomem = 2000 + iota - 1 /* out of
memory */ 313 | EBadprop /* invalid property value */ 314 | EPropreadonly /* cannot set readonly property */ 315 | EProptype /* property does not apply to dataset type */ 316 | EPropnoninherit /* property is not inheritable */ 317 | EPropspace /* bad quota or reservation */ 318 | EBadtype /* dataset is not of appropriate type */ 319 | EBusy /* pool or dataset is busy */ 320 | EExists /* pool or dataset already exists */ 321 | ENoent /* no such pool or dataset */ 322 | EBadstream /* bad backup stream */ 323 | EDsreadonly /* dataset is readonly */ 324 | EVoltoobig /* volume is too large for 32-bit system */ 325 | EInvalidname /* invalid dataset name */ 326 | EBadrestore /* unable to restore to destination */ 327 | EBadbackup /* backup failed */ 328 | EBadtarget /* bad attach/detach/replace target */ 329 | ENodevice /* no such device in pool */ 330 | EBaddev /* invalid device to add */ 331 | ENoreplicas /* no valid replicas */ 332 | EResilvering /* currently resilvering */ 333 | EBadversion /* unsupported version */ 334 | EPoolunavail /* pool is currently unavailable */ 335 | EDevoverflow /* too many devices in one vdev */ 336 | EBadpath /* must be an absolute path */ 337 | ECrosstarget /* rename or clone across pool or dataset */ 338 | EZoned /* used improperly in local zone */ 339 | EMountfailed /* failed to mount dataset */ 340 | EUmountfailed /* failed to unmount dataset */ 341 | EUnsharenfsfailed /* unshare(1M) failed */ 342 | ESharenfsfailed /* share(1M) failed */ 343 | EPerm /* permission denied */ 344 | ENospc /* out of space */ 345 | EFault /* bad address */ 346 | EIo /* I/O error */ 347 | EIntr /* signal received */ 348 | EIsspare /* device is a hot spare */ 349 | EInvalconfig /* invalid vdev configuration */ 350 | ERecursive /* recursive dependency */ 351 | ENohistory /* no history object */ 352 | EPoolprops /* couldn't retrieve pool props */ 353 | EPoolNotsup /* ops not supported for this type of pool */ 354 | EPoolInvalarg /* invalid argument for this pool operation */ 355 | ENametoolong /* dataset name is too long */ 356 | EOpenfailed /* open of device failed */ 357 | ENocap /* couldn't get capacity */ 358 | ELabelfailed /* write of label failed */ 359 | EBadwho /* invalid permission who */ 360 | EBadperm /* invalid permission */ 361 | EBadpermset /* invalid permission set name */ 362 | ENodelegation /* delegated administration is disabled */ 363 | EUnsharesmbfailed /* failed to unshare over smb */ 364 | ESharesmbfailed /* failed to share over smb */ 365 | EBadcache /* bad cache file */ 366 | EIsl2CACHE /* device is for the level 2 ARC */ 367 | EVdevnotsup /* unsupported vdev type */ 368 | ENotsup /* ops not supported on this dataset */ 369 | EActiveSpare /* pool has active shared spare devices */ 370 | EUnplayedLogs /* log device has unplayed logs */ 371 | EReftagRele /* snapshot release: tag not found */ 372 | EReftagHold /* snapshot hold: tag already exists */ 373 | ETagtoolong /* snapshot hold/rele: tag too long */ 374 | EPipefailed /* pipe create failed */ 375 | EThreadcreatefailed /* thread create failed */ 376 | EPostsplitOnline /* onlining a disk after splitting it */ 377 | EScrubbing /* currently scrubbing */ 378 | ENoScrub /* no active scrub */ 379 | EDiff /* general failure of zfs diff */ 380 | EDiffdata /* bad zfs diff data */ 381 | EPoolreadonly /* pool is in read-only mode */ 382 | EScrubpaused /* scrub currently paused */ 383 | EActivepool /* pool is imported on a different system */ 384 | ECryptofailed /* failed to setup encryption */ 385 | ENopending /* cannot 
cancel, no operation is pending */ 386 | ECheckpointExists /* checkpoint exists */ 387 | EDiscardingCheckpoint /* currently discarding a checkpoint */ 388 | ENoCheckpoint /* pool has no checkpoint */ 389 | EDevrmInProgress /* a device is currently being removed */ 390 | EVdevTooBig /* a device is too big to be used */ 391 | EIocNotsupported /* operation not supported by zfs module */ 392 | EToomany /* argument list too long */ 393 | EInitializing /* currently initializing */ 394 | ENoInitialize /* no active initialize */ 395 | EWrongParent /* invalid parent dataset (e.g ZVOL) */ 396 | ETrimming /* currently trimming */ 397 | ENoTrim /* no active trim */ 398 | ETrimNotsup /* device does not support trim */ 399 | ENoResilverDefer /* pool doesn't support resilver_defer */ 400 | EExportInProgress /* currently exporting the pool */ 401 | EUnknown 402 | ) 403 | 404 | // vdev states are ordered from least to most healthy. 405 | // A vdev that's VDevStateCantOpen or below is considered unusable. 406 | const ( 407 | VDevStateUnknown VDevState = iota // Uninitialized vdev 408 | VDevStateClosed // Not currently open 409 | VDevStateOffline // Not allowed to open 410 | VDevStateRemoved // Explicitly removed from system 411 | VDevStateCantOpen // Tried to open, but failed 412 | VDevStateFaulted // External request to fault device 413 | VDevStateDegraded // Replicated vdev with unhealthy kids 414 | VDevStateHealthy // Presumed good 415 | ) 416 | 417 | // vdev aux states. When a vdev is in the VDevStateCantOpen state, the aux field 418 | // of the vdev stats structure uses these constants to distinguish why. 419 | const ( 420 | VDevAuxNone VDevAux = iota // no error 421 | VDevAuxOpenFailed // ldi_open_*() or vn_open() failed 422 | VDevAuxCorruptData // bad label or disk contents 423 | VDevAuxNoReplicas // insufficient number of replicas 424 | VDevAuxBadGUIDSum // vdev guid sum doesn't match 425 | VDevAuxTooSmall // vdev size is too small 426 | VDevAuxBadLabel // the label is OK but invalid 427 | VDevAuxVersionNewer // on-disk version is too new 428 | VDevAuxVersionOlder // on-disk version is too old 429 | VDevAuxUnsupFeat // unsupported features 430 | VDevAuxSpared // hot spare used in another pool 431 | VDevAuxErrExceeded // too many errors 432 | VDevAuxIOFailure // experienced I/O failure 433 | VDevAuxBadLog // cannot read log chain(s) 434 | VDevAuxExternal // external diagnosis 435 | VDevAuxSplitPool // vdev was split off into another pool 436 | ) 437 | 438 | // status strings used by the zfs CLI when reporting zpool status. 439 | // These make it easier for users of this library to report status. 440 | const ( 441 | MsgPoolStatusMissingDevR = `One or more devices could not be opened. Sufficient replicas exist for the pool to continue functioning in a degraded state.` 442 | MsgPoolStatusMissingDevNr = `One or more devices could not be opened. There are insufficient replicas for the pool to continue functioning.` 443 | MsgPoolStatusCorruptLabelR = `One or more devices could not be used because the label is missing or invalid. Sufficient replicas exist for the pool to continue functioning in a degraded state.` 444 | MsgPoolStatusCorruptLabelNr = `One or more devices could not be used because the label is missing or invalid. There are insufficient replicas for the pool to continue functioning.` 445 | MsgPoolStatusCorruptPool = `The pool metadata is corrupted and the pool cannot be opened.` 446 | MsgPoolStatusCorruptData = `One or more devices has experienced an error resulting in data corruption. 
Applications may be affected.` 447 | 	MsgPoolStatusFailingDev = `One or more devices has experienced an unrecoverable error. An attempt was made to correct the error. Applications are unaffected.` 448 | 	MsgPoolStatusOfflineDev = `One or more devices has been taken offline by the administrator. Sufficient replicas exist for the pool to continue functioning in a degraded state.` 449 | 	MsgPoolStatusRemovedDev = `One or more devices has been removed by the administrator. Sufficient replicas exist for the pool to continue functioning in a degraded state.` 450 | 	MsgPoolStatusResilvering = `One or more devices is currently being resilvered. The pool will continue to function, possibly in a degraded state.` 451 | 	MsgPoolStatusRebuilding = MsgPoolStatusResilvering 452 | 	MsgPoolStatusVersionNewer = `The pool has been upgraded to a newer, incompatible on-disk version. The pool cannot be accessed on this system.` 453 | 	MsgPoolStatusVersionOlder = `The pool is formatted using a legacy on-disk format. The pool can still be used, but some features are unavailable.` 454 | 	MsgPoolStatusFeatDisabled = `Some supported and requested features are not enabled on the pool. The pool can still be used, but some features are unavailable.` 455 | 	MsgPoolStatusUnsupFeatRead = `The pool cannot be accessed on this system because it uses feature(s) not supported on this system.` 456 | 	MsgPoolStatusUnsupFeatWrite = `The pool can only be accessed in read-only mode on this system. It cannot be accessed in read-write mode because it uses feature(s) not supported on this system.` 457 | 	MsgPoolStatusFaultedDevR = `One or more devices are faulted in response to persistent errors. Sufficient replicas exist for the pool to continue functioning in a degraded state.` 458 | 	MsgPoolStatusFaultedDevNr = `One or more devices are faulted in response to persistent errors. There are insufficient replicas for the pool to continue functioning.` 459 | 	MsgPoolStatusHostidMismatch = `Mismatch between pool hostid and system hostid on imported pool. This pool was previously imported into a system with a different hostid, and then was verbatim imported into this system.` 460 | 	MsgPoolStatusHosidActive = `The pool is currently imported by another system.` 461 | 	MsgPoolStatusHostidRequired = `The pool has the multihost property on. It cannot be safely imported when the system hostid is not set.` 462 | 	MsgPoolStatusIoFailureWait = `One or more devices are faulted in response to IO failures.` 463 | 	MsgPoolStatusIoFailureContinue = MsgPoolStatusIoFailureWait 464 | 	MsgPoolStatusIOFailureMap = `The pool is suspended because multihost writes failed or were delayed; another system could import the pool undetected.` 465 | 	MsgPoolStatusBadLog = `An intent log record could not be read. Waiting for administrator intervention to fix the faulted pool.` 466 | 	MsgPoolStatusErrata = `Errata detected.` 467 | ) 468 | 469 | // action strings 470 | const ( 471 | 	ActionPoolStatusMissingDevR = `Attach the missing device and online it using 'zpool online'.` 472 | 	ActionPoolStatusMissingDevNr = `Attach the missing device and online it using 'zpool online'.` 473 | 	ActionPoolStatusCorruptLabelR = `Replace the device using 'zpool replace'.` 474 | 	ActionPoolStatusCorruptLabelNr = `` 475 | 	ActionPoolStatusCorruptPool = `` 476 | 	ActionPoolStatusCorruptData = `Restore the file in question if possible.
Otherwise restore the entire pool from backup.` 477 | 	ActionPoolStatusFailingDev = `Determine if the device needs to be replaced, and clear the errors using 'zpool clear' or replace the device with 'zpool replace'.` 478 | 	ActionPoolStatusOfflineDev = `Online the device using 'zpool online' or replace the device with 'zpool replace'.` 479 | 	ActionPoolStatusRemovedDev = `Online the device using 'zpool online' or replace the device with 'zpool replace'.` 480 | 	ActionPoolStatusResilvering = `Wait for the resilver to complete.` 481 | 	ActionPoolStatusRebuilding = ActionPoolStatusResilvering 482 | 	ActionPoolStatusVersionNewer = `Access the pool from a system running more recent software, or restore the pool from backup.` 483 | 	ActionPoolStatusVersionOlder = `Upgrade the pool using 'zpool upgrade'. Once this is done, the pool will no longer be accessible on software that does not support feature flags.` 484 | 	ActionPoolStatusFeatDisabled = `Enable all features using 'zpool upgrade'. Once this is done, the pool may no longer be accessible by software that does not support the features. See zpool-features(7) for details.` 485 | 	ActionPoolStatusUnsupFeatRead = `Access the pool from a system that supports the required feature(s), or restore the pool from backup.` 486 | 	ActionPoolStatusUnsupFeatWrite = `The pool cannot be accessed in read-write mode. Import the pool with "-o readonly=on", access the pool from a system that supports the required feature(s), or restore the pool from backup.` 487 | 	ActionPoolStatusFaultedDevR = `Replace the faulted device, or use 'zpool clear' to mark the device repaired.` 488 | 	ActionPoolStatusFaultedDevNr = `Destroy and re-create the pool from a backup source. Manually marking the device repaired using 'zpool clear' may allow some data to be recovered.` 489 | 	ActionPoolStatusHostidMismatch = `Export this pool on all systems on which it is imported. Then import it to correct the mismatch.` 490 | 	ActionPoolStatusHosidActive = `` 491 | 	ActionPoolStatusHostidRequired = `` 492 | 	ActionPoolStatusIoFailureWait = `Make sure the affected devices are connected, then run 'zpool clear'.` 493 | 	ActionPoolStatusIoFailureContinue = ActionPoolStatusIoFailureWait 494 | 	ActionPoolStatusIOFailureMap = `Make sure the pool's devices are connected, then reboot your system and import the pool.` 495 | 	ActionPoolStatusBadLog = `Either restore the affected device(s) and run 'zpool online', or ignore the intent log records by running 'zpool clear'.` 496 | 	ActionPoolStatusErrata = `` 497 | ) 498 | 499 | // GetStatusMessages gets the status and action message for a given PoolStatus. 500 | // If none is available, it returns "" for each.
501 | func GetStatusMessages(status PoolStatus) (msg, action string) { 502 | 	switch status { 503 | 	case PoolStatusCorruptCache: 504 | 		// no msg or action for this, but leaving blank as a sign for later 505 | 	case PoolStatusMissingDevNr: 506 | 		msg = MsgPoolStatusMissingDevNr 507 | 		action = ActionPoolStatusMissingDevNr 508 | 	case PoolStatusCorruptLabelR: 509 | 		msg = MsgPoolStatusCorruptLabelR 510 | 		action = ActionPoolStatusCorruptLabelR 511 | 	case PoolStatusCorruptLabelNr: 512 | 		msg = MsgPoolStatusCorruptLabelNr 513 | 		action = ActionPoolStatusCorruptLabelNr 514 | 	case PoolStatusBadGUIDSum: 515 | 		// no msg or action for this, but leaving blank as a sign for later 516 | 	case PoolStatusCorruptPool: 517 | 		msg = MsgPoolStatusCorruptPool 518 | 		action = ActionPoolStatusCorruptPool 519 | 	case PoolStatusCorruptData: 520 | 		msg = MsgPoolStatusCorruptData 521 | 		action = ActionPoolStatusCorruptData 522 | 	case PoolStatusFailingDev: 523 | 		msg = MsgPoolStatusFailingDev 524 | 		action = ActionPoolStatusFailingDev 525 | 	case PoolStatusVersionNewer: 526 | 		msg = MsgPoolStatusVersionNewer 527 | 		action = ActionPoolStatusVersionNewer 528 | 	case PoolStatusHostidMismatch: 529 | 		msg = MsgPoolStatusHostidMismatch 530 | 		action = ActionPoolStatusHostidMismatch 531 | 	case PoolStatusHosidActive: 532 | 		msg = MsgPoolStatusHosidActive 533 | 		action = ActionPoolStatusHosidActive 534 | 	case PoolStatusHostidRequired: 535 | 		msg = MsgPoolStatusHostidRequired 536 | 		action = ActionPoolStatusHostidRequired 537 | 	case PoolStatusIoFailureWait: 538 | 		msg = MsgPoolStatusIoFailureWait 539 | 		action = ActionPoolStatusIoFailureWait 540 | 	case PoolStatusIoFailureContinue: 541 | 		msg = MsgPoolStatusIoFailureContinue 542 | 		action = ActionPoolStatusIoFailureContinue 543 | 	case PoolStatusIOFailureMap: 544 | 		msg = MsgPoolStatusIOFailureMap 545 | 		action = ActionPoolStatusIOFailureMap 546 | 	case PoolStatusBadLog: 547 | 		msg = MsgPoolStatusBadLog 548 | 		action = ActionPoolStatusBadLog 549 | 	case PoolStatusErrata: 550 | 		msg = MsgPoolStatusErrata 551 | 		action = ActionPoolStatusErrata 552 | 	case PoolStatusUnsupFeatRead: 553 | 		msg = MsgPoolStatusUnsupFeatRead 554 | 		action = ActionPoolStatusUnsupFeatRead 555 | 	case PoolStatusUnsupFeatWrite: 556 | 		msg = MsgPoolStatusUnsupFeatWrite 557 | 		action = ActionPoolStatusUnsupFeatWrite 558 | 	case PoolStatusFaultedDevR: 559 | 		msg = MsgPoolStatusFaultedDevR 560 | 		action = ActionPoolStatusFaultedDevR 561 | 	case PoolStatusFaultedDevNr: 562 | 		msg = MsgPoolStatusFaultedDevNr 563 | 		action = ActionPoolStatusFaultedDevNr 564 | 	case PoolStatusVersionOlder: 565 | 		msg = MsgPoolStatusVersionOlder 566 | 		action = ActionPoolStatusVersionOlder 567 | 	case PoolStatusFeatDisabled: 568 | 		msg = MsgPoolStatusFeatDisabled 569 | 		action = ActionPoolStatusFeatDisabled 570 | 	case PoolStatusResilvering: 571 | 		msg = MsgPoolStatusResilvering 572 | 		action = ActionPoolStatusResilvering 573 | 	case PoolStatusOfflineDev: 574 | 		msg = MsgPoolStatusOfflineDev 575 | 		action = ActionPoolStatusOfflineDev 576 | 	case PoolStatusRemovedDev: 577 | 		msg = MsgPoolStatusRemovedDev 578 | 		action = ActionPoolStatusRemovedDev 579 | 	case PoolStatusMissingDevR: 580 | 		msg = MsgPoolStatusMissingDevR 581 | 		action = ActionPoolStatusMissingDevR 582 | 	case PoolStatusOk: 583 | 		msg = "" 584 | 		action = "" 585 | 	} 586 | 	return msg, action 587 | } 588 | -------------------------------------------------------------------------------- /common.h: -------------------------------------------------------------------------------- 1 | /* C wrappers around some zfs calls and C in general that should simplify 2 |  * using libzfs from Go, making Go code shorter and more readable.
3 |  */ 4 | 5 | #ifndef loff_t 6 | #define loff_t off_t 7 | #endif 8 | #define INT_MAX_NAME 256 9 | #define INT_MAX_VALUE 1024 10 | #define ZAP_OLDMAXVALUELEN 1024 11 | #define ZFS_MAX_DATASET_NAME_LEN 256 12 | 13 | typedef struct property_list { 14 | 	char value[INT_MAX_VALUE]; 15 | 	char source[ZFS_MAX_DATASET_NAME_LEN]; 16 | 	int property; 17 | 	void *pnext; 18 | } property_list_t; 19 | 20 | typedef struct libzfs_handle* libzfs_handle_ptr; 21 | typedef struct nvlist* nvlist_ptr; 22 | typedef struct property_list *property_list_ptr; 23 | typedef struct nvpair* nvpair_ptr; 24 | typedef struct vdev_stat* vdev_stat_ptr; 25 | typedef char* char_ptr; 26 | 27 | extern libzfs_handle_ptr libzfsHandle; 28 | 29 | int go_libzfs_init(); 30 | 31 | int libzfs_last_error(); 32 | const char *libzfs_last_error_str(); 33 | int libzfs_clear_last_error(); 34 | 35 | property_list_t *new_property_list(); 36 | void free_properties(property_list_t *root); 37 | 38 | nvlist_ptr new_property_nvlist(); 39 | int property_nvlist_add(nvlist_ptr ptr, const char* prop, const char *value); 40 | 41 | int redirect_libzfs_stdout(int to); 42 | int restore_libzfs_stdout(int saved); 43 | 44 | -------------------------------------------------------------------------------- /destroy_test.go: -------------------------------------------------------------------------------- 1 | package zfs_test 2 | 3 | import ( 4 | 	"testing" 5 | 6 | 	zfs "github.com/bicomsystems/go-libzfs" 7 | ) 8 | 9 | func TestDataset_DestroyPromote(t *testing.T) { 10 | 	zpoolTestPoolCreate(t) 11 | 	// defer zpoolTestPoolDestroy(t) 12 | 	var c1, c2 zfs.Dataset 13 | 14 | 	props := make(map[zfs.Prop]zfs.Property) 15 | 16 | 	d, err := zfs.DatasetCreate(TSTPoolName+"/original", 17 | 		zfs.DatasetTypeFilesystem, make(map[zfs.Prop]zfs.Property)) 18 | 	if err != nil { 19 | 		t.Errorf("DatasetCreate(\"%s/original\") error: %v", TSTPoolName, err) 20 | 		return 21 | 	} 22 | 23 | 	s1, _ := zfs.DatasetSnapshot(d.Properties[zfs.DatasetPropName].Value+"@snap2", false, props) 24 | 	s2, _ := zfs.DatasetSnapshot(d.Properties[zfs.DatasetPropName].Value+"@snap1", false, props) 25 | 26 | 	c1, err = s1.Clone(TSTPoolName+"/clone1", nil) 27 | 	if err != nil { 28 | 		t.Errorf("s1.Clone(\"%s/clone1\", nil) error: %v", TSTPoolName, err) 29 | 		d.Close() 30 | 		return 31 | 	} 32 | 	s1.Close() 33 | 34 | 	zfs.DatasetSnapshot(c1.Properties[zfs.DatasetPropName].Value+"@snap1", false, props) 35 | 36 | 	c2, err = s2.Clone(TSTPoolName+"/clone2", nil) 37 | 	if err != nil { 38 | 		t.Errorf("s2.Clone(\"%s/clone2\", nil) error: %v", TSTPoolName, err) 39 | 		d.Close() 40 | 		c1.Close() 41 | 		return 42 | 	} 43 | 	s2.Close() 44 | 45 | 	zfs.DatasetSnapshot(c2.Properties[zfs.DatasetPropName].Value+"@snap0", false, props) 46 | 	c1.Close() 47 | 	c2.Close() 48 | 49 | 	// reopen the dataset 50 | 	d.Close() 51 | 	if d, err = zfs.DatasetOpen(TSTPoolName + "/original"); err != nil { 52 | 		t.Error("zfs.DatasetOpen") 53 | 		return 54 | 	} 55 | 56 | 	if err = d.DestroyPromote(); err != nil { 57 | 		t.Errorf("DestroyPromote error: %v", err) 58 | 		d.Close() 59 | 		return 60 | 	} 61 | 	t.Log("Destroy promote completed with success") 62 | 	d.Close() 63 | 	zpoolTestPoolDestroy(t) 64 | 	cleanupVDisks() 65 | } 66 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/bicomsystems/go-libzfs 2 | 3 | go 1.13 4 | -------------------------------------------------------------------------------- /sendrecv.go:
-------------------------------------------------------------------------------- 1 | package zfs 2 | 3 | // #include <libzfs.h> 4 | // #include <stdlib.h> 5 | // #include "common.h" 6 | // #include "zpool.h" 7 | // #include "zfs.h" 8 | // #include <libnvpair.h> 9 | // #include <memory.h> 10 | import "C" 11 | import ( 12 | 	"fmt" 13 | 	"io/ioutil" 14 | 	"os" 15 | 	"regexp" 16 | 	"strconv" 17 | 	"strings" 18 | 	"time" 19 | 	"unsafe" 20 | ) 21 | 22 | // SendFlags send flags 23 | type SendFlags struct { 24 | 	Verbose bool // -v 25 | 	Replicate bool // -R 26 | 	DoAll bool // -I 27 | 	FromOrigin bool // -o 28 | 	Dedup bool // -D 29 | 	Props bool // -p 30 | 	DryRun bool // -n 31 | 	Parsable bool // -P 32 | 	Progress bool // show progress (ie. -v) 33 | 	LargeBlock bool // -L 34 | 	EmbedData bool // -e 35 | 	Compress bool // -c 36 | 	Raw bool // raw encrypted records are permitted 37 | 	Backup bool // only send received properties (ie. -b) 38 | 	Holds bool // include snapshot holds in send stream 39 | } 40 | 41 | // RecvFlags receive flags 42 | type RecvFlags struct { 43 | 	Verbose bool // -v 44 | 	IsPrefix bool // -d 45 | 	IsTail bool // -e 46 | 	DryRun bool // -n 47 | 	Force bool // -r 48 | 	Resumable bool // -s 49 | 	NoMount bool // -u 50 | 	CanmountOff bool 51 | 	ByteSwap bool 52 | } 53 | 54 | // ResumeToken - information extracted from a resume token 55 | type ResumeToken struct { 56 | 	ToName string 57 | 	FromName string 58 | 	Object uint64 59 | 	Offset uint64 60 | 	ToGUID uint64 61 | 	FromGUID uint64 62 | 	Bytes uint64 63 | 	LargeBlock bool 64 | 	EmbedOk bool 65 | 	CompressOk bool 66 | 	RawOk bool 67 | } 68 | 69 | func to_boolean_t(a bool) C.boolean_t { 70 | 	if a { 71 | 		return 1 72 | 	} 73 | 	return 0 74 | } 75 | 76 | func to_sendflags_t(flags *SendFlags) (cflags *C.sendflags_t) { 77 | 	cflags = C.alloc_sendflags() 78 | 	cflags.verbose = to_boolean_t(flags.Verbose) 79 | 	cflags.replicate = to_boolean_t(flags.Replicate) 80 | 	cflags.doall = to_boolean_t(flags.DoAll) 81 | 	cflags.fromorigin = to_boolean_t(flags.FromOrigin) 82 | 	cflags.dedup = to_boolean_t(flags.Dedup) 83 | 	cflags.props = to_boolean_t(flags.Props) 84 | 	cflags.dryrun = to_boolean_t(flags.DryRun) 85 | 	cflags.parsable = to_boolean_t(flags.Parsable) 86 | 	cflags.progress = to_boolean_t(flags.Progress) 87 | 	cflags.largeblock = to_boolean_t(flags.LargeBlock) 88 | 	cflags.embed_data = to_boolean_t(flags.EmbedData) 89 | 	cflags.compress = to_boolean_t(flags.Compress) 90 | 	cflags.raw = to_boolean_t(flags.Raw) 91 | 	cflags.backup = to_boolean_t(flags.Backup) 92 | 	cflags.holds = to_boolean_t(flags.Holds) 93 | 	return 94 | } 95 | 96 | func to_recvflags_t(flags *RecvFlags) (cflags *C.recvflags_t) { 97 | 	cflags = C.alloc_recvflags() 98 | 	cflags.verbose = to_boolean_t(flags.Verbose) 99 | 	cflags.isprefix = to_boolean_t(flags.IsPrefix) 100 | 	cflags.istail = to_boolean_t(flags.IsTail) 101 | 	cflags.dryrun = to_boolean_t(flags.DryRun) 102 | 	cflags.force = to_boolean_t(flags.Force) 103 | 	cflags.canmountoff = to_boolean_t(flags.CanmountOff) 104 | 	cflags.resumable = to_boolean_t(flags.Resumable) 105 | 	cflags.byteswap = to_boolean_t(flags.ByteSwap) 106 | 	cflags.nomount = to_boolean_t(flags.NoMount) 107 | 	return 108 | } 109 | 110 | func (d *Dataset) send(FromName string, outf *os.File, flags *SendFlags) (err error) { 111 | 	var cfromname, ctoname *C.char 112 | 	var dpath string 113 | 	var pd Dataset 114 | 115 | 	if d.Type != DatasetTypeSnapshot || (len(FromName) > 0 && strings.Contains(FromName, "#")) { 116 | 		err = fmt.Errorf( 117 | 			"Unsupported method on filesystem or bookmark.
Use func SendOne() for that purpose.") 118 | 		return 119 | 	} 120 | 121 | 	cflags := to_sendflags_t(flags) 122 | 	defer C.free(unsafe.Pointer(cflags)) 123 | 	if dpath, err = d.Path(); err != nil { 124 | 		return 125 | 	} 126 | 	sendparams := strings.Split(dpath, "@") 127 | 	parent := sendparams[0] 128 | 	if len(FromName) > 0 { 129 | 		if FromName[0] == '@' { 130 | 			FromName = FromName[1:] 131 | 		} else if strings.Contains(FromName, "/") { 132 | 			from := strings.Split(FromName, "@") 133 | 			if len(from) > 0 { 134 | 				FromName = from[1] 135 | 			} 136 | 		} 137 | 		cfromname = C.CString(FromName) 138 | 		defer C.free(unsafe.Pointer(cfromname)) 139 | 	} 140 | 	ctoname = C.CString(sendparams[1]) 141 | 	defer C.free(unsafe.Pointer(ctoname)) 142 | 	if pd, err = DatasetOpen(parent); err != nil { 143 | 		return 144 | 	} 145 | 	defer pd.Close() 146 | 	cerr := C.zfs_send(pd.list.zh, cfromname, ctoname, cflags, C.int(outf.Fd()), nil, nil, nil) 147 | 	if cerr != 0 { 148 | 		err = LastError() 149 | 	} 150 | 	return 151 | } 152 | 153 | func (d *Dataset) SendResume(outf *os.File, flags *SendFlags, receiveResumeToken string) (err error) { 154 | 	if d.Type != DatasetTypeSnapshot { 155 | 		err = fmt.Errorf("Unsupported method on filesystem or bookmark. Use func SendOne() for that purpose.") 156 | 		return 157 | 	} 158 | 159 | 	var dpath string 160 | 	var pd Dataset 161 | 162 | 	cflags := to_sendflags_t(flags) 163 | 	defer C.free(unsafe.Pointer(cflags)) 164 | 	if dpath, err = d.Path(); err != nil { 165 | 		return 166 | 	} 167 | 	sendparams := strings.Split(dpath, "@") 168 | 	parent := sendparams[0] 169 | 170 | 	if pd, err = DatasetOpen(parent); err != nil { 171 | 		return 172 | 	} 173 | 	defer pd.Close() 174 | 175 | 	cReceiveResumeToken := C.CString(receiveResumeToken) 176 | 	defer C.free(unsafe.Pointer(cReceiveResumeToken)) 177 | 178 | 	clerr := C.zfs_send_resume(C.libzfsHandle, cflags, C.int(outf.Fd()), cReceiveResumeToken) 179 | 	if clerr != 0 { 180 | 		err = LastError() 181 | 	} 182 | 183 | 	return 184 | } 185 | 186 | func (d *Dataset) Send(outf *os.File, flags SendFlags) (err error) { 187 | 	if flags.Replicate { 188 | 		flags.DoAll = true 189 | 	} 190 | 	err = d.send("", outf, &flags) 191 | 	return 192 | } 193 | 194 | func (d *Dataset) SendFrom(FromName string, outf *os.File, flags SendFlags) (err error) { 195 | 	var porigin Property 196 | 	var from, dest []string 197 | 	if err = d.ReloadProperties(); err != nil { 198 | 		return 199 | 	} 200 | 	porigin, _ = d.GetProperty(DatasetPropOrigin) 201 | 	if len(porigin.Value) > 0 && porigin.Value == FromName { 202 | 		FromName = "" 203 | 		flags.FromOrigin = true 204 | 	} else { 205 | 		var dpath string 206 | 		if dpath, err = d.Path(); err != nil { 207 | 			return 208 | 		} 209 | 		dest = strings.Split(dpath, "@") 210 | 		from = strings.Split(FromName, "@") 211 | 212 | 		if len(from[0]) > 0 && from[0] != dest[0] { 213 | 			err = fmt.Errorf("Incremental source must be in same filesystem.") 214 | 			return 215 | 		} 216 | 		if len(from) < 2 || strings.Contains(from[1], "@") || strings.Contains(from[1], "/") { 217 | 			err = fmt.Errorf("Invalid incremental source.") 218 | 			return 219 | 		} 220 | 	} 221 | 	err = d.send(FromName, outf, &flags) 222 | 	return 223 | } 224 | 225 | // SendSize - estimate snapshot size to transfer 226 | func (d *Dataset) SendSize(FromName string, flags SendFlags) (size int64, err error) { 227 | 	var r, w *os.File 228 | 	errch := make(chan error) 229 | 	defer func() { 230 | 		select { 231 | 		case <-errch: 232 | 		default: 233 | 		} 234 | 		close(errch) 235 | 	}() 236 | 	flags.DryRun = true 237 | 	flags.Verbose = true 238 | 	flags.Progress = true 239 | 	flags.Parsable = true
240 | 	if r, w, err = os.Pipe(); err != nil { 241 | 		return 242 | 	} 243 | 	defer r.Close() 244 | 	go func() { 245 | 		var tmpe error 246 | 		saveOut := C.redirect_libzfs_stdout(C.int(w.Fd())) 247 | 		if saveOut < 0 { 248 | 			tmpe = fmt.Errorf("Redirection of zfslib stdout failed %d", saveOut) 249 | 		} else { 250 | 			tmpe = d.send(FromName, w, &flags) 251 | 			C.restore_libzfs_stdout(saveOut) 252 | 		} 253 | 		w.Close() 254 | 		errch <- tmpe 255 | 	}() 256 | 257 | 	r.SetReadDeadline(time.Now().Add(60 * time.Second)) 258 | 	var data []byte 259 | 	if data, err = ioutil.ReadAll(r); err != nil { 260 | 		return 261 | 	} 262 | 	// parse size 263 | 	var sizeRe *regexp.Regexp 264 | 	if sizeRe, err = regexp.Compile("size[ \t]*([0-9]+)"); err != nil { 265 | 		return 266 | 	} 267 | 	matches := sizeRe.FindAllSubmatch(data, 3) 268 | 	if len(matches) > 0 && len(matches[0]) > 1 { 269 | 		if size, err = strconv.ParseInt( 270 | 			string(matches[0][1]), 10, 64); err != nil { 271 | 			return 272 | 		} 273 | 	} 274 | 	err = <-errch 275 | 	return 276 | } 277 | 278 | // Receive - receive snapshot stream 279 | func (d *Dataset) Receive(inf *os.File, flags RecvFlags) (err error) { 280 | 	var dpath string 281 | 	if dpath, err = d.Path(); err != nil { 282 | 		return 283 | 	} 284 | 	props := C.new_property_nvlist() 285 | 	if props == nil { 286 | 		err = fmt.Errorf("Out of memory func (d *Dataset) Recv()") 287 | 		return 288 | 	} 289 | 	defer C.nvlist_free(props) 290 | 	cflags := to_recvflags_t(&flags) 291 | 	defer C.free(unsafe.Pointer(cflags)) 292 | 	dest := C.CString(dpath) 293 | 	defer C.free(unsafe.Pointer(dest)) 294 | 	ec := C.zfs_receive(C.libzfsHandle, dest, nil, cflags, C.int(inf.Fd()), nil) 295 | 	if ec != 0 { 296 | 		err = fmt.Errorf("ZFS receive of %s failed. %s", C.GoString(dest), LastError().Error()) 297 | 	} 298 | 	return 299 | } 300 | 301 | // Unpack unpacks a resume token into the ResumeToken fields 302 | func (rt *ResumeToken) Unpack(token string) (err error) { 303 | 	ctoken := C.CString(token) 304 | 	defer C.free(unsafe.Pointer(ctoken)) 305 | 	resume_nvl := C.zfs_send_resume_token_to_nvlist(C.libzfsHandle, ctoken) 306 | 	defer C.nvlist_free(resume_nvl) 307 | 	if resume_nvl == nil { 308 | 		err = fmt.Errorf("Failed to unpack resume token: %s", LastError().Error()) 309 | 		return 310 | 	} 311 | 	if rt.ToName, err = rt.lookupString(resume_nvl, "toname"); err != nil { 312 | 		return 313 | 	} 314 | 	rt.FromName, _ = rt.lookupString(resume_nvl, "fromname") 315 | 316 | 	if rt.Object, err = rt.lookupUint64(resume_nvl, "object"); err != nil { 317 | 		return 318 | 	} 319 | 	if rt.Offset, err = rt.lookupUint64(resume_nvl, "offset"); err != nil { 320 | 		return 321 | 	} 322 | 	if rt.Bytes, err = rt.lookupUint64(resume_nvl, "bytes"); err != nil { 323 | 		return 324 | 	} 325 | 	if rt.ToGUID, err = rt.lookupUint64(resume_nvl, "toguid"); err != nil { 326 | 		return 327 | 	} 328 | 329 | 	rt.FromGUID, _ = rt.lookupUint64(resume_nvl, "fromguid") 330 | 331 | 	rt.LargeBlock = rt.exist(resume_nvl, "largeblockok") 332 | 	rt.EmbedOk = rt.exist(resume_nvl, "embedok") 333 | 	rt.CompressOk = rt.exist(resume_nvl, "compressok") 334 | 	rt.RawOk = rt.exist(resume_nvl, "rawok") 335 | 336 | 	return 337 | } 338 | 339 | func (rt *ResumeToken) lookupString(nvl *C.nvlist_t, key string) (val string, err error) { 340 | 	var cstr *C.char 341 | 	ckey := C.CString(key) 342 | 	defer C.free(unsafe.Pointer(ckey)) 343 | 	// NOTE: cstr returned by nvlist_lookup_string points into nvl and must not be freed here 344 | 	rc := C.nvlist_lookup_string(nvl, ckey, &cstr) 345 | 	if rc != 0 { 346 | 		err = fmt.Errorf("resume token is corrupt") 347 | 		return 348 | 	} 349 | 	val = C.GoString(cstr) 350 | 	return 351 | } 352 | 353 | func (rt *ResumeToken)
lookupUint64(nvl *C.nvlist_t, key string) (val uint64, err error) { 354 | 	var num C.uint64_t 355 | 	ckey := C.CString(key) 356 | 	defer C.free(unsafe.Pointer(ckey)) 357 | 	rc := C.nvlist_lookup_uint64(nvl, ckey, &num) 358 | 	if rc != 0 { 359 | 		err = fmt.Errorf("resume token is corrupt") 360 | 		return 361 | 	} 362 | 	val = uint64(num) 363 | 	return 364 | } 365 | 366 | func (rt *ResumeToken) exist(nvl *C.nvlist_t, key string) (val bool) { 367 | 	ckey := C.CString(key) 368 | 	defer C.free(unsafe.Pointer(ckey)) 369 | 	rc := C.nvlist_exists(nvl, ckey) 370 | 	val = (rc != 0) 371 | 	return 372 | } 373 | -------------------------------------------------------------------------------- /sort.go: -------------------------------------------------------------------------------- 1 | package zfs 2 | 3 | import ( 4 | 	"strconv" 5 | ) 6 | 7 | type clonesCreateDesc []Dataset 8 | 9 | func (list clonesCreateDesc) Less(i, j int) bool { 10 | 	_, oki := list[i].Properties[DatasetNumProps+1000] 11 | 	_, okj := list[j].Properties[DatasetNumProps+1000] 12 | 	if oki && okj { 13 | 		unixti, err := strconv.ParseInt( 14 | 			list[i].Properties[DatasetNumProps+1000].Value, 10, 64) 15 | 		if err != nil { 16 | 			panic(err) 17 | 		} 18 | 		unixtj, err := strconv.ParseInt( 19 | 			list[j].Properties[DatasetNumProps+1000].Value, 10, 64) 20 | 		if err != nil { 21 | 			panic(err) 22 | 		} 23 | 		if unixti != unixtj { 24 | 			return unixti > unixtj 25 | 		} 26 | 	} 27 | 28 | 	// if we have two datasets created from the same snapshot 29 | 	// any of them will do, but we will go for the most recent 30 | 	unixti, err := strconv.ParseInt( 31 | 		list[i].Properties[DatasetPropCreateTXG].Value, 10, 64) 32 | 	if err != nil { 33 | 		panic(err) 34 | 	} 35 | 	unixtj, err := strconv.ParseInt( 36 | 		list[j].Properties[DatasetPropCreateTXG].Value, 10, 64) 37 | 	if err != nil { 38 | 		panic(err) 39 | 	} 40 | 41 | 	return unixti > unixtj 42 | } 43 | 44 | func (list clonesCreateDesc) Swap(i, j int) { 45 | 	list[i], list[j] = list[j], list[i] 46 | } 47 | 48 | func (list clonesCreateDesc) Len() int { 49 | 	return len(list) 50 | } 51 | -------------------------------------------------------------------------------- /zfs.c: -------------------------------------------------------------------------------- 1 | /* C wrappers around some zfs calls and C in general that should simplify 2 |  * using libzfs from Go, making Go code shorter and more readable.
3 |  */ 4 | 5 | #include <libzfs.h> 6 | #include <string.h> 7 | #include <stdlib.h> 8 | #include <errno.h> 9 | 10 | #include "common.h" 11 | #include "zpool.h" 12 | #include "zfs.h" 13 | 14 | 15 | dataset_list_t *create_dataset_list_item() { 16 | 	dataset_list_t *zlist = malloc(sizeof(dataset_list_t)); 17 | 	memset(zlist, 0, sizeof(dataset_list_t)); 18 | 	return zlist; 19 | } 20 | 21 | void dataset_list_close(dataset_list_t *list) { 22 | 	if (list != NULL) { 23 | 		if (list->zh != NULL) { 24 | 			zfs_close(list->zh); 25 | 			list->zh = NULL; 26 | 		} 27 | 		free(list); 28 | 	} 29 | 	// dataset_list_free(list); 30 | } 31 | 32 | void dataset_list_free(dataset_list_t *list) { 33 | 	dataset_list_t *next; 34 | 	while(list) { 35 | 		next = list->pnext; 36 | 		free(list); 37 | 		list = next; 38 | 	} 39 | } 40 | 41 | int dataset_list_callb(zfs_handle_t *dataset, void *data) { 42 | 	dataset_list_t **lroot = (dataset_list_t**)data; 43 | 44 | 	if ( !((*lroot)->zh) ) { 45 | 		(*lroot)->zh = dataset; 46 | 	} else { 47 | 		dataset_list_t *nroot = create_dataset_list_item(); 48 | 		nroot->zh = dataset; 49 | 		nroot->pnext = (void*)*lroot; 50 | 		*lroot = nroot; 51 | 	} 52 | 	return 0; 53 | } 54 | 55 | dataset_list_ptr dataset_list_root() { 56 | 	int err = 0; 57 | 	dataset_list_t *zlist = create_dataset_list_item(); 58 | 	err = zfs_iter_root(libzfsHandle, dataset_list_callb, &zlist); 59 | 	if ( err != 0 || zlist->zh == NULL) { 60 | 		dataset_list_free(zlist); 61 | 		return NULL; 62 | 	} 63 | 	return zlist; 64 | } 65 | 66 | dataset_list_ptr dataset_next(dataset_list_t *dataset) { 67 | 	return dataset->pnext; 68 | } 69 | 70 | int dataset_type(dataset_list_ptr dataset) { 71 | 	return zfs_get_type(dataset->zh); 72 | } 73 | 74 | dataset_list_ptr dataset_open(const char *path) { 75 | 	dataset_list_ptr list = create_dataset_list_item(); 76 | 	list->zh = zfs_open(libzfsHandle, path, 0xF); 77 | 	if (list->zh == NULL) { 78 | 		dataset_list_free(list); 79 | 		list = NULL; 80 | 	} 81 | 	return list; 82 | } 83 | 84 | int dataset_create(const char *path, zfs_type_t type, nvlist_ptr props) { 85 | 	return zfs_create(libzfsHandle, path, type, props); 86 | } 87 | 88 | int dataset_destroy(dataset_list_ptr dataset, boolean_t defer) { 89 | 	return zfs_destroy(dataset->zh, defer); 90 | } 91 | 92 | dataset_list_t *dataset_list_children(dataset_list_t *dataset) { 93 | 	int err = 0; 94 | 	dataset_list_t *zlist = create_dataset_list_item(); 95 | 	err = zfs_iter_children(dataset->zh, dataset_list_callb, &zlist); 96 | 	if ( err != 0 || zlist->zh == NULL) { 97 | 		dataset_list_free(zlist); 98 | 		return NULL; 99 | 	} 100 | 	return zlist; 101 | } 102 | 103 | zpool_list_ptr dataset_get_pool(dataset_list_ptr dataset) { 104 | 	zpool_list_ptr pool = create_zpool_list_item(); 105 | 	if(pool != NULL) { 106 | 		pool->zph = zfs_get_pool_handle(dataset->zh); 107 | 	} 108 | 	return pool; 109 | } 110 | 111 | int dataset_prop_set(dataset_list_ptr dataset, zfs_prop_t prop, const char *value) { 112 | 	return zfs_prop_set(dataset->zh, zfs_prop_to_name(prop), value); 113 | } 114 | 115 | int dataset_user_prop_set(dataset_list_ptr dataset, const char *prop, const char *value) { 116 | 	return zfs_prop_set(dataset->zh, prop, value); 117 | } 118 | 119 | int dataset_clone(dataset_list_ptr dataset, const char *target, nvlist_ptr props) { 120 | 	return zfs_clone(dataset->zh, target, props); 121 | } 122 | 123 | int dataset_snapshot(const char *path, boolean_t recur, nvlist_ptr props) { 124 | 	return zfs_snapshot(libzfsHandle, path, recur, props); 125 | } 126 | 127 | int dataset_rollback(dataset_list_ptr dataset, dataset_list_ptr snapshot, boolean_t force) { 128 | 	return
zfs_rollback(dataset->zh, snapshot->zh, force); 129 | } 130 | 131 | int dataset_promote(dataset_list_ptr dataset) { 132 | return zfs_promote(dataset->zh); 133 | } 134 | 135 | int dataset_rename(dataset_list_ptr dataset, const char* new_name, boolean_t recur, boolean_t force_unm) { 136 | return zfs_rename(dataset->zh, new_name, recur, force_unm); 137 | } 138 | 139 | const char *dataset_is_mounted(dataset_list_ptr dataset){ 140 | char *mp = NULL; 141 | // zfs_is_mounted returns B_TRUE or B_FALSE 142 | if (0 != zfs_is_mounted(dataset->zh, &mp)) { 143 | return mp; 144 | } 145 | return NULL; 146 | } 147 | 148 | int dataset_mount(dataset_list_ptr dataset, const char *options, int flags) { 149 | if ( 0 < strlen(options)) { 150 | return zfs_mount(dataset->zh, options, flags); 151 | } else { 152 | return zfs_mount(dataset->zh, NULL, flags); 153 | } 154 | } 155 | 156 | int dataset_unmount(dataset_list_ptr dataset, int flags) { 157 | return zfs_unmount(dataset->zh, NULL, flags); 158 | } 159 | 160 | int dataset_unmountall(dataset_list_ptr dataset, int flags) { 161 | return zfs_unmountall(dataset->zh, flags); 162 | } 163 | 164 | const char *dataset_get_name(dataset_list_ptr ds) { 165 | return zfs_get_name(ds->zh); 166 | } 167 | 168 | //int read_dataset_property(zfs_handle_t *zh, property_list_t *list, int prop) { 169 | property_list_t *read_dataset_property(dataset_list_t *dataset, int prop) { 170 | int r = 0; 171 | zprop_source_t source; 172 | char statbuf[INT_MAX_VALUE]; 173 | property_list_ptr list = NULL; 174 | list = new_property_list(); 175 | 176 | r = zfs_prop_get(dataset->zh, prop, 177 | list->value, INT_MAX_VALUE, &source, statbuf, INT_MAX_VALUE, 1); 178 | if (r == 0 && list != NULL) { 179 | // strcpy(list->name, zpool_prop_to_name(prop)); 180 | zprop_source_tostr(list->source, source); 181 | list->property = (int)prop; 182 | } else if (list != NULL) { 183 | free_properties(list); 184 | list = NULL; 185 | } 186 | return list; 187 | } 188 | 189 | // int read_user_property(zfs_handle_t *zh, property_list_t *list, const char *prop) { 190 | property_list_t *read_user_property(dataset_list_t *dataset, const char* prop) { 191 | nvlist_t *user_props = zfs_get_user_props(dataset->zh); 192 | nvlist_t *propval; 193 | zprop_source_t sourcetype; 194 | char *strval; 195 | char *sourceval; 196 | // char source[ZFS_MAX_DATASET_NAME_LEN]; 197 | property_list_ptr list = new_property_list(); 198 | 199 | if (nvlist_lookup_nvlist(user_props, 200 | prop, &propval) != 0) { 201 | sourcetype = ZPROP_SRC_NONE; 202 | (void) strncpy(list->source, 203 | "none", sizeof (list->source)); 204 | strval = "-"; 205 | } else { 206 | verify(nvlist_lookup_string(propval, 207 | ZPROP_VALUE, &strval) == 0); 208 | verify(nvlist_lookup_string(propval, 209 | ZPROP_SOURCE, &sourceval) == 0); 210 | 211 | if (strcmp(sourceval, 212 | zfs_get_name(dataset->zh)) == 0) { 213 | sourcetype = ZPROP_SRC_LOCAL; 214 | (void) strncpy(list->source, 215 | "local", sizeof (list->source)); 216 | } else if (strcmp(sourceval, 217 | ZPROP_SOURCE_VAL_RECVD) == 0) { 218 | sourcetype = ZPROP_SRC_RECEIVED; 219 | (void) strncpy(list->source, 220 | "received", sizeof (list->source)); 221 | } else { 222 | sourcetype = ZPROP_SRC_INHERITED; 223 | (void) strncpy(list->source, 224 | sourceval, sizeof (list->source)); 225 | } 226 | } 227 | (void) strncpy(list->value, 228 | strval, sizeof (list->value)); 229 | return list; 230 | } 231 | 232 | char** alloc_cstrings(int size) { 233 | return malloc(size*sizeof(char*)); 234 | } 235 | 236 | void strings_setat(char **a, int 
at, char *v) { 237 | a[at] = v; 238 | } 239 | 240 | 241 | sendflags_t *alloc_sendflags() { 242 | sendflags_t *r = malloc(sizeof(sendflags_t)); 243 | memset(r, 0, sizeof(sendflags_t)); 244 | return r; 245 | } 246 | recvflags_t *alloc_recvflags() { 247 | recvflags_t *r = malloc(sizeof(recvflags_t)); 248 | memset(r, 0, sizeof(recvflags_t)); 249 | return r; 250 | } 251 | 252 | struct zfs_cmd *new_zfs_cmd(){ 253 | struct zfs_cmd *cmd = malloc(sizeof(struct zfs_cmd)); 254 | memset(cmd, 0, sizeof(struct zfs_cmd)); 255 | return cmd; 256 | } 257 | 258 | int estimate_send_size(struct zfs_cmd *zc) { 259 | int rc = zfs_ioctl(libzfsHandle, ZFS_IOC_SEND, zc); 260 | if (rc != 0) { 261 | rc = errno; 262 | } 263 | return rc; 264 | } 265 | 266 | -------------------------------------------------------------------------------- /zfs.go: -------------------------------------------------------------------------------- 1 | package zfs 2 | 3 | // #include <stdlib.h> 4 | // #include <libzfs.h> 5 | // #include "common.h" 6 | // #include "zpool.h" 7 | // #include "zfs.h" 8 | import "C" 9 | 10 | import ( 11 | "errors" 12 | "fmt" 13 | "path" 14 | "sort" 15 | "strings" 16 | "sync" 17 | "time" 18 | "unsafe" 19 | ) 20 | 21 | const ( 22 | msgDatasetIsNil = "Dataset handle not initialized or it's closed" 23 | ) 24 | 25 | // DatasetProperties type is map of dataset or volume properties prop -> value 26 | type DatasetProperties map[Prop]string 27 | 28 | // DatasetType defines enum of dataset types 29 | type DatasetType int32 30 | 31 | const ( 32 | // DatasetTypeFilesystem - file system dataset 33 | DatasetTypeFilesystem DatasetType = (1 << 0) 34 | // DatasetTypeSnapshot - snapshot of dataset 35 | DatasetTypeSnapshot = (1 << 1) 36 | // DatasetTypeVolume - volume (virtual block device) dataset 37 | DatasetTypeVolume = (1 << 2) 38 | // DatasetTypePool - pool dataset 39 | DatasetTypePool = (1 << 3) 40 | // DatasetTypeBookmark - bookmark dataset 41 | DatasetTypeBookmark = (1 << 4) 42 | ) 43 | 44 | // HoldTag - user holds tags 45 | type HoldTag struct { 46 | Name string 47 | Timestamp time.Time 48 | } 49 | 50 | // Dataset - ZFS dataset object 51 | type Dataset struct { 52 | list C.dataset_list_ptr 53 | closeOnce *sync.Once 54 | Type DatasetType 55 | Properties map[Prop]Property 56 | Children []Dataset 57 | } 58 | 59 | func (d *Dataset) openChildren() (err error) { 60 | d.Children = make([]Dataset, 0, 5) 61 | list := C.dataset_list_children(d.list) 62 | for list != nil { 63 | dataset := Dataset{list: list, closeOnce: new(sync.Once)} 64 | dataset.Type = DatasetType(C.dataset_type(list)) 65 | dataset.Properties = make(map[Prop]Property) 66 | err = dataset.ReloadProperties() 67 | if err != nil { 68 | return 69 | } 70 | d.Children = append(d.Children, dataset) 71 | list = C.dataset_next(list) 72 | } 73 | for ci := range d.Children { 74 | if err = d.Children[ci].openChildren(); err != nil { 75 | return 76 | } 77 | } 78 | return 79 | } 80 | 81 | // DatasetOpenAll recursively gets handles to all available datasets on the system 82 | // (file-systems, volumes or snapshots). 
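A minimal usage sketch for the `DatasetOpenAll`/`DatasetCloseAll` pair declared above; it assumes the package is imported as `zfs "github.com/bicomsystems/go-libzfs"` and that at least one pool is imported on the system. The recursive counter closure is purely illustrative:

```go
// import zfs "github.com/bicomsystems/go-libzfs"
datasets, err := zfs.DatasetOpenAll()
if err != nil {
	panic(err.Error())
}
// Every handle opened by DatasetOpenAll must be closed again
defer zfs.DatasetCloseAll(datasets)

// Recursively count open handles; the closure is ours for illustration
var count func(ds []zfs.Dataset) int
count = func(ds []zfs.Dataset) int {
	n := 0
	for _, d := range ds {
		n += 1 + count(d.Children)
	}
	return n
}
println("open dataset handles:", count(datasets))
```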
83 | func DatasetOpenAll() (datasets []Dataset, err error) { 84 | list := C.dataset_list_root() 85 | for list != nil { 86 | dataset := Dataset{ 87 | list: list, 88 | closeOnce: new(sync.Once), 89 | Type: DatasetType(C.dataset_type(list)), 90 | } 91 | dataset.Type = DatasetType(C.dataset_type(list)) 92 | err = dataset.ReloadProperties() 93 | if err != nil { 94 | return 95 | } 96 | datasets = append(datasets, dataset) 97 | list = C.dataset_next(list) 98 | } 99 | for ci := range datasets { 100 | if err = datasets[ci].openChildren(); err != nil { 101 | return 102 | } 103 | } 104 | return 105 | } 106 | 107 | // DatasetCloseAll closes all datasets in the slice and all of their recursive 108 | // children datasets 109 | func DatasetCloseAll(datasets []Dataset) { 110 | for _, d := range datasets { 111 | d.Close() 112 | } 113 | } 114 | 115 | // DatasetOpen opens dataset and all of its recursive children datasets 116 | func DatasetOpen(path string) (d Dataset, err error) { 117 | if d, err = DatasetOpenSingle(path); err != nil { 118 | return 119 | } 120 | err = d.openChildren() 121 | return 122 | } 123 | 124 | // DatasetOpenSingle opens dataset without opening all of its recursive 125 | // children datasets 126 | func DatasetOpenSingle(path string) (d Dataset, err error) { 127 | csPath := C.CString(path) 128 | d.list = C.dataset_open(csPath) 129 | C.free(unsafe.Pointer(csPath)) 130 | 131 | if d.list == nil || d.list.zh == nil { 132 | err = LastError() 133 | if err == nil { 134 | err = fmt.Errorf("dataset not found") 135 | } 136 | err = fmt.Errorf("%s - %s", err.Error(), path) 137 | return 138 | } 139 | d.closeOnce = new(sync.Once) 140 | d.Type = DatasetType(C.dataset_type(d.list)) 141 | err = d.ReloadProperties() 142 | if err != nil { 143 | return 144 | } 145 | return 146 | } 147 | 148 | func datasetPropertiesTonvlist(props map[Prop]Property) ( 149 | cprops C.nvlist_ptr, err error) { 150 | // convert properties to nvlist C type 151 | cprops = C.new_property_nvlist() 152 | if cprops == nil { 153 | err = errors.New("Failed to allocate properties") 154 | return 155 | } 156 | for prop, value := range props { 157 | csValue := C.CString(value.Value) 158 | r := C.property_nvlist_add( 159 | cprops, C.zfs_prop_to_name(C.zfs_prop_t(prop)), csValue) 160 | C.free(unsafe.Pointer(csValue)) 161 | if r != 0 { 162 | err = errors.New("Failed to convert property") 163 | return 164 | } 165 | } 166 | return 167 | } 168 | 169 | // DatasetCreate creates a new filesystem or volume on path representing 170 | // pool/dataset or pool/parent/dataset 171 | func DatasetCreate(path string, dtype DatasetType, 172 | props map[Prop]Property) (d Dataset, err error) { 173 | var cprops C.nvlist_ptr 174 | if cprops, err = datasetPropertiesTonvlist(props); err != nil { 175 | return 176 | } 177 | defer C.nvlist_free(cprops) 178 | 179 | csPath := C.CString(path) 180 | errcode := C.dataset_create(csPath, C.zfs_type_t(dtype), cprops) 181 | C.free(unsafe.Pointer(csPath)) 182 | if errcode != 0 { 183 | err = LastError() 184 | return 185 | } 186 | return DatasetOpen(path) 187 | } 188 | 189 | // Close closes dataset and all its recursive children datasets (closes handle 190 | // and cleans up dataset object/s from memory) 191 | func (d *Dataset) Close() { 192 | // if dataset was ever open 193 | if d.closeOnce != nil { 194 | d.closeOnce.Do(func() { 195 | C.dataset_list_close(d.list) 196 | }) 197 | } 198 | d.list = nil 199 | for _, cd := range d.Children { 200 | cd.Close() 201 | } 202 | } 203 | 204 | // reOpen - close and open dataset. Not thread safe! 
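`DatasetOpen` eagerly opens the whole subtree while `DatasetOpenSingle` opens just the one handle, which is considerably cheaper for deep trees when only one dataset is needed. A minimal sketch, assuming the usual `zfs` import; the dataset name is illustrative:

```go
// import zfs "github.com/bicomsystems/go-libzfs"
// Open a single handle without opening its children
d, err := zfs.DatasetOpenSingle("TESTPOOL/DATASET") // name is illustrative
if err != nil {
	panic(err.Error())
}
defer d.Close()
// Properties were already loaded by ReloadProperties() during open
println(d.Properties[zfs.DatasetPropMountpoint].Value)
```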
205 | func (d *Dataset) reOpen() (err error) { 206 | d.Close() 207 | *d, err = DatasetOpen(d.Properties[DatasetPropName].Value) 208 | return 209 | } 210 | 211 | // Destroy destroys the dataset. The caller must make sure that the filesystem 212 | // isn't mounted, and that there are no active dependents. Set Defer argument 213 | // to true to defer destruction for when dataset is not in use. Call Close() to 214 | // cleanup memory. 215 | func (d *Dataset) Destroy(Defer bool) (err error) { 216 | if len(d.Children) > 0 { 217 | path, e := d.Path() 218 | if err = e; err != nil { 219 | return 220 | } 221 | dsType, e := d.GetProperty(DatasetPropType) 222 | if e != nil { 223 | dsType.Value = e.Error() // just put error (why it didn't fetch property type) 224 | } 225 | err = errors.New("Cannot destroy dataset " + path + 226 | ": " + dsType.Value + " has children") 227 | return 228 | } 229 | if d.list != nil { 230 | if ec := C.dataset_destroy(d.list, booleanT(Defer)); ec != 0 { 231 | err = LastError() 232 | } 233 | } else { 234 | err = errors.New(msgDatasetIsNil) 235 | } 236 | return 237 | } 238 | 239 | // IsSnapshot - return true if dataset is snapshot 240 | func (d *Dataset) IsSnapshot() (ok bool) { 241 | path := d.Properties[DatasetPropName].Value 242 | ok = (d.Type == DatasetTypeSnapshot || strings.Contains(path, "@")) 243 | return 244 | } 245 | 246 | // DestroyRecursive recursively destroys children of the dataset, and then the dataset itself. 247 | func (d *Dataset) DestroyRecursive() (err error) { 248 | var path string 249 | if path, err = d.Path(); err != nil { 250 | return 251 | } 252 | if !strings.Contains(path, "@") { // not snapshot 253 | if len(d.Children) > 0 { 254 | for _, c := range d.Children { 255 | if err = c.DestroyRecursive(); err != nil { 256 | return 257 | } 258 | // close handle to destroyed child dataset 259 | c.Close() 260 | } 261 | // clear closed children array 262 | d.Children = make([]Dataset, 0) 263 | } 264 | err = d.Destroy(false) 265 | } else { 266 | var parent Dataset 267 | tmp := strings.Split(path, "@") 268 | ppath, snapname := tmp[0], tmp[1] 269 | if parent, err = DatasetOpen(ppath); err != nil { 270 | return 271 | } 272 | defer parent.Close() 273 | if len(parent.Children) > 0 { 274 | for _, c := range parent.Children { 275 | if path, err = c.Path(); err != nil { 276 | return 277 | } 278 | if strings.Contains(path, "@") { 279 | continue // skip other snapshots 280 | } 281 | if c, err = DatasetOpen(path + "@" + snapname); err != nil { 282 | continue 283 | } 284 | if err = c.DestroyRecursive(); err != nil { 285 | c.Close() 286 | return 287 | } 288 | c.Close() 289 | } 290 | } 291 | err = d.Destroy(false) 292 | } 293 | return 294 | } 295 | 296 | // Pool returns the pool to which the dataset belongs 297 | func (d *Dataset) Pool() (p Pool, err error) { 298 | if d.list == nil { 299 | err = errors.New(msgDatasetIsNil) 300 | return 301 | } 302 | p.list = C.dataset_get_pool(d.list) 303 | if p.list != nil && p.list.zph != nil { 304 | err = p.ReloadProperties() 305 | return 306 | } 307 | err = LastError() 308 | return 309 | } 310 | 311 | // PoolName - returns name of the pool 312 | func (d *Dataset) PoolName() string { 313 | path := d.Properties[DatasetPropName].Value 314 | i := strings.Index(path, "/") 315 | if i < 0 { 316 | return path 317 | } 318 | return path[0:i] 319 | } 320 | 321 | // ReloadProperties re-reads dataset's properties 322 | func (d *Dataset) ReloadProperties() (err error) { 323 | Global.Mtx.Lock() 324 | defer Global.Mtx.Unlock() 325 | if d.list == nil { 326 | err = errors.New(msgDatasetIsNil) 327 | 
return 328 | } 329 | d.Properties = make(map[Prop]Property) 330 | C.zfs_refresh_properties(d.list.zh) 331 | for prop := DatasetPropType; prop < DatasetNumProps; prop++ { 332 | plist := C.read_dataset_property(d.list, C.int(prop)) 333 | if plist == nil { 334 | continue 335 | } 336 | d.Properties[prop] = Property{Value: C.GoString(&(*plist).value[0]), 337 | Source: C.GoString(&(*plist).source[0])} 338 | C.free_properties(plist) 339 | } 340 | return 341 | } 342 | 343 | // GetProperty reloads and returns a single specified property. This also reloads the requested 344 | // property in the Properties map. 345 | func (d *Dataset) GetProperty(p Prop) (prop Property, err error) { 346 | Global.Mtx.Lock() 347 | defer Global.Mtx.Unlock() 348 | if d.list == nil { 349 | err = errors.New(msgDatasetIsNil) 350 | return 351 | } 352 | plist := C.read_dataset_property(d.list, C.int(p)) 353 | if plist == nil { 354 | err = LastError() 355 | return 356 | } 357 | defer C.free_properties(plist) 358 | prop = Property{Value: C.GoString(&(*plist).value[0]), 359 | Source: C.GoString(&(*plist).source[0])} 360 | d.Properties[p] = prop 361 | return 362 | } 363 | 364 | // GetUserProperty - look up and return user property 365 | func (d *Dataset) GetUserProperty(p string) (prop Property, err error) { 366 | Global.Mtx.Lock() 367 | defer Global.Mtx.Unlock() 368 | if d.list == nil { 369 | err = errors.New(msgDatasetIsNil) 370 | return 371 | } 372 | csp := C.CString(p) 373 | defer C.free(unsafe.Pointer(csp)) 374 | plist := C.read_user_property(d.list, csp) 375 | if plist == nil { 376 | err = LastError() 377 | return 378 | } 379 | defer C.free_properties(plist) 380 | prop = Property{Value: C.GoString(&(*plist).value[0]), 381 | Source: C.GoString(&(*plist).source[0])} 382 | return 383 | } 384 | 385 | // SetProperty sets ZFS dataset property to value. Not all properties can be set; 386 | // some can be set only at creation time and some are read only. 387 | // Always check the returned error and its description. 388 | func (d *Dataset) SetProperty(p Prop, value string) (err error) { 389 | Global.Mtx.Lock() 390 | defer Global.Mtx.Unlock() 391 | if d.list == nil { 392 | err = errors.New(msgDatasetIsNil) 393 | return 394 | } 395 | csValue := C.CString(value) 396 | errcode := C.dataset_prop_set(d.list, C.zfs_prop_t(p), csValue) 397 | C.free(unsafe.Pointer(csValue)) 398 | if errcode != 0 { 399 | err = LastError() 400 | return 401 | } 402 | // Update Properties member with change made 403 | plist := C.read_dataset_property(d.list, C.int(p)) 404 | if plist == nil { 405 | err = LastError() 406 | return 407 | } 408 | defer C.free_properties(plist) 409 | d.Properties[p] = Property{Value: C.GoString(&(*plist).value[0]), 410 | Source: C.GoString(&(*plist).source[0])} 411 | return 412 | } 413 | 414 | // SetUserProperty - set user defined (custom) dataset property to given value 415 | func (d *Dataset) SetUserProperty(prop, value string) (err error) { 416 | Global.Mtx.Lock() 417 | defer Global.Mtx.Unlock() 418 | if d.list == nil { 419 | err = errors.New(msgDatasetIsNil) 420 | return 421 | } 422 | csValue := C.CString(value) 423 | csProp := C.CString(prop) 424 | errcode := C.dataset_user_prop_set(d.list, csProp, csValue) 425 | C.free(unsafe.Pointer(csValue)) 426 | C.free(unsafe.Pointer(csProp)) 427 | if errcode != 0 { 428 | err = LastError() 429 | } 430 | return 431 | } 432 | 433 | // Clone - clones the dataset. The target must be of the same type as 434 | // the source. 
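The snapshot/clone/promote trio below works together: a clone is created from a snapshot handle, and promoting the clone makes it the owner of the shared history so the origin can later be destroyed. A minimal sketch, assuming the usual `zfs` import; all dataset names are illustrative:

```go
// import zfs "github.com/bicomsystems/go-libzfs"
props := make(map[zfs.Prop]zfs.Property)

// Snapshot the origin dataset first
snap, err := zfs.DatasetSnapshot("TESTPOOL/origin@base", false, props)
if err != nil {
	panic(err.Error())
}
defer snap.Close()

// Clone from the snapshot handle, then promote the clone
clone, err := snap.Clone("TESTPOOL/copy", props)
if err != nil {
	panic(err.Error())
}
defer clone.Close()
if err = clone.Promote(); err != nil {
	panic(err.Error())
}
```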
435 | func (d *Dataset) Clone(target string, props map[Prop]Property) (rd Dataset, err error) { 436 | var cprops C.nvlist_ptr 437 | if d.list == nil { 438 | err = errors.New(msgDatasetIsNil) 439 | return 440 | } 441 | if cprops, err = datasetPropertiesTonvlist(props); err != nil { 442 | return 443 | } 444 | defer C.nvlist_free(cprops) 445 | csTarget := C.CString(target) 446 | defer C.free(unsafe.Pointer(csTarget)) 447 | if errc := C.dataset_clone(d.list, csTarget, cprops); errc != 0 { 448 | err = LastError() 449 | return 450 | } 451 | rd, err = DatasetOpen(target) 452 | return 453 | } 454 | 455 | // DatasetSnapshot creates dataset snapshot. Set recur to true to snapshot child datasets. 456 | func DatasetSnapshot(path string, recur bool, props map[Prop]Property) (rd Dataset, err error) { 457 | var cprops C.nvlist_ptr 458 | if cprops, err = datasetPropertiesTonvlist(props); err != nil { 459 | return 460 | } 461 | defer C.nvlist_free(cprops) 462 | csPath := C.CString(path) 463 | defer C.free(unsafe.Pointer(csPath)) 464 | if errc := C.dataset_snapshot(csPath, booleanT(recur), cprops); errc != 0 { 465 | err = LastError() 466 | return 467 | } 468 | rd, err = DatasetOpen(path) 469 | return 470 | } 471 | 472 | // Path returns zfs dataset path/name 473 | func (d *Dataset) Path() (path string, err error) { 474 | if d.list == nil { 475 | err = errors.New(msgDatasetIsNil) 476 | return 477 | } 478 | name := C.dataset_get_name(d.list) 479 | path = C.GoString(name) 480 | return 481 | } 482 | 483 | // Rollback rolls back the dataset to the given snapshot 484 | func (d *Dataset) Rollback(snap *Dataset, force bool) (err error) { 485 | if d.list == nil { 486 | err = errors.New(msgDatasetIsNil) 487 | return 488 | } 489 | if errc := C.dataset_rollback(d.list, snap.list, booleanT(force)); errc != 0 { 490 | err = LastError() 491 | return 492 | } 493 | d.ReloadProperties() 494 | return 495 | } 496 | 497 | // Promote promotes dataset clone 498 | func (d *Dataset) Promote() (err error) { 499 | if d.list == nil { 500 | err = errors.New(msgDatasetIsNil) 501 | return 502 | } 503 | if errc := C.dataset_promote(d.list); errc != 0 { 504 | err = LastError() 505 | return 506 | } 507 | d.ReloadProperties() 508 | return 509 | } 510 | 511 | // Rename dataset 512 | func (d *Dataset) Rename(newName string, recur, 513 | forceUnmount bool) (err error) { 514 | if d.list == nil { 515 | err = errors.New(msgDatasetIsNil) 516 | return 517 | } 518 | csNewName := C.CString(newName) 519 | defer C.free(unsafe.Pointer(csNewName)) 520 | if errc := C.dataset_rename(d.list, csNewName, 521 | booleanT(recur), booleanT(forceUnmount)); errc != 0 { 522 | err = LastError() 523 | return 524 | } 525 | d.ReloadProperties() 526 | return 527 | } 528 | 529 | // IsMounted checks to see if the mount is active. If the filesystem is mounted, 530 | // it sets the 'where' return value to the current mountpoint and returns true. Otherwise, 531 | // it returns false. 532 | func (d *Dataset) IsMounted() (mounted bool, where string) { 533 | if d.list == nil { 534 | return 535 | } 536 | Global.Mtx.Lock() 537 | defer Global.Mtx.Unlock() 538 | mp := C.dataset_is_mounted(d.list) 539 | // defer C.free(mp) 540 | if mounted = (mp != nil); mounted { 541 | where = C.GoString(mp) 542 | C.free(unsafe.Pointer(mp)) 543 | } 544 | return 545 | } 546 | 547 | // Mount the given filesystem. 
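A minimal mount/unmount sketch tying `IsMounted`, `Mount` and `Unmount` together, assuming the usual `zfs` import; the dataset name is illustrative, an empty options string means no extra mount options, and flag value 0 means no special mount/unmount flags:

```go
// import zfs "github.com/bicomsystems/go-libzfs"
d, err := zfs.DatasetOpen("TESTPOOL/DATASET") // name is illustrative
if err != nil {
	panic(err.Error())
}
defer d.Close()

if mounted, where := d.IsMounted(); mounted {
	println("already mounted at", where)
} else if err = d.Mount("", 0); err != nil { // "" = no options, 0 = no flags
	panic(err.Error())
}

// ... use the filesystem, then unmount; 0 = no forced-unmount flags
if err = d.Unmount(0); err != nil {
	panic(err.Error())
}
```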
548 | func (d *Dataset) Mount(options string, flags int) (err error) { 549 | Global.Mtx.Lock() 550 | defer Global.Mtx.Unlock() 551 | if d.list == nil { 552 | err = errors.New(msgDatasetIsNil) 553 | return 554 | } 555 | csOptions := C.CString(options) 556 | defer C.free(unsafe.Pointer(csOptions)) 557 | if ec := C.dataset_mount(d.list, csOptions, C.int(flags)); ec != 0 { 558 | err = LastError() 559 | } 560 | return 561 | } 562 | 563 | // Unmount the given filesystem. 564 | func (d *Dataset) Unmount(flags int) (err error) { 565 | if d.list == nil { 566 | err = errors.New(msgDatasetIsNil) 567 | return 568 | } 569 | if ec := C.dataset_unmount(d.list, C.int(flags)); ec != 0 { 570 | err = LastError() 571 | } 572 | return 573 | } 574 | 575 | // UnmountAll unmounts this filesystem and any children inheriting the 576 | // mountpoint property. 577 | func (d *Dataset) UnmountAll(flags int) (err error) { 578 | if d.list == nil { 579 | err = errors.New(msgDatasetIsNil) 580 | return 581 | } 582 | // This is implemented recursively because zfs_unmountall() didn't work 583 | if len(d.Children) > 0 { 584 | for _, c := range d.Children { 585 | if err = c.UnmountAll(flags); err != nil { 586 | return 587 | } 588 | } 589 | } 590 | return d.Unmount(flags) 591 | } 592 | 593 | // Hold - Adds a single reference, named with the tag argument, to the snapshot. 594 | // Each snapshot has its own tag namespace, and tags must be unique within that space. 595 | func (d *Dataset) Hold(flag string) (err error) { 596 | var path string 597 | var pd Dataset 598 | if path, err = d.Path(); err != nil { 599 | return 600 | } 601 | if !strings.Contains(path, "@") { 602 | err = fmt.Errorf("'%s' is not a snapshot", path) 603 | return 604 | } 605 | pd, err = DatasetOpenSingle(path[:strings.Index(path, "@")]) 606 | if err != nil { 607 | return 608 | } 609 | defer pd.Close() 610 | csSnapName := C.CString(path[strings.Index(path, "@")+1:]) 611 | defer C.free(unsafe.Pointer(csSnapName)) 612 | csFlag := C.CString(flag) 613 | defer C.free(unsafe.Pointer(csFlag)) 614 | if 0 != C.zfs_hold(pd.list.zh, csSnapName, csFlag, booleanT(false), -1) { 615 | err = LastError() 616 | } 617 | return 618 | } 619 | 620 | // Release - Removes a single reference, named with the tag argument, from the specified snapshot. 621 | // The tag must already exist for each snapshot. If a hold exists on a snapshot, attempts to destroy 622 | // that snapshot by using the zfs destroy command return EBUSY. 
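A minimal hold/release sketch covering `Hold`, `Holds` and `Release` (it mirrors the flow of the test in zfs_test.go); assumes the usual `zfs` import, with the snapshot path and the "keep" tag name being illustrative:

```go
// import zfs "github.com/bicomsystems/go-libzfs"
snap, err := zfs.DatasetOpen("TESTPOOL/DATASET@backup") // name is illustrative
if err != nil {
	panic(err.Error())
}
defer snap.Close()

if err = snap.Hold("keep"); err != nil { // tag name is illustrative
	panic(err.Error())
}
tags, err := snap.Holds() // list current user references on the snapshot
if err != nil {
	panic(err.Error())
}
for _, tag := range tags {
	println("tag:", tag.Name, "timestamp:", tag.Timestamp.String())
}
if err = snap.Release("keep"); err != nil {
	panic(err.Error())
}
```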
623 | func (d *Dataset) Release(flag string) (err error) { 624 | var path string 625 | var pd Dataset 626 | if path, err = d.Path(); err != nil { 627 | return 628 | } 629 | if !strings.Contains(path, "@") { 630 | err = fmt.Errorf("'%s' is not a snapshot", path) 631 | return 632 | } 633 | pd, err = DatasetOpenSingle(path[:strings.Index(path, "@")]) 634 | if err != nil { 635 | return 636 | } 637 | defer pd.Close() 638 | csSnapName := C.CString(path[strings.Index(path, "@")+1:]) 639 | defer C.free(unsafe.Pointer(csSnapName)) 640 | csFlag := C.CString(flag) 641 | defer C.free(unsafe.Pointer(csFlag)) 642 | if 0 != C.zfs_release(pd.list.zh, csSnapName, csFlag, booleanT(false)) { 643 | err = LastError() 644 | } 645 | return 646 | } 647 | 648 | // Holds - Lists all existing user references for the given snapshot 649 | func (d *Dataset) Holds() (tags []HoldTag, err error) { 650 | var nvl *C.nvlist_t 651 | var nvp *C.nvpair_t 652 | var tu64 C.uint64_t 653 | var path string 654 | if path, err = d.Path(); err != nil { 655 | return 656 | } 657 | if !strings.Contains(path, "@") { 658 | err = fmt.Errorf("'%s' is not a snapshot", path) 659 | return 660 | } 661 | if 0 != C.zfs_get_holds(d.list.zh, &nvl) { 662 | err = LastError() 663 | return 664 | } 665 | defer C.nvlist_free(nvl) 666 | tags = make([]HoldTag, 0, 5) 667 | for nvp = C.nvlist_next_nvpair(nvl, nvp); nvp != nil; { 668 | tag := C.nvpair_name(nvp) 669 | C.nvpair_value_uint64(nvp, &tu64) 670 | tags = append(tags, HoldTag{ 671 | Name: C.GoString(tag), 672 | Timestamp: time.Unix(int64(tu64), 0), 673 | }) 674 | 675 | nvp = C.nvlist_next_nvpair(nvl, nvp) 676 | } 677 | return 678 | } 679 | 680 | // DatasetPropertyToName converts property to name 681 | // (returns built-in string representation of property name). 682 | // This is optional; you can represent each property with a string 683 | // name of your choice. 684 | func DatasetPropertyToName(p Prop) (name string) { 685 | if p == DatasetNumProps { 686 | return "numofprops" 687 | } 688 | prop := C.zfs_prop_t(p) 689 | name = C.GoString(C.zfs_prop_to_name(prop)) 690 | return 691 | } 692 | 693 | // DestroyPromote - Same as DestroyRecursive() except it will not destroy 694 | // any dependent clones, but promote them first. 695 | // This function will navigate any dependency chain 696 | // of cloned datasets using breadth-first search, promoting clones as needed, 697 | // letting you remove the dataset regardless of its cloned dependencies. 698 | // Note that this function won't work when you want to destroy a snapshot this way. 699 | // However it will destroy all snapshots of the destroyed dataset that have no dependencies; 700 | // otherwise the snapshots will move to the promoted clone. 
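A minimal usage sketch for `DestroyPromote`, assuming the usual `zfs` import; the dataset name is illustrative and the dataset is assumed to have dependent clones:

```go
// import zfs "github.com/bicomsystems/go-libzfs"
// Destroy a dataset even if clones depend on it
d, err := zfs.DatasetOpen("TESTPOOL/origin") // name is illustrative
if err != nil {
	panic(err.Error())
}
if err = d.DestroyPromote(); err != nil {
	d.Close()
	panic(err.Error())
}
d.Close() // the handle still has to be closed after destroy
```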
701 | func (d *Dataset) DestroyPromote() (err error) { 702 | var snaps []Dataset 703 | var clones []string 704 | // We need to save list of child snapshots, to destroy them later 705 | // since they will be moved to promoted clone 706 | var psnaps []string 707 | if clones, err = d.Clones(); err != nil { 708 | return 709 | } 710 | if len(clones) > 0 { 711 | var cds Dataset 712 | // For this to always work we need to promote youngest clone 713 | // in terms of most recent origin snapshot or creation time if 714 | // cloned from same snapshot 715 | if cds, err = DatasetOpen(clones[0]); err != nil { 716 | return 717 | } 718 | defer cds.Close() 719 | // since promote will move the snapshots to promoted dataset 720 | // we need to check and resolve possible name conflicts 721 | if snaps, err = d.Snapshots(); err != nil { 722 | return 723 | } 724 | for _, s := range snaps { 725 | spath := s.Properties[DatasetPropName].Value 726 | sname := spath[strings.Index(spath, "@"):] 727 | // conflict and resolve 728 | if ok, _ := cds.FindSnapshotName(sname); ok { 729 | // snapshot with the same name already exists 730 | volname := path.Base(spath[:strings.Index(spath, "@")]) 731 | sname = sname + "." + volname 732 | if err = s.Rename(spath+"."+volname, false, true); err != nil { 733 | return 734 | } 735 | } 736 | psnaps = append(psnaps, sname) 737 | } 738 | if err = cds.Promote(); err != nil { 739 | return 740 | } 741 | } 742 | // destroy child datasets, since this works recursively 743 | for _, cd := range d.Children { 744 | if err = cd.DestroyPromote(); err != nil { 745 | return 746 | } 747 | } 748 | d.Children = make([]Dataset, 0) 749 | if err = d.Destroy(false); err != nil { 750 | return 751 | } 752 | // Load with new promoted snapshots 753 | if len(clones) > 0 && len(psnaps) > 0 { 754 | var cds Dataset 755 | if cds, err = DatasetOpen(clones[0]); err != nil { 756 | return 757 | } 758 | defer cds.Close() 759 | // try to destroy (promoted) snapshots now 760 | for _, sname := range psnaps { 761 | if ok, snap := cds.FindSnapshotName(sname); ok { 762 | snap.Destroy(false) 763 | } 764 | } 765 | } 766 | return 767 | } 768 | 769 | // Snapshots - filter and return all snapshots of dataset 770 | func (d *Dataset) Snapshots() (snaps []Dataset, err error) { 771 | for _, ch := range d.Children { 772 | if !ch.IsSnapshot() { 773 | continue 774 | } 775 | snaps = append(snaps, ch) 776 | } 777 | return 778 | } 779 | 780 | // FindSnapshot - returns true if given path is one of dataset snapshots 781 | func (d *Dataset) FindSnapshot(path string) (ok bool, snap Dataset) { 782 | for _, ch := range d.Children { 783 | if !ch.IsSnapshot() { 784 | continue 785 | } 786 | if ok = (path == ch.Properties[DatasetPropName].Value); ok { 787 | snap = ch 788 | break 789 | } 790 | } 791 | return 792 | } 793 | 794 | // FindSnapshotName - returns true and the snapshot if the given snapshot 795 | // name, eg. '@snap1', is one of the dataset's snapshots 
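A minimal sketch tying `Snapshots` and `FindSnapshotName` together, assuming the usual `zfs` import; the dataset path and the "@backup" snapshot name are illustrative:

```go
// import zfs "github.com/bicomsystems/go-libzfs"
d, err := zfs.DatasetOpen("TESTPOOL/DATASET") // name is illustrative
if err != nil {
	panic(err.Error())
}
defer d.Close()

snaps, err := d.Snapshots()
if err != nil {
	panic(err.Error())
}
for _, s := range snaps {
	println(s.Properties[zfs.DatasetPropName].Value)
}
// Lookup takes the short "@name" suffix form
if ok, snap := d.FindSnapshotName("@backup"); ok {
	println("found:", snap.Properties[zfs.DatasetPropName].Value)
}
```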
796 | func (d *Dataset) FindSnapshotName(name string) (ok bool, snap Dataset) { 797 | return d.FindSnapshot(d.Properties[DatasetPropName].Value + name) 798 | } 799 | 800 | // Clones - gets a list of all dataset paths cloned from this 801 | // dataset or this snapshot. 802 | // List is sorted descending by origin snapshot order 803 | func (d *Dataset) Clones() (clones []string, err error) { 804 | // Clones can only live on the same pool 805 | var root Dataset 806 | var sortDesc []Dataset 807 | if root, err = DatasetOpen(d.PoolName()); err != nil { 808 | return 809 | } 810 | defer root.Close() 811 | dIsSnapshot := d.IsSnapshot() 812 | // Use breadth-first search to find all clones 813 | queue := make(chan Dataset, 1024) 814 | defer close(queue) // This will close and cleanup all 815 | queue <- root // start from the root element 816 | for { 817 | select { 818 | case ds := <-queue: // pull from queue (breadth first search) 819 | for _, ch := range ds.Children { 820 | origin := ch.Properties[DatasetPropOrigin].Value 821 | if len(origin) > 0 { 822 | if dIsSnapshot && origin == d.Properties[DatasetPropName].Value { 823 | // if this dataset is snapshot 824 | ch.Properties[DatasetNumProps+1000] = d.Properties[DatasetPropCreateTXG] 825 | sortDesc = append(sortDesc, ch) 826 | } else { 827 | // Check if origin of this dataset is one of snapshots 828 | ok, snap := d.FindSnapshot(origin) 829 | if !ok { 830 | continue 831 | } 832 | ch.Properties[DatasetNumProps+1000] = snap.Properties[DatasetPropCreateTXG] 833 | sortDesc = append(sortDesc, ch) 834 | } 835 | } 836 | queue <- ch 837 | } 838 | default: 839 | sort.Sort(clonesCreateDesc(sortDesc)) 840 | // This way we get clones ordered from most recent snapshots first 841 | for _, c := range sortDesc { 842 | clones = append(clones, c.Properties[DatasetPropName].Value) 843 | } 844 | return 845 | } 846 | } 847 | return 848 | } 849 | -------------------------------------------------------------------------------- /zfs.h: -------------------------------------------------------------------------------- 1 | /* C wrappers around some zfs calls and C in general that should simplify 2 | * using libzfs from go language, make go code shorter and more readable. 3 | */ 4 | 5 | #ifndef SERVERWARE_ZFS_H 6 | #define SERVERWARE_ZFS_H 7 | 8 | struct dataset_list { 9 | zfs_handle_t *zh; 10 | void *pnext; 11 | }; 12 | 13 | typedef struct zfs_share { 14 | uint64_t z_exportdata; 15 | uint64_t z_sharedata; 16 | uint64_t z_sharetype; /* 0 = share, 1 = unshare */ 17 | uint64_t z_sharemax; /* max length of share string */ 18 | } zfs_share_t; 19 | 20 | /* 21 | * A limited number of zpl level stats are retrievable 22 | * with an ioctl. zfs diff is the current consumer. 
23 | */ 24 | typedef struct zfs_stat { 25 | uint64_t zs_gen; 26 | uint64_t zs_mode; 27 | uint64_t zs_links; 28 | uint64_t zs_ctime[2]; 29 | } zfs_stat_t; 30 | 31 | typedef struct zinject_record { 32 | uint64_t zi_objset; 33 | uint64_t zi_object; 34 | uint64_t zi_start; 35 | uint64_t zi_end; 36 | uint64_t zi_guid; 37 | uint32_t zi_level; 38 | uint32_t zi_error; 39 | uint64_t zi_type; 40 | uint32_t zi_freq; 41 | uint32_t zi_failfast; 42 | char zi_func[MAXNAMELEN]; 43 | uint32_t zi_iotype; 44 | int32_t zi_duration; 45 | uint64_t zi_timer; 46 | uint64_t zi_nlanes; 47 | uint32_t zi_cmd; 48 | uint32_t zi_pad; 49 | } zinject_record_t; 50 | 51 | typedef struct dmu_objset_stats { 52 | uint64_t dds_num_clones; /* number of clones of this */ 53 | uint64_t dds_creation_txg; 54 | uint64_t dds_guid; 55 | dmu_objset_type_t dds_type; 56 | uint8_t dds_is_snapshot; 57 | uint8_t dds_inconsistent; 58 | char dds_origin[ZFS_MAX_DATASET_NAME_LEN]; 59 | } dmu_objset_stats_t; 60 | 61 | typedef struct zfs_cmd { 62 | char zc_name[MAXPATHLEN]; /* name of pool or dataset */ 63 | uint64_t zc_nvlist_src; /* really (char *) */ 64 | uint64_t zc_nvlist_src_size; 65 | uint64_t zc_nvlist_dst; /* really (char *) */ 66 | uint64_t zc_nvlist_dst_size; 67 | boolean_t zc_nvlist_dst_filled; /* put an nvlist in dst? */ 68 | int zc_pad2; 69 | 70 | /* 71 | * The following members are for legacy ioctls which haven't been 72 | * converted to the new method. 73 | */ 74 | uint64_t zc_history; /* really (char *) */ 75 | char zc_value[MAXPATHLEN * 2]; 76 | char zc_string[MAXNAMELEN]; 77 | uint64_t zc_guid; 78 | uint64_t zc_nvlist_conf; /* really (char *) */ 79 | uint64_t zc_nvlist_conf_size; 80 | uint64_t zc_cookie; 81 | uint64_t zc_objset_type; 82 | uint64_t zc_perm_action; 83 | uint64_t zc_history_len; 84 | uint64_t zc_history_offset; 85 | uint64_t zc_obj; 86 | uint64_t zc_iflags; /* internal to zfs(7fs) */ 87 | zfs_share_t zc_share; 88 | dmu_objset_stats_t zc_objset_stats; 89 | zinject_record_t zc_inject_record; 90 | uint32_t zc_defer_destroy; 91 | uint32_t zc_flags; 92 | uint64_t zc_action_handle; 93 | int zc_cleanup_fd; 94 | uint8_t zc_simple; 95 | uint8_t zc_pad[3]; /* alignment */ 96 | uint64_t zc_sendobj; 97 | uint64_t zc_fromobj; 98 | uint64_t zc_createtxg; 99 | zfs_stat_t zc_stat; 100 | } zfs_cmd_t; 101 | 102 | typedef struct dataset_list dataset_list_t; 103 | typedef struct dataset_list* dataset_list_ptr; 104 | 105 | 106 | dataset_list_t *create_dataset_list_item(); 107 | void dataset_list_close(dataset_list_t *list); 108 | void dataset_list_free(dataset_list_t *list); 109 | 110 | dataset_list_t* dataset_list_root(); 111 | dataset_list_t* dataset_list_children(dataset_list_t *dataset); 112 | dataset_list_t *dataset_next(dataset_list_t *dataset); 113 | int dataset_type(dataset_list_ptr dataset); 114 | 115 | dataset_list_ptr dataset_open(const char *path); 116 | int dataset_create(const char *path, zfs_type_t type, nvlist_ptr props); 117 | int dataset_destroy(dataset_list_ptr dataset, boolean_t defer); 118 | zpool_list_ptr dataset_get_pool(dataset_list_ptr dataset); 119 | int dataset_prop_set(dataset_list_ptr dataset, zfs_prop_t prop, const char *value); 120 | int dataset_user_prop_set(dataset_list_ptr dataset, const char *prop, const char *value); 121 | int dataset_clone(dataset_list_ptr dataset, const char *target, nvlist_ptr props); 122 | int dataset_snapshot(const char *path, boolean_t recur, nvlist_ptr props); 123 | int dataset_rollback(dataset_list_ptr dataset, dataset_list_ptr snapshot, boolean_t force); 124 | int 
dataset_promote(dataset_list_ptr dataset); 125 | int dataset_rename(dataset_list_ptr dataset, const char* new_name, boolean_t recur, boolean_t force_unm); 126 | const char* dataset_is_mounted(dataset_list_ptr dataset); 127 | int dataset_mount(dataset_list_ptr dataset, const char *options, int flags); 128 | int dataset_unmount(dataset_list_ptr dataset, int flags); 129 | int dataset_unmountall(dataset_list_ptr dataset, int flags); 130 | const char *dataset_get_name(dataset_list_ptr ds); 131 | 132 | property_list_t *read_dataset_property(dataset_list_t *dataset, int prop); 133 | property_list_t *read_user_property(dataset_list_t *dataset, const char* prop); 134 | 135 | char** alloc_cstrings(int size); 136 | void strings_setat(char **a, int at, char *v); 137 | 138 | sendflags_t *alloc_sendflags(); 139 | recvflags_t *alloc_recvflags(); 140 | 141 | 142 | struct zfs_cmd *new_zfs_cmd(); 143 | int estimate_send_size(struct zfs_cmd *zc); 144 | 145 | #endif 146 | /* SERVERWARE_ZFS_H */ 147 | -------------------------------------------------------------------------------- /zfs_test.go: -------------------------------------------------------------------------------- 1 | package zfs_test 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | 7 | zfs "github.com/bicomsystems/go-libzfs" 8 | ) 9 | 10 | /* ------------------------------------------------------------------------- */ 11 | // HELPERS: 12 | var TSTDatasetPath = TSTPoolName + "/DATASET" 13 | var TSTVolumePath = TSTDatasetPath + "/VOLUME" 14 | var TSTDatasetPathSnap = TSTDatasetPath + "@test" 15 | 16 | func printDatasets(ds []zfs.Dataset) error { 17 | for _, d := range ds { 18 | 19 | path, err := d.Path() 20 | if err != nil { 21 | return err 22 | } 23 | p, err := d.GetProperty(zfs.DatasetPropType) 24 | if err != nil { 25 | return err 26 | } 27 | fmt.Printf(" %30s | %10s\n", path, p.Value) 28 | if err = printDatasets(d.Children); err != nil { 29 | return err 30 | } 31 | } 32 | return nil 33 | } 34 | 35 | /* ------------------------------------------------------------------------- */ 36 | // TESTS: 37 | 38 | func zfsTestDatasetCreate(t *testing.T) { 39 | // reinit names used in case TESTPOOL was in conflict 40 | TSTDatasetPath = TSTPoolName + "/DATASET" 41 | TSTVolumePath = TSTDatasetPath + "/VOLUME" 42 | TSTDatasetPathSnap = TSTDatasetPath + "@test" 43 | 44 | println("TEST DatasetCreate(", TSTDatasetPath, ") (filesystem) ... ") 45 | props := make(map[zfs.Prop]zfs.Property) 46 | d, err := zfs.DatasetCreate(TSTDatasetPath, zfs.DatasetTypeFilesystem, props) 47 | if err != nil { 48 | t.Error(err) 49 | return 50 | } 51 | d.Close() 52 | print("PASS\n\n") 53 | 54 | strSize := "536870912" // 512M 55 | 56 | println("TEST DatasetCreate(", TSTVolumePath, ") (volume) ... ") 57 | props[zfs.DatasetPropVolsize] = zfs.Property{Value: strSize} 58 | // In addition I explicitly choose some more properties to be set. 59 | props[zfs.DatasetPropVolblocksize] = zfs.Property{Value: "4096"} 60 | props[zfs.DatasetPropReservation] = zfs.Property{Value: strSize} 61 | d, err = zfs.DatasetCreate(TSTVolumePath, zfs.DatasetTypeVolume, props) 62 | if err != nil { 63 | t.Error(err) 64 | return 65 | } 66 | d.Close() 67 | print("PASS\n\n") 68 | } 69 | 70 | func zfsTestDatasetOpen(t *testing.T) { 71 | println("TEST DatasetOpen(", TSTDatasetPath, ") ... ") 72 | d, err := zfs.DatasetOpen(TSTDatasetPath) 73 | if err != nil { 74 | t.Error(err) 75 | return 76 | } 77 | defer d.Close() 78 | print("PASS\n\n") 79 | 80 | println("TEST Set/GetUserProperty(prop, value string) ... 
") 81 | var p zfs.Property 82 | // Test set/get user property 83 | if err = d.SetUserProperty("go-libzfs:test", "yes"); err != nil { 84 | t.Error(err) 85 | return 86 | } 87 | if p, err = d.GetUserProperty("go-libzfs:test"); err != nil { 88 | t.Error(err) 89 | return 90 | } 91 | println("go-libzfs:test", " = ", 92 | p.Value) 93 | print("PASS\n\n") 94 | } 95 | 96 | func zfsTestDatasetSetProperty(t *testing.T) { 97 | println("TEST Dataset SetProp(", TSTDatasetPath, ") ... ") 98 | d, err := zfs.DatasetOpen(TSTDatasetPath) 99 | if err != nil { 100 | t.Error(err) 101 | return 102 | } 103 | defer d.Close() 104 | if err = d.SetProperty(zfs.DatasetPropOverlay, "on"); err != nil { 105 | t.Error(err) 106 | return 107 | } 108 | if prop, err := d.GetProperty(zfs.DatasetPropOverlay); err != nil { 109 | t.Error(err) 110 | return 111 | } else { 112 | println(prop.Value) 113 | if prop.Value != "on" { 114 | t.Error(fmt.Errorf("Update of dataset property failed")) 115 | return 116 | } 117 | } 118 | print("PASS\n\n") 119 | return 120 | } 121 | 122 | func zfsTestDatasetOpenAll(t *testing.T) { 123 | println("TEST DatasetOpenAll()/DatasetCloseAll() ... ") 124 | ds, err := zfs.DatasetOpenAll() 125 | if err != nil { 126 | t.Error(err) 127 | return 128 | } 129 | if err = printDatasets(ds); err != nil { 130 | zfs.DatasetCloseAll(ds) 131 | t.Error(err) 132 | return 133 | } 134 | zfs.DatasetCloseAll(ds) 135 | print("PASS\n\n") 136 | } 137 | 138 | func zfsTestDatasetSnapshot(t *testing.T) { 139 | println("TEST DatasetSnapshot(", TSTDatasetPath, ", true, ...) ... ") 140 | props := make(map[zfs.Prop]zfs.Property) 141 | d, err := zfs.DatasetSnapshot(TSTDatasetPathSnap, true, props) 142 | if err != nil { 143 | t.Error(err) 144 | return 145 | } 146 | defer d.Close() 147 | print("PASS\n\n") 148 | } 149 | 150 | func zfsTestDatasetHoldRelease(t *testing.T) { 151 | println("TEST Hold/Release(", TSTDatasetPathSnap, ", true, ...) ... ") 152 | d, err := zfs.DatasetOpen(TSTDatasetPathSnap) 153 | if err != nil { 154 | t.Error(err) 155 | return 156 | } 157 | defer d.Close() 158 | err = d.Hold("keep") 159 | if err != nil { 160 | t.Error(err) 161 | return 162 | } 163 | 164 | var tags []zfs.HoldTag 165 | tags, err = d.Holds() 166 | if err != nil { 167 | t.Error(err) 168 | return 169 | } 170 | for _, tag := range tags { 171 | println("tag:", tag.Name, "timestamp:", tag.Timestamp.String()) 172 | } 173 | 174 | err = d.Release("keep") 175 | if err != nil { 176 | t.Error(err) 177 | return 178 | } 179 | 180 | tags, err = d.Holds() 181 | if err != nil { 182 | t.Error(err) 183 | return 184 | } 185 | for _, tag := range tags { 186 | println("* tag:", tag.Name, "timestamp:", tag.Timestamp.String()) 187 | } 188 | print("PASS\n\n") 189 | } 190 | 191 | func zfsTestSendSize(t *testing.T) { 192 | var size int64 193 | println("TEST SendSize(", TSTDatasetPathSnap, ") ... ") 194 | d, err := zfs.DatasetOpen(TSTDatasetPathSnap) 195 | if err != nil { 196 | t.Error(err) 197 | return 198 | } 199 | defer d.Close() 200 | if size, err = d.SendSize("", zfs.SendFlags{Compress: true}); err != nil { 201 | t.Error(err) 202 | return 203 | } 204 | if size <= 0 { 205 | t.Error(fmt.Errorf("Failed to fetch size. size = %d", size)) 206 | return 207 | } 208 | print("PASS\n\n") 209 | } 210 | 211 | func zfsTestResumeTokenUnpack(t *testing.T) { 212 | var resToken zfs.ResumeToken 213 | println("TEST ResumeTokenUnpack ... 
") 214 | err := resToken.Unpack("1-2111998041-170-789c636064000310a501c49c50360710a715e5e7a69766a6304001bfd66a579708b10d0a40363b92bafca4acd4e412081f0430e4d3d28a5381f20c0f94f218a1f26c48f2499525a9c540fac6d3fd6cd8f497e4435cb1f891ebba68d955ce3390e439c1f27989b9a90c0c7eae21c121fe41fa29f9b9899979bae646a98966c9166686c9496926262969894606a64966e629a686466946268689fa65f939bac9c996e68646a906e606962926a949e696c989a6a989468926a99666264646e60ec995c939a9ba86c6c98986494666692649294926e6a916c9498929c646c64916408d40d3e1fee666408467727e6e41516a71717e36031c0000cb4c43f4") 215 | if err != nil { 216 | t.Error(err) 217 | return 218 | } 219 | println("ResumeToken:", fmt.Sprintf("%v", resToken)) 220 | return 221 | } 222 | 223 | func zfsTestDatasetDestroy(t *testing.T) { 224 | println("TEST DATASET Destroy( ", TSTDatasetPath, " ) ... ") 225 | d, err := zfs.DatasetOpen(TSTDatasetPath) 226 | if err != nil { 227 | t.Error(err) 228 | return 229 | } 230 | defer d.Close() 231 | if err = d.DestroyRecursive(); err != nil { 232 | t.Error(err) 233 | return 234 | } 235 | print("PASS\n\n") 236 | } 237 | 238 | func zfsTestMountPointConcurrency(t *testing.T) { 239 | println("TEST DATASET MountPointConcurrency( ", TSTDatasetPath, " ) ... ") 240 | d, err := zfs.DatasetOpen(TSTDatasetPath) 241 | if err != nil { 242 | t.Error(err) 243 | return 244 | } 245 | defer d.Close() 246 | gr1 := make(chan bool) 247 | gr2 := make(chan bool) 248 | go func() { 249 | for i := 0; i < 100; i++ { 250 | println("reload properties:", i) 251 | // d.SetProperty(zfs.DatasetPropMountpoint, "/TEST") 252 | d.ReloadProperties() 253 | } 254 | gr1 <- true 255 | }() 256 | go func() { 257 | for i := 0; i < 100; i++ { 258 | println("set mountpoint:", i) 259 | d.ReloadProperties() 260 | // d.SetProperty(zfs.DatasetPropMountpoint, "/TEST") 261 | // d.GetProperty(zfs.DatasetPropMountpoint) 262 | } 263 | gr2 <- true 264 | }() 265 | 266 | d.SetProperty(zfs.DatasetPropMountpoint, "none") 267 | 268 | <-gr1 269 | <-gr2 270 | } 271 | 272 | /* ------------------------------------------------------------------------- */ 273 | // EXAMPLES: 274 | 275 | // Example of creating ZFS volume 276 | func ExampleDatasetCreate() { 277 | // Create map to represent ZFS dataset properties. This is equivalent to 278 | // list of properties you can get from ZFS CLI tool, and some more 279 | // internally used by libzfs. 280 | props := make(map[zfs.Prop]zfs.Property) 281 | 282 | // I choose to create (block) volume 1GiB in size. Size is just ZFS dataset 283 | // property and this is done as map of strings. So, You have to either 284 | // specify size as base 10 number in string, or use strconv package or 285 | // similar to convert in to string (base 10) from numeric type. 286 | strSize := "1073741824" 287 | 288 | props[zfs.DatasetPropVolsize] = zfs.Property{Value: strSize} 289 | // In addition I explicitly choose some more properties to be set. 
290 | props[zfs.DatasetPropVolblocksize] = zfs.Property{Value: "4096"} 291 | props[zfs.DatasetPropReservation] = zfs.Property{Value: strSize} 292 | 293 | // Let's create desired volume 294 | d, err := zfs.DatasetCreate("TESTPOOL/VOLUME1", zfs.DatasetTypeVolume, props) 295 | if err != nil { 296 | println(err.Error()) 297 | return 298 | } 299 | // Dataset has to be closed for memory cleanup 300 | defer d.Close() 301 | 302 | println("Created zfs volume TESTPOOL/VOLUME1") 303 | } 304 | 305 | func ExampleDatasetOpen() { 306 | // Open dataset and read its available space 307 | d, err := zfs.DatasetOpen("TESTPOOL/DATASET1") 308 | if err != nil { 309 | panic(err.Error()) 310 | } 311 | defer d.Close() 312 | var p zfs.Property 313 | if p, err = d.GetProperty(zfs.DatasetPropAvailable); err != nil { 314 | panic(err.Error()) 315 | } 316 | println(zfs.DatasetPropertyToName(zfs.DatasetPropAvailable), " = ", 317 | p.Value) 318 | } 319 | 320 | func ExampleDatasetOpenAll() { 321 | datasets, err := zfs.DatasetOpenAll() 322 | if err != nil { 323 | panic(err.Error()) 324 | } 325 | defer zfs.DatasetCloseAll(datasets) 326 | 327 | // Print out path and type of root datasets 328 | for _, d := range datasets { 329 | path, err := d.Path() 330 | if err != nil { 331 | panic(err.Error()) 332 | } 333 | p, err := d.GetProperty(zfs.DatasetPropType) 334 | if err != nil { 335 | panic(err.Error()) 336 | } 337 | fmt.Printf("%30s | %10s\n", path, p.Value) 338 | } 339 | 340 | } 341 | 342 | func CopyAndDestroy(d *zfs.Dataset) (err error) { 343 | if err = d.Destroy(false); err != nil { 344 | return 345 | } 346 | d.Close() 347 | return 348 | } 349 | 350 | func zfsTestDoubleFreeOnDestroy(t *testing.T) { 351 | TSTDestroyPath := TSTPoolName + "/DESTROY" 352 | println("TEST Double Free On Destroy( ", TSTDestroyPath, " ) ... ") 353 | props := make(map[zfs.Prop]zfs.Property) 354 | d, err := zfs.DatasetCreate(TSTDestroyPath, zfs.DatasetTypeFilesystem, props) 355 | if err != nil { 356 | t.Error(err) 357 | return 358 | } 359 | d.Close() 360 | 361 | d, err = zfs.DatasetOpen(TSTDestroyPath) 362 | if err != nil { 363 | t.Error(err) 364 | return 365 | } 366 | defer d.Close() 367 | if err = CopyAndDestroy(&d); err != nil { 368 | t.Error(err) 369 | return 370 | } 371 | print("PASS\n\n") 372 | } 373 | -------------------------------------------------------------------------------- /zpool.c: -------------------------------------------------------------------------------- 1 | /* C wrappers around some zfs calls and C in general that should simplify 2 | * using libzfs from go language, and make go code shorter and more readable. 
3 | */ 4 | 5 | #include <libzfs.h> 6 | #include <libzutil.h> 7 | #include <string.h> 8 | #include <stdio.h> 9 | 10 | #include <stdlib.h> 11 | #include <unistd.h> 12 | #include <thread_pool.h> 13 | 14 | #include "common.h" 15 | #include "zpool.h" 16 | 17 | char *sZPOOL_CONFIG_VERSION = ZPOOL_CONFIG_VERSION; 18 | char *sZPOOL_CONFIG_POOL_NAME = ZPOOL_CONFIG_POOL_NAME; 19 | char *sZPOOL_CONFIG_POOL_STATE = ZPOOL_CONFIG_POOL_STATE; 20 | char *sZPOOL_CONFIG_POOL_TXG = ZPOOL_CONFIG_POOL_TXG; 21 | char *sZPOOL_CONFIG_POOL_GUID = ZPOOL_CONFIG_POOL_GUID; 22 | char *sZPOOL_CONFIG_CREATE_TXG = ZPOOL_CONFIG_CREATE_TXG; 23 | char *sZPOOL_CONFIG_TOP_GUID = ZPOOL_CONFIG_TOP_GUID; 24 | char *sZPOOL_CONFIG_VDEV_TREE = ZPOOL_CONFIG_VDEV_TREE; 25 | char *sZPOOL_CONFIG_TYPE = ZPOOL_CONFIG_TYPE; 26 | char *sZPOOL_CONFIG_CHILDREN = ZPOOL_CONFIG_CHILDREN; 27 | char *sZPOOL_CONFIG_ID = ZPOOL_CONFIG_ID; 28 | char *sZPOOL_CONFIG_GUID = ZPOOL_CONFIG_GUID; 29 | char *sZPOOL_CONFIG_PATH = ZPOOL_CONFIG_PATH; 30 | char *sZPOOL_CONFIG_DEVID = ZPOOL_CONFIG_DEVID; 31 | char *sZPOOL_CONFIG_METASLAB_ARRAY = ZPOOL_CONFIG_METASLAB_ARRAY; 32 | char *sZPOOL_CONFIG_METASLAB_SHIFT = ZPOOL_CONFIG_METASLAB_SHIFT; 33 | char *sZPOOL_CONFIG_ASHIFT = ZPOOL_CONFIG_ASHIFT; 34 | char *sZPOOL_CONFIG_ASIZE = ZPOOL_CONFIG_ASIZE; 35 | char *sZPOOL_CONFIG_DTL = ZPOOL_CONFIG_DTL; 36 | char *sZPOOL_CONFIG_SCAN_STATS = ZPOOL_CONFIG_SCAN_STATS; 37 | char *sZPOOL_CONFIG_VDEV_STATS = ZPOOL_CONFIG_VDEV_STATS; 38 | char *sZPOOL_CONFIG_WHOLE_DISK = ZPOOL_CONFIG_WHOLE_DISK; 39 | char *sZPOOL_CONFIG_ERRCOUNT = ZPOOL_CONFIG_ERRCOUNT; 40 | char *sZPOOL_CONFIG_NOT_PRESENT = ZPOOL_CONFIG_NOT_PRESENT; 41 | char *sZPOOL_CONFIG_SPARES = ZPOOL_CONFIG_SPARES; 42 | char *sZPOOL_CONFIG_IS_SPARE = ZPOOL_CONFIG_IS_SPARE; 43 | char *sZPOOL_CONFIG_NPARITY = ZPOOL_CONFIG_NPARITY; 44 | char *sZPOOL_CONFIG_HOSTID = ZPOOL_CONFIG_HOSTID; 45 | char *sZPOOL_CONFIG_HOSTNAME = ZPOOL_CONFIG_HOSTNAME; 46 | char *sZPOOL_CONFIG_LOADED_TIME = ZPOOL_CONFIG_LOADED_TIME; 47 | char *sZPOOL_CONFIG_UNSPARE = ZPOOL_CONFIG_UNSPARE; 48 | char *sZPOOL_CONFIG_PHYS_PATH = ZPOOL_CONFIG_PHYS_PATH; 49 | char *sZPOOL_CONFIG_IS_LOG = ZPOOL_CONFIG_IS_LOG; 50 | char *sZPOOL_CONFIG_L2CACHE = ZPOOL_CONFIG_L2CACHE; 51 | char *sZPOOL_CONFIG_HOLE_ARRAY = ZPOOL_CONFIG_HOLE_ARRAY; 52 | char *sZPOOL_CONFIG_VDEV_CHILDREN = ZPOOL_CONFIG_VDEV_CHILDREN; 53 | char *sZPOOL_CONFIG_IS_HOLE = ZPOOL_CONFIG_IS_HOLE; 54 | char *sZPOOL_CONFIG_DDT_HISTOGRAM = ZPOOL_CONFIG_DDT_HISTOGRAM; 55 | char *sZPOOL_CONFIG_DDT_OBJ_STATS = ZPOOL_CONFIG_DDT_OBJ_STATS; 56 | char *sZPOOL_CONFIG_DDT_STATS = ZPOOL_CONFIG_DDT_STATS; 57 | char *sZPOOL_CONFIG_SPLIT = ZPOOL_CONFIG_SPLIT; 58 | char *sZPOOL_CONFIG_ORIG_GUID = ZPOOL_CONFIG_ORIG_GUID; 59 | char *sZPOOL_CONFIG_SPLIT_GUID = ZPOOL_CONFIG_SPLIT_GUID; 60 | char *sZPOOL_CONFIG_SPLIT_LIST = ZPOOL_CONFIG_SPLIT_LIST; 61 | char *sZPOOL_CONFIG_REMOVING = ZPOOL_CONFIG_REMOVING; 62 | char *sZPOOL_CONFIG_RESILVER_TXG = ZPOOL_CONFIG_RESILVER_TXG; 63 | char *sZPOOL_CONFIG_COMMENT = ZPOOL_CONFIG_COMMENT; 64 | char *sZPOOL_CONFIG_SUSPENDED = ZPOOL_CONFIG_SUSPENDED; 65 | char *sZPOOL_CONFIG_TIMESTAMP = ZPOOL_CONFIG_TIMESTAMP; 66 | char *sZPOOL_CONFIG_BOOTFS = ZPOOL_CONFIG_BOOTFS; 67 | char *sZPOOL_CONFIG_MISSING_DEVICES = ZPOOL_CONFIG_MISSING_DEVICES; 68 | char *sZPOOL_CONFIG_LOAD_INFO = ZPOOL_CONFIG_LOAD_INFO; 69 | char *sZPOOL_CONFIG_REWIND_INFO = ZPOOL_CONFIG_REWIND_INFO; 70 | char *sZPOOL_CONFIG_UNSUP_FEAT = ZPOOL_CONFIG_UNSUP_FEAT; 71 | char *sZPOOL_CONFIG_ENABLED_FEAT = ZPOOL_CONFIG_ENABLED_FEAT; 72 | char *sZPOOL_CONFIG_CAN_RDONLY = ZPOOL_CONFIG_CAN_RDONLY; 73 | char 
*sZPOOL_CONFIG_FEATURES_FOR_READ = ZPOOL_CONFIG_FEATURES_FOR_READ; 74 | char *sZPOOL_CONFIG_FEATURE_STATS = ZPOOL_CONFIG_FEATURE_STATS; 75 | char *sZPOOL_CONFIG_ERRATA = ZPOOL_CONFIG_ERRATA; 76 | char *sZPOOL_CONFIG_OFFLINE = ZPOOL_CONFIG_OFFLINE; 77 | char *sZPOOL_CONFIG_FAULTED = ZPOOL_CONFIG_FAULTED; 78 | char *sZPOOL_CONFIG_DEGRADED = ZPOOL_CONFIG_DEGRADED; 79 | char *sZPOOL_CONFIG_REMOVED = ZPOOL_CONFIG_REMOVED; 80 | char *sZPOOL_CONFIG_FRU = ZPOOL_CONFIG_FRU; 81 | char *sZPOOL_CONFIG_AUX_STATE = ZPOOL_CONFIG_AUX_STATE; 82 | char *sZPOOL_LOAD_POLICY = ZPOOL_LOAD_POLICY; 83 | char *sZPOOL_LOAD_REWIND_POLICY = ZPOOL_LOAD_REWIND_POLICY; 84 | char *sZPOOL_LOAD_REQUEST_TXG = ZPOOL_LOAD_REQUEST_TXG; 85 | char *sZPOOL_LOAD_META_THRESH = ZPOOL_LOAD_META_THRESH; 86 | char *sZPOOL_LOAD_DATA_THRESH = ZPOOL_LOAD_DATA_THRESH; 87 | char *sZPOOL_CONFIG_LOAD_TIME = ZPOOL_CONFIG_LOAD_TIME; 88 | char *sZPOOL_CONFIG_LOAD_DATA_ERRORS = ZPOOL_CONFIG_LOAD_DATA_ERRORS; 89 | char *sZPOOL_CONFIG_REWIND_TIME = ZPOOL_CONFIG_REWIND_TIME; 90 | 91 | static char _lasterr_[1024]; 92 | 93 | const char *lasterr(void) { 94 | return _lasterr_; 95 | } 96 | 97 | zpool_list_t *create_zpool_list_item() { 98 | zpool_list_t *zlist = malloc(sizeof(zpool_list_t)); 99 | memset(zlist, 0, sizeof(zpool_list_t)); 100 | return zlist; 101 | } 102 | 103 | int zpool_list_callb(zpool_handle_t *pool, void *data) { 104 | zpool_list_t **lroot = (zpool_list_t**)data; 105 | zpool_list_t *nroot = create_zpool_list_item(); 106 | 107 | if ( !((*lroot)->zph) ) { 108 | (*lroot)->zph = pool; 109 | } else { 110 | nroot->zph = pool; 111 | nroot->pnext = (void*)*lroot; 112 | *lroot = nroot; 113 | } 114 | return 0; 115 | } 116 | 117 | zpool_list_ptr zpool_list_openall() { 118 | int err = 0; 119 | zpool_list_t *zlist = create_zpool_list_item(); 120 | err = zpool_iter(libzfsHandle, zpool_list_callb, &zlist); 121 | if ( err != 0 || zlist->zph == NULL ) { 122 | zpool_list_free(zlist); 123 | zlist = NULL; 124 | } 125 | return zlist; 126 | } 127 | 128 | zpool_list_t* zpool_list_open(const char *name) { 129 | zpool_list_t *zlist = create_zpool_list_item(); 130 | zlist->zph = zpool_open(libzfsHandle, name); 131 | if ( zlist->zph ) { 132 | return zlist; 133 | } else { 134 | zpool_list_free(zlist); 135 | } 136 | return 0; 137 | } 138 | 139 | zpool_list_t *zpool_next(zpool_list_t *pool) { 140 | return pool->pnext; 141 | } 142 | 143 | void zpool_list_free(zpool_list_t *list) { 144 | zpool_list_ptr next; 145 | while(list) { 146 | next = list->pnext; 147 | free(list); 148 | list = next; 149 | } 150 | } 151 | 152 | void zpool_list_close(zpool_list_t *pool) { 153 | zpool_close(pool->zph); 154 | zpool_list_free(pool); 155 | } 156 | 157 | property_list_t *next_property(property_list_t *list) { 158 | if (list != 0) { 159 | return list->pnext; 160 | } 161 | return list; 162 | } 163 | 164 | 165 | void zprop_source_tostr(char *dst, zprop_source_t source) { 166 | switch (source) { 167 | case ZPROP_SRC_NONE: 168 | strcpy(dst, "none"); 169 | break; 170 | case ZPROP_SRC_TEMPORARY: 171 | strcpy(dst, "temporary"); 172 | break; 173 | case ZPROP_SRC_LOCAL: 174 | strcpy(dst, "local"); 175 | break; 176 | case ZPROP_SRC_INHERITED: 177 | strcpy(dst, "inherited"); 178 | break; 179 | case ZPROP_SRC_RECEIVED: 180 | strcpy(dst, "received"); 181 | break; 182 | default: 183 | strcpy(dst, "default"); 184 | break; 185 | } 186 | } 187 | 188 | 189 | property_list_ptr read_zpool_property(zpool_list_ptr pool, int prop) { 190 | 191 | int r = 0; 192 | zprop_source_t source; 193 | property_list_ptr 
list = new_property_list(); 194 | 195 | r = zpool_get_prop(pool->zph, prop, 196 | list->value, INT_MAX_VALUE, &source, B_TRUE); 197 | if (r == 0) { 198 | // strcpy(list->name, zpool_prop_to_name(prop)); 199 | zprop_source_tostr(list->source, source); 200 | } else { 201 | free_properties(list); 202 | return NULL; 203 | } 204 | list->property = (int)prop; 205 | return list; 206 | } 207 | 208 | property_list_ptr read_append_zpool_property(zpool_list_ptr pool, property_list_ptr proot, zpool_prop_t prop) { 209 | int r = 0; 210 | property_list_t *newitem = NULL; 211 | 212 | newitem = read_zpool_property(pool, prop); 213 | if (newitem == NULL) { 214 | return proot; 215 | } 216 | // printf("p: %s %s %s\n", newitem->name, newitem->value, newitem->source); 217 | newitem->pnext = proot; 218 | proot = newitem; 219 | 220 | return proot; 221 | } 222 | 223 | property_list_t *read_zpool_properties(zpool_list_ptr pool) { 224 | // read pool name as first property 225 | property_list_t *root = NULL, *list = NULL; 226 | 227 | root = read_append_zpool_property(pool, root, ZPOOL_PROP_NAME); 228 | root = read_append_zpool_property(pool, root, ZPOOL_PROP_SIZE); 229 | root = read_append_zpool_property(pool, root, ZPOOL_PROP_CAPACITY); 230 | root = read_append_zpool_property(pool, root, ZPOOL_PROP_ALTROOT); 231 | root = read_append_zpool_property(pool, root, ZPOOL_PROP_HEALTH); 232 | root = read_append_zpool_property(pool, root, ZPOOL_PROP_GUID); 233 | root = read_append_zpool_property(pool, root, ZPOOL_PROP_VERSION); 234 | root = read_append_zpool_property(pool, root, ZPOOL_PROP_BOOTFS); 235 | root = read_append_zpool_property(pool, root, ZPOOL_PROP_DELEGATION); 236 | root = read_append_zpool_property(pool, root, ZPOOL_PROP_AUTOREPLACE); 237 | root = read_append_zpool_property(pool, root, ZPOOL_PROP_CACHEFILE); 238 | root = read_append_zpool_property(pool, root, ZPOOL_PROP_FAILUREMODE); 239 | root = read_append_zpool_property(pool, root, ZPOOL_PROP_LISTSNAPS); 240 | root = read_append_zpool_property(pool, root, ZPOOL_PROP_AUTOEXPAND); 241 | root = read_append_zpool_property(pool, root, ZPOOL_PROP_DEDUPDITTO); 242 | root = read_append_zpool_property(pool, root, ZPOOL_PROP_DEDUPRATIO); 243 | root = read_append_zpool_property(pool, root, ZPOOL_PROP_FREE); 244 | root = read_append_zpool_property(pool, root, ZPOOL_PROP_ALLOCATED); 245 | root = read_append_zpool_property(pool, root, ZPOOL_PROP_READONLY); 246 | root = read_append_zpool_property(pool, root, ZPOOL_PROP_ASHIFT); 247 | root = read_append_zpool_property(pool, root, ZPOOL_PROP_COMMENT); 248 | root = read_append_zpool_property(pool, root, ZPOOL_PROP_EXPANDSZ); 249 | root = read_append_zpool_property(pool, root, ZPOOL_PROP_FREEING); 250 | 251 | list = new_property_list(); 252 | list->property = ZPOOL_NUM_PROPS; 253 | sprintf(list->value, "%d", ZPOOL_NUM_PROPS); 254 | list->pnext = root; 255 | zprop_source_tostr(list->source, ZPROP_SRC_NONE); 256 | root = list; 257 | 258 | // printf("Finished properties reading.\n"); 259 | return root; 260 | } 261 | 262 | pool_state_t zpool_read_state(zpool_handle_t *zh) { 263 | return zpool_get_state(zh); 264 | } 265 | 266 | 267 | const char *gettext(const char *txt) { 268 | return txt; 269 | } 270 | /* 271 | * Add a property pair (name, string-value) into a property nvlist. 
272 | */ 273 | // int 274 | // add_prop_list(const char *propname, char *propval, nvlist_t **props, 275 | // boolean_t poolprop) { 276 | // zpool_prop_t prop = ZPROP_INVAL; 277 | // zfs_prop_t fprop; 278 | // nvlist_t *proplist; 279 | // const char *normnm; 280 | // char *strval; 281 | 282 | // if (*props == NULL && 283 | // nvlist_alloc(props, NV_UNIQUE_NAME, 0) != 0) { 284 | // (void) snprintf(_lasterr_, 1024, "internal error: out of memory"); 285 | // return (1); 286 | // } 287 | 288 | // proplist = *props; 289 | 290 | // if (poolprop) { 291 | // const char *vname = zpool_prop_to_name(ZPOOL_PROP_VERSION); 292 | 293 | // if ((prop = zpool_name_to_prop(propname)) == ZPROP_INVAL && 294 | // !zpool_prop_feature(propname)) { 295 | // (void) snprintf(_lasterr_, 1024, "property '%s' is " 296 | // "not a valid pool property", propname); 297 | // return (2); 298 | // } 299 | 300 | // /* 301 | // * feature@ properties and version should not be specified 302 | // * at the same time. 303 | // */ 304 | // // if ((prop == ZPROP_INVAL && zpool_prop_feature(propname) && 305 | // // nvlist_exists(proplist, vname)) || 306 | // // (prop == ZPOOL_PROP_VERSION && 307 | // // prop_list_contains_feature(proplist))) { 308 | // // (void) fprintf(stderr, gettext("'feature@' and " 309 | // // "'version' properties cannot be specified " 310 | // // "together\n")); 311 | // // return (2); 312 | // // } 313 | 314 | 315 | // if (zpool_prop_feature(propname)) 316 | // normnm = propname; 317 | // else 318 | // normnm = zpool_prop_to_name(prop); 319 | // } else { 320 | // if ((fprop = zfs_name_to_prop(propname)) != ZPROP_INVAL) { 321 | // normnm = zfs_prop_to_name(fprop); 322 | // } else { 323 | // normnm = propname; 324 | // } 325 | // } 326 | 327 | // if (nvlist_lookup_string(proplist, normnm, &strval) == 0 && 328 | // prop != ZPOOL_PROP_CACHEFILE) { 329 | // (void) snprintf(_lasterr_, 1024, "property '%s' " 330 | // "specified multiple times", propname); 331 | // return (2); 332 | // } 333 | 334 | // if (nvlist_add_string(proplist, normnm, propval) != 0) { 335 | // (void) snprintf(_lasterr_, 1024, "internal " 336 | // "error: out of memory\n"); 337 | // return (1); 338 | // } 339 | 340 | // return (0); 341 | // } 342 | 343 | nvlist_t** nvlist_alloc_array(int count) { 344 | return malloc(count*sizeof(nvlist_t*)); 345 | } 346 | 347 | void nvlist_array_set(nvlist_t** a, int i, nvlist_t *item) { 348 | a[i] = item; 349 | } 350 | 351 | void nvlist_free_array(nvlist_t **a) { 352 | free(a); 353 | } 354 | 355 | nvlist_t *nvlist_array_at(nvlist_t **a, uint_t i) { 356 | return a[i]; 357 | } 358 | 359 | int refresh_stats(zpool_list_t *pool) 360 | { 361 | boolean_t missing; 362 | int err = zpool_refresh_stats(pool->zph, &missing); 363 | if ( err != 0 ) { 364 | return err; 365 | } 366 | if ( missing == B_TRUE ) { 367 | return -1; 368 | } 369 | return 0; 370 | } 371 | 372 | const char *get_vdev_type(nvlist_ptr nv) { 373 | char *value = NULL; 374 | int r = nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &value); 375 | if(r != 0) { 376 | return NULL; 377 | } 378 | return value; 379 | } 380 | 381 | uint64_t get_vdev_guid(nvlist_ptr nv) { 382 | uint64_t value = 0; 383 | nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value); 384 | return value; 385 | } 386 | 387 | const vdev_stat_ptr get_vdev_stats(nvlist_ptr nv) { 388 | vdev_stat_ptr vs = NULL; 389 | uint_t count; 390 | int r = nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS, (uint64_t**)&vs, &count); 391 | if(r != 0) { 392 | return NULL; 393 | } 394 | return vs; 395 | } 396 | 397 | 
pool_scan_stat_ptr get_vdev_scan_stats(nvlist_t *nv) { 398 | pool_scan_stat_ptr vds = NULL; 399 | uint_t c; 400 | int r = nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_SCAN_STATS, (uint64_t**)&vds, &c); 401 | if(r != 0) { 402 | return NULL; 403 | } 404 | return vds; 405 | } 406 | 407 | vdev_children_ptr get_vdev_children(nvlist_t *nv) { 408 | int r; 409 | vdev_children_ptr children = malloc(sizeof(vdev_children_t)); 410 | memset(children, 0, sizeof(vdev_children_t)); 411 | r = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, &(children->first), &(children->count)); 412 | if (r != 0) { 413 | free(children); 414 | return NULL; 415 | } 416 | return children; 417 | } 418 | 419 | vdev_children_ptr get_vdev_spares(nvlist_t *nv) { 420 | int r; 421 | vdev_children_ptr children = malloc(sizeof(vdev_children_t)); 422 | memset(children, 0, sizeof(vdev_children_t)); 423 | r = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, &(children->first), &(children->count)); 424 | if (r != 0) { 425 | free(children); 426 | return NULL; 427 | } 428 | return children; 429 | } 430 | 431 | vdev_children_ptr get_vdev_l2cache(nvlist_t *nv) { 432 | int r; 433 | vdev_children_ptr children = malloc(sizeof(vdev_children_t)); 434 | memset(children, 0, sizeof(vdev_children_t)); 435 | r = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE, &(children->first), &(children->count)); 436 | if (r != 0) { 437 | free(children); 438 | return NULL; 439 | } 440 | return children; 441 | } 442 | 443 | const char *get_vdev_path(nvlist_ptr nv) { 444 | char *path = NULL; 445 | uint64_t notpresent = 0; 446 | int r = nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, ¬present); 447 | if (r == 0 || notpresent != 0) { 448 | if ( 0 != nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) ) { 449 | return NULL; 450 | } 451 | } 452 | return path; 453 | } 454 | 455 | uint64_t get_vdev_is_log(nvlist_ptr nv) { 456 | uint64_t islog = B_FALSE; 457 | nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &islog); 458 | return islog; 459 | } 460 | 461 | 462 | // return 463 | uint64_t get_zpool_state(nvlist_ptr nv) { 464 | uint64_t state = 0; 465 | nvlist_lookup_uint64(nv, ZPOOL_CONFIG_POOL_STATE, &state); 466 | return state; 467 | } 468 | 469 | uint64_t get_zpool_guid(nvlist_ptr nv) { 470 | uint64_t guid = 0; 471 | nvlist_lookup_uint64(nv, ZPOOL_CONFIG_POOL_GUID, &guid); 472 | return guid; 473 | } 474 | 475 | const char *get_zpool_name(nvlist_ptr nv) { 476 | char *name = NULL; 477 | if (0 != nvlist_lookup_string(nv, ZPOOL_CONFIG_POOL_NAME, &name)) { 478 | return NULL; 479 | } 480 | return name; 481 | } 482 | 483 | const char *get_zpool_comment(nvlist_ptr nv) { 484 | char *comment = NULL; 485 | if (0 != nvlist_lookup_string(nv, ZPOOL_CONFIG_COMMENT, &comment)) { 486 | return NULL; 487 | } 488 | return comment; 489 | } 490 | 491 | nvlist_ptr get_zpool_vdev_tree(nvlist_ptr nv) { 492 | nvlist_ptr vdev_tree = NULL; 493 | if ( 0 != nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_VDEV_TREE, &vdev_tree) ) { 494 | return NULL; 495 | } 496 | return vdev_tree; 497 | } 498 | 499 | 500 | nvlist_ptr go_zpool_search_import(libzfs_handle_ptr zfsh, int paths, char **path, boolean_t do_scan) { 501 | importargs_t idata; 502 | memset(&idata, 0, sizeof(importargs_t)); 503 | nvlist_ptr pools = NULL; 504 | idata.path = path; 505 | idata.paths = paths; 506 | // idata.scan = 0; 507 | 508 | tpool_t *t; 509 | t = tpool_create(1, 5 * sysconf(_SC_NPROCESSORS_ONLN), 0, NULL); 510 | if (t == NULL) 511 | return NULL; 512 | 513 | pools = zpool_search_import(zfsh, &idata, &libzfs_config_ops); 514 | 515 
| tpool_wait(t); 516 | tpool_destroy(t); 517 | return pools; 518 | } 519 | 520 | 521 | int do_zpool_clear(zpool_list_t *pool, const char *device, u_int32_t load_policy) { 522 | nvlist_t *policy = NULL; 523 | int ret = 0; 524 | if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 || 525 | nvlist_add_uint32(policy, ZPOOL_LOAD_POLICY, load_policy) != 0) 526 | return (1); 527 | 528 | if (zpool_clear(pool->zph, device, policy) != 0) 529 | ret = 1; 530 | 531 | nvlist_free(policy); 532 | 533 | return (ret); 534 | } 535 | 536 | void collect_zpool_leaves(zpool_handle_t *zhp, nvlist_t *nvroot, nvlist_t *nv){ 537 | uint_t children = 0; 538 | nvlist_t **child; 539 | uint_t i; 540 | 541 | (void) nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, 542 | &child, &children); 543 | 544 | if (children == 0) { 545 | char *path = zpool_vdev_name(libzfsHandle, zhp, nvroot, 546 | VDEV_NAME_PATH); 547 | 548 | if (strcmp(path, VDEV_TYPE_INDIRECT) != 0) 549 | fnvlist_add_boolean(nv, path); 550 | 551 | free(path); 552 | return; 553 | } 554 | 555 | for (i = 0; i < children; i++) { 556 | collect_zpool_leaves(zhp, child[i], nv); 557 | } 558 | } -------------------------------------------------------------------------------- /zpool.go: -------------------------------------------------------------------------------- 1 | package zfs 2 | 3 | // #cgo CFLAGS: -D__USE_LARGEFILE64=1 4 | // #include <stdlib.h> 5 | // #include <libzfs.h> 6 | // #include "common.h" 7 | // #include "zpool.h" 8 | // #include "zfs.h" 9 | import "C" 10 | 11 | import ( 12 | "errors" 13 | "fmt" 14 | "strconv" 15 | "time" 16 | "unsafe" 17 | ) 18 | 19 | const ( 20 | msgPoolIsNil = "Pool handle not initialized or it is closed" 21 | ) 22 | 23 | // Enable or disable pool features with these constants 24 | const ( 25 | FENABLED = "enabled" 26 | FDISABLED = "disabled" 27 | ) 28 | 29 | // PoolProperties type is a map of pool property name -> value 30 | type PoolProperties map[Prop]string 31 | 32 | /* 33 | * ZIO types. Needed to interpret vdev statistics below. 34 | */ 35 | const ( 36 | ZIOTypeNull = iota 37 | ZIOTypeRead 38 | ZIOTypeWrite 39 | ZIOTypeFree 40 | ZIOTypeClaim 41 | ZIOTypeIOCtl 42 | ZIOTypes 43 | ) 44 | 45 | // Scan states 46 | const ( 47 | DSSNone = iota // No scan 48 | DSSScanning // Scanning 49 | DSSFinished // Scan finished 50 | DSSCanceled // Scan canceled 51 | DSSNumStates // Total number of scan states 52 | ) 53 | 54 | // Scan functions 55 | const ( 56 | PoolScanNone = iota // No scan function 57 | PoolScanScrub // Pool is checked for errors 58 | PoolScanResilver // Pool is resilvering 59 | PoolScanFuncs // Number of scan functions 60 | ) 61 | 62 | // PoolInitializeAction type representing a pool initialize action 63 | type PoolInitializeAction int 64 | 65 | // Initialize actions 66 | const ( 67 | PoolInitializeStart PoolInitializeAction = iota // start initialization 68 | PoolInitializeCancel // cancel initialization 69 | PoolInitializeSuspend // suspend initialization 70 | ) 71 | 72 | // VDevStat - Vdev statistics. Note: all fields should be 64-bit because this 73 | // is passed between kernel and userland as an nvlist uint64 array. 
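//
// An illustrative sketch (not part of the original source) of reading these
// counters from a pool's vdev tree, assuming an open pool handle "pool":
//
//	vt, err := pool.VDevTree()
//	if err == nil {
//		for z := ZIOTypeNull; z < ZIOTypes; z++ {
//			fmt.Println("ops:", vt.Stat.Ops[z], "bytes:", vt.Stat.Bytes[z])
//		}
//	}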
74 | type VDevStat struct { 75 | Timestamp time.Duration /* time since vdev load (nanoseconds)*/ 76 | State VDevState /* vdev state */ 77 | Aux VDevAux /* see vdev_aux_t */ 78 | Alloc uint64 /* space allocated */ 79 | Space uint64 /* total capacity */ 80 | DSpace uint64 /* deflated capacity */ 81 | RSize uint64 /* replaceable dev size */ 82 | ESize uint64 /* expandable dev size */ 83 | Ops [ZIOTypes]uint64 /* operation count */ 84 | Bytes [ZIOTypes]uint64 /* bytes read/written */ 85 | ReadErrors uint64 /* read errors */ 86 | WriteErrors uint64 /* write errors */ 87 | ChecksumErrors uint64 /* checksum errors */ 88 | SelfHealed uint64 /* self-healed bytes */ 89 | ScanRemoving uint64 /* removing? */ 90 | ScanProcessed uint64 /* scan processed bytes */ 91 | Fragmentation uint64 /* device fragmentation */ 92 | } 93 | 94 | // PoolScanStat - Pool scan statistics 95 | type PoolScanStat struct { 96 | // Values stored on disk 97 | Func uint64 // Current scan function e.g. none, scrub ... 98 | State uint64 // Current scan state e.g. scanning, finished ... 99 | StartTime uint64 // Scan start time 100 | EndTime uint64 // Scan end time 101 | ToExamine uint64 // Total bytes to scan 102 | Examined uint64 // Total bytes scanned 103 | ToProcess uint64 // Total bytes to process 104 | Processed uint64 // Total bytes processed 105 | Errors uint64 // Scan errors 106 | // Values not stored on disk 107 | PassExam uint64 // Examined bytes per scan pass 108 | PassStart uint64 // Start time of scan pass 109 | } 110 | 111 | // VDevTree ZFS virtual device tree 112 | type VDevTree struct { 113 | Type VDevType 114 | Devices []VDevTree // groups other devices (e.g. mirror) 115 | Spares []VDevTree 116 | L2Cache []VDevTree 117 | Logs *VDevTree 118 | GUID uint64 119 | Parity uint 120 | Path string 121 | Name string 122 | Stat VDevStat 123 | ScanStat PoolScanStat 124 | } 125 | 126 | // ExportedPool is a type representing a ZFS pool available for import 127 | type ExportedPool struct { 128 | VDevs VDevTree 129 | Name string 130 | Comment string 131 | GUID uint64 132 | State PoolState 133 | Status PoolStatus 134 | } 135 | 136 | // Pool object represents a handle to a single ZFS pool 137 | // 138 | // Pool.Properties - slice of all ZFS pool properties 139 | // 140 | // Changing any entry in it will not affect the ZFS pool; for that use the 141 | // SetProperty(p Prop, value string) method of the pool object. The slice is 142 | // initially loaded whenever you open or create a pool, to give easy access 143 | // to listing all available properties. It can be refreshed with up-to-date 144 | // values with a call to (*Pool) ReloadProperties 145 | type Pool struct { 146 | list C.zpool_list_ptr 147 | Properties []Property 148 | Features map[string]string 149 | } 150 | 151 | // PoolOpen opens a ZFS pool handle by name. 152 | // Returns a Pool object; Pool.Close() must be called explicitly 153 | // for memory cleanup once the object is no longer needed. 
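//
// Illustrative usage sketch (not from the original source; the pool name
// "TESTPOOL" is an assumption):
//
//	pool, err := PoolOpen("TESTPOOL")
//	if err != nil {
//		return err
//	}
//	defer pool.Close()
//	println(pool.Properties[PoolPropHealth].Value)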
154 | func PoolOpen(name string) (pool Pool, err error) { 155 | csName := C.CString(name) 156 | defer C.free(unsafe.Pointer(csName)) 157 | pool.list = C.zpool_list_open(csName) 158 | 159 | if pool.list != nil { 160 | err = pool.ReloadProperties() 161 | return 162 | } 163 | err = LastError() 164 | return 165 | } 166 | 167 | func poolGetConfig(name string, nv C.nvlist_ptr) (vdevs VDevTree, err error) { 168 | var dtype C.char_ptr 169 | var vs C.vdev_stat_ptr 170 | var ps C.pool_scan_stat_ptr 171 | var children C.vdev_children_ptr 172 | if dtype = C.get_vdev_type(nv); dtype == nil { 173 | err = fmt.Errorf("Failed to fetch %s", C.ZPOOL_CONFIG_TYPE) 174 | return 175 | } 176 | vdevs.Name = name 177 | vdevs.Type = VDevType(C.GoString(dtype)) 178 | if vdevs.Type == VDevTypeMissing || vdevs.Type == VDevTypeHole { 179 | return 180 | } 181 | 182 | vdevs.GUID = uint64(C.get_vdev_guid(nv)) 183 | 184 | // Fetch vdev stats 185 | if vs = C.get_vdev_stats(nv); vs == nil { 186 | err = fmt.Errorf("Failed to fetch %s", C.ZPOOL_CONFIG_VDEV_STATS) 187 | return 188 | } 189 | vdevs.Stat.Timestamp = time.Duration(vs.vs_timestamp) 190 | vdevs.Stat.State = VDevState(vs.vs_state) 191 | vdevs.Stat.Aux = VDevAux(vs.vs_aux) 192 | vdevs.Stat.Alloc = uint64(vs.vs_alloc) 193 | vdevs.Stat.Space = uint64(vs.vs_space) 194 | vdevs.Stat.DSpace = uint64(vs.vs_dspace) 195 | vdevs.Stat.RSize = uint64(vs.vs_rsize) 196 | vdevs.Stat.ESize = uint64(vs.vs_esize) 197 | for z := 0; z < ZIOTypes; z++ { 198 | vdevs.Stat.Ops[z] = uint64(vs.vs_ops[z]) 199 | vdevs.Stat.Bytes[z] = uint64(vs.vs_bytes[z]) 200 | } 201 | vdevs.Stat.ReadErrors = uint64(vs.vs_read_errors) 202 | vdevs.Stat.WriteErrors = uint64(vs.vs_write_errors) 203 | vdevs.Stat.ChecksumErrors = uint64(vs.vs_checksum_errors) 204 | vdevs.Stat.SelfHealed = uint64(vs.vs_self_healed) 205 | vdevs.Stat.ScanRemoving = uint64(vs.vs_scan_removing) 206 | vdevs.Stat.ScanProcessed = uint64(vs.vs_scan_processed) 207 | vdevs.Stat.Fragmentation = uint64(vs.vs_fragmentation) 208 | 209 | // Fetch vdev scan stats 210 | if ps = C.get_vdev_scan_stats(nv); ps != nil { 211 | vdevs.ScanStat.Func = uint64(ps.pss_func) 212 | vdevs.ScanStat.State = uint64(ps.pss_state) 213 | vdevs.ScanStat.StartTime = uint64(ps.pss_start_time) 214 | vdevs.ScanStat.EndTime = uint64(ps.pss_end_time) 215 | vdevs.ScanStat.ToExamine = uint64(ps.pss_to_examine) 216 | vdevs.ScanStat.Examined = uint64(ps.pss_examined) 217 | vdevs.ScanStat.ToProcess = uint64(ps.pss_to_process) 218 | vdevs.ScanStat.Processed = uint64(ps.pss_processed) 219 | vdevs.ScanStat.Errors = uint64(ps.pss_errors) 220 | vdevs.ScanStat.PassExam = uint64(ps.pss_pass_exam) 221 | vdevs.ScanStat.PassStart = uint64(ps.pss_pass_start) 222 | } 223 | 224 | // Fetch the children 225 | children = C.get_vdev_children(nv) 226 | if children != nil { 227 | // this object referencing the children array and its count must be freed 228 | defer C.free(unsafe.Pointer(children)) 229 | vdevs.Devices = make([]VDevTree, 0, children.count) 230 | } 231 | path := C.get_vdev_path(nv) 232 | if path != nil { 233 | vdevs.Path = C.GoString(path) 234 | } 235 | for c := C.uint_t(0); children != nil && c < children.count; c++ { 236 | var islog = C.uint64_t(C.B_FALSE) 237 | 238 | islog = C.get_vdev_is_log(C.nvlist_array_at(children.first, c)) 239 | 240 | vname := C.zpool_vdev_name(C.libzfsHandle, nil, C.nvlist_array_at(children.first, c), 241 | C.B_TRUE) 242 | var vdev VDevTree 243 | vdev, err = poolGetConfig(C.GoString(vname), 244 | C.nvlist_array_at(children.first, c)) 245 | 
C.free(unsafe.Pointer(vname)) 246 | if err != nil { 247 | return 248 | } 249 | if islog != C.B_FALSE { 250 | vdevs.Logs = &vdev 251 | } else { 252 | vdevs.Devices = append(vdevs.Devices, vdev) 253 | } 254 | } 255 | return 256 | } 257 | 258 | func poolGetSpares(name string, nv C.nvlist_ptr) (vdevs []VDevTree, err error) { 259 | // Fetch the spares 260 | var spares C.vdev_children_ptr 261 | spares = C.get_vdev_spares(nv) 262 | if spares != nil { 263 | // this object referencing the spares array and its count must be freed 264 | defer C.free(unsafe.Pointer(spares)) 265 | vdevs = make([]VDevTree, 0, spares.count) 266 | } 267 | for c := C.uint_t(0); spares != nil && c < spares.count; c++ { 268 | vname := C.zpool_vdev_name(C.libzfsHandle, nil, C.nvlist_array_at(spares.first, c), 269 | C.B_TRUE) 270 | var vdev VDevTree 271 | vdev, err = poolGetConfig(C.GoString(vname), 272 | C.nvlist_array_at(spares.first, c)) 273 | C.free(unsafe.Pointer(vname)) 274 | if err != nil { 275 | return 276 | } 277 | vdevs = append(vdevs, vdev) 278 | } 279 | return 280 | } 281 | 282 | func poolGetL2Cache(name string, nv C.nvlist_ptr) (vdevs []VDevTree, err error) { 283 | // Fetch the l2cache devices 284 | var l2cache C.vdev_children_ptr 285 | l2cache = C.get_vdev_l2cache(nv) 286 | if l2cache != nil { 287 | // this object referencing the l2cache array and its count must be freed 288 | defer C.free(unsafe.Pointer(l2cache)) 289 | vdevs = make([]VDevTree, 0, l2cache.count) 290 | } 291 | for c := C.uint_t(0); l2cache != nil && c < l2cache.count; c++ { 292 | vname := C.zpool_vdev_name(C.libzfsHandle, nil, C.nvlist_array_at(l2cache.first, c), 293 | C.B_TRUE) 294 | var vdev VDevTree 295 | vdev, err = poolGetConfig(C.GoString(vname), 296 | C.nvlist_array_at(l2cache.first, c)) 297 | C.free(unsafe.Pointer(vname)) 298 | if err != nil { 299 | return 300 | } 301 | vdevs = append(vdevs, vdev) 302 | } 303 | return 304 | } 305 | 306 | // PoolImportSearch - Searches for pools available to import but not yet imported. 307 | // Returns an array of found pools. 
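//
// Illustrative sketch (not from the original source; the search path is an
// assumption):
//
//	epools, err := PoolImportSearch([]string{"/dev/disk/by-id"})
//	if err != nil {
//		return err
//	}
//	for _, ep := range epools {
//		println(ep.Name, ep.State.String(), ep.Comment)
//	}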
308 | func PoolImportSearch(searchpaths []string) (epools []ExportedPool, err error) { 309 | var config, nvroot C.nvlist_ptr 310 | var cname, msgid, comment C.char_ptr 311 | var reason C.zpool_status_t 312 | var errata C.zpool_errata_t 313 | config = nil 314 | var elem C.nvpair_ptr 315 | numofp := len(searchpaths) 316 | cpaths := C.alloc_cstrings(C.int(numofp)) 317 | defer C.free(unsafe.Pointer(cpaths)) 318 | for i, path := range searchpaths { 319 | csPath := C.CString(path) 320 | defer C.free(unsafe.Pointer(csPath)) 321 | C.strings_setat(cpaths, C.int(i), csPath) 322 | } 323 | 324 | pools := C.go_zpool_search_import(C.libzfsHandle, C.int(numofp), cpaths, C.B_FALSE) 325 | defer C.nvlist_free(pools) 326 | elem = C.nvlist_next_nvpair(pools, elem) 327 | epools = make([]ExportedPool, 0, 1) 328 | for ; elem != nil; elem = C.nvlist_next_nvpair(pools, elem) { 329 | ep := ExportedPool{} 330 | if C.nvpair_value_nvlist(elem, (**C.struct_nvlist)(&config)) != 0 { 331 | err = LastError() 332 | return 333 | } 334 | 335 | ep.State = PoolState(C.get_zpool_state(config)) 336 | if ep.State == PoolStateDestroyed { 337 | continue // skip destroyed pools 338 | } 339 | 340 | if cname = C.get_zpool_name(config); cname == nil { 341 | err = fmt.Errorf("Failed to fetch %s", C.ZPOOL_CONFIG_POOL_NAME) 342 | return 343 | } 344 | ep.Name = C.GoString(cname) 345 | 346 | ep.GUID = uint64(C.get_zpool_guid(config)) 347 | 348 | reason = C.zpool_import_status(config, (**C.char)(&msgid), &errata) 349 | ep.Status = PoolStatus(reason) 350 | 351 | if comment = C.get_zpool_comment(config); comment != nil { 352 | ep.Comment = C.GoString(comment) 353 | } 354 | 355 | if nvroot = C.get_zpool_vdev_tree(config); nvroot == nil { 356 | err = fmt.Errorf("Failed to fetch %s", C.ZPOOL_CONFIG_VDEV_TREE) 357 | return 358 | } 359 | ep.VDevs, err = poolGetConfig(ep.Name, nvroot) 360 | epools = append(epools, ep) 361 | } 362 | return 363 | } 364 | 365 | func poolSearchImport(q string, searchpaths []string, guid bool) (name string, 366 | err error) { 367 | var config C.nvlist_ptr 368 | var cname C.char_ptr 369 | config = nil 370 | errPoolList := errors.New("Failed to list pools") 371 | var elem *C.nvpair_t 372 | numofp := len(searchpaths) 373 | cpaths := C.alloc_cstrings(C.int(numofp)) 374 | defer C.free(unsafe.Pointer(cpaths)) 375 | for i, path := range searchpaths { 376 | csPath := C.CString(path) 377 | defer C.free(unsafe.Pointer(csPath)) 378 | C.strings_setat(cpaths, C.int(i), csPath) 379 | } 380 | 381 | pools := C.go_zpool_search_import(C.libzfsHandle, C.int(numofp), cpaths, C.B_FALSE) 382 | defer C.nvlist_free(pools) 383 | 384 | elem = C.nvlist_next_nvpair(pools, elem) 385 | for ; elem != nil; elem = C.nvlist_next_nvpair(pools, elem) { 386 | var cq *C.char 387 | var tconfig *C.nvlist_t 388 | retcode := C.nvpair_value_nvlist(elem, (**C.struct_nvlist)(&tconfig)) 389 | if retcode != 0 { 390 | err = errPoolList 391 | return 392 | } 393 | if PoolState(C.get_zpool_state(tconfig)) == PoolStateDestroyed { 394 | continue // skip destroyed pools 395 | } 396 | if guid { 397 | sguid := fmt.Sprint(C.get_zpool_guid(tconfig)) 398 | if q == sguid { 399 | config = tconfig 400 | break 401 | } 402 | } else { 403 | if cq = C.get_zpool_name(tconfig); cq == nil { 404 | err = errPoolList 405 | return 406 | } 407 | cname = cq 408 | name = C.GoString(cq) 409 | if q == name { 410 | config = tconfig 411 | break 412 | } 413 | } 414 | } 415 | if config == nil { 416 | err = fmt.Errorf("No pool found %s", q) 417 | return 418 | } 419 | if guid { 420 | // We need to get 
name so we can open pool by name 421 | if cname = C.get_zpool_name(config); cname == nil { 422 | err = errPoolList 423 | return 424 | } 425 | name = C.GoString(cname) 426 | } 427 | if retcode := C.zpool_import_props(C.libzfsHandle, config, cname, 428 | nil, C.ZFS_IMPORT_NORMAL|C.ZFS_IMPORT_ANY_HOST); retcode != 0 { 429 | err = fmt.Errorf("Import pool properties failed: %s", LastError().Error()) 430 | return 431 | } 432 | return 433 | } 434 | 435 | // PoolImport given a list of directories to search, finds and imports the pool 436 | // with matching name stored on disk. 437 | func PoolImport(name string, searchpaths []string) (pool Pool, err error) { 438 | _, err = poolSearchImport(name, searchpaths, false) 439 | if err != nil { 440 | return 441 | } 442 | pool, err = PoolOpen(name) 443 | return 444 | } 445 | 446 | // PoolImportByGUID given a list of directories to search, finds and imports the pool 447 | // with matching GUID stored on disk. 448 | func PoolImportByGUID(guid string, searchpaths []string) (pool Pool, err error) { 449 | var name string 450 | name, err = poolSearchImport(guid, searchpaths, true) 451 | if err != nil { 452 | return 453 | } 454 | pool, err = PoolOpen(name) 455 | return 456 | } 457 | 458 | // func PoolList(paths []string, cache string) (pools []Pool, err error) { 459 | // 460 | // } 461 | 462 | // PoolOpenAll opens all active ZFS pools on the current system. 463 | // Returns an array of Pool handles; each has to be closed (via the 464 | // Pool.Close() method) when no longer needed. 465 | func PoolOpenAll() (pools []Pool, err error) { 466 | var pool Pool 467 | if pool.list = C.zpool_list_openall(); pool.list == nil { 468 | err = LastError() 469 | return 470 | } 471 | for pool.list != nil { 472 | err = pool.ReloadProperties() 473 | if err != nil { 474 | return 475 | } 476 | next := C.zpool_next(pool.list) 477 | pool.list.pnext = nil 478 | pools = append(pools, pool) 479 | pool.list = next 480 | } 481 | return 482 | } 483 | 484 | // PoolCloseAll closes all pools in the given slice 485 | func PoolCloseAll(pools []Pool) { 486 | for _, p := range pools { 487 | p.Close() 488 | } 489 | } 490 | 491 | // PoolPropertyToName converts a property to its name 492 | // (returns the built-in string representation of the property name). 493 | // This is optional; you can represent each property with a string 494 | // name of your choice. 495 | func PoolPropertyToName(p Prop) (name string) { 496 | if p == PoolNumProps { 497 | return "numofprops" 498 | } 499 | prop := C.zpool_prop_t(p) 500 | name = C.GoString(C.zpool_prop_to_name(prop)) 501 | return 502 | } 503 | 504 | // PoolStateToName maps POOL STATE to string. 505 | func PoolStateToName(state PoolState) (name string) { 506 | ps := C.pool_state_t(state) 507 | name = C.GoString(C.zpool_pool_state_to_name(ps)) 508 | return 509 | } 510 | 511 | // RefreshStats refreshes the pool's vdev statistics, e.g. bytes read/written. 
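//
// Illustrative sketch (not from the original source), assuming an open pool
// handle "pool":
//
//	if err := pool.RefreshStats(); err == nil {
//		vt, _ := pool.VDevTree()
//		println(vt.Stat.Bytes[ZIOTypeRead], vt.Stat.Bytes[ZIOTypeWrite])
//	}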
512 | func (pool *Pool) RefreshStats() (err error) { 513 | if 0 != C.refresh_stats(pool.list) { 514 | return errors.New("error refreshing stats") 515 | } 516 | return nil 517 | } 518 | 519 | // ReloadProperties re-reads ZFS pool properties and features, refreshing 520 | // Pool.Properties and the Pool.Features map 521 | func (pool *Pool) ReloadProperties() (err error) { 522 | propList := C.read_zpool_properties(pool.list) 523 | if propList == nil { 524 | err = LastError() 525 | return 526 | } 527 | 528 | pool.Properties = make([]Property, PoolNumProps+1) 529 | next := propList 530 | for next != nil { 531 | pool.Properties[next.property] = Property{Value: C.GoString(&(next.value[0])), Source: C.GoString(&(next.source[0]))} 532 | next = C.next_property(next) 533 | } 534 | C.free_properties(propList) 535 | 536 | // read features 537 | pool.Features = map[string]string{ 538 | "async_destroy": "disabled", 539 | "empty_bpobj": "disabled", 540 | "lz4_compress": "disabled", 541 | "spacemap_histogram": "disabled", 542 | "enabled_txg": "disabled", 543 | "hole_birth": "disabled", 544 | "extensible_dataset": "disabled", 545 | "embedded_data": "disabled", 546 | "bookmarks": "disabled", 547 | "filesystem_limits": "disabled", 548 | "large_blocks": "disabled"} 549 | for name := range pool.Features { 550 | _, ferr := pool.GetFeature(name) 551 | if ferr != nil { 552 | // tolerate it 553 | } 554 | } 555 | return 556 | } 557 | 558 | // GetProperty reloads and returns a single specified property. This also 559 | // updates the requested property in the Properties slice. 560 | func (pool *Pool) GetProperty(p Prop) (prop Property, err error) { 561 | if pool.list != nil { 562 | // First check if property exists at all 563 | if p < PoolPropName || p > PoolNumProps { 564 | err = errors.New(fmt.Sprint("Unknown zpool property: ", 565 | PoolPropertyToName(p))) 566 | return 567 | } 568 | list := C.read_zpool_property(pool.list, C.int(p)) 569 | if list == nil { 570 | err = LastError() 571 | return 572 | } 573 | defer C.free_properties(list) 574 | prop.Value = C.GoString(&(list.value[0])) 575 | prop.Source = C.GoString(&(list.source[0])) 576 | pool.Properties[p] = prop 577 | return 578 | } 579 | return prop, errors.New(msgPoolIsNil) 580 | } 581 | 582 | // GetFeature reloads and returns a single specified feature. This also 583 | // updates the requested feature in the Features map. 584 | func (pool *Pool) GetFeature(name string) (value string, err error) { 585 | var fvalue [512]C.char 586 | csName := C.CString(fmt.Sprint("feature@", name)) 587 | r := C.zpool_prop_get_feature(pool.list.zph, csName, &(fvalue[0]), 512) 588 | C.free(unsafe.Pointer(csName)) 589 | if r != 0 { 590 | err = errors.New(fmt.Sprint("Unknown zpool feature: ", name)) 591 | return 592 | } 593 | value = C.GoString(&(fvalue[0])) 594 | pool.Features[name] = value 595 | return 596 | } 597 | 598 | // SetProperty sets a ZFS pool property to value. Not all properties can be set; 599 | // some can be set only at creation time and some are read-only. 600 | // Always check the returned error and its description. 
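//
// Illustrative sketch (not from the original source); PoolPropListsnaps is
// used here the same way as in zpool_test.go:
//
//	if err := pool.SetProperty(PoolPropListsnaps, "on"); err != nil {
//		return err
//	}
//	prop, err := pool.GetProperty(PoolPropListsnaps)
//	if err == nil {
//		println(prop.Value, prop.Source)
//	}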
601 | func (pool *Pool) SetProperty(p Prop, value string) (err error) { 602 | if pool.list != nil { 603 | // First check if property exists at all 604 | if p < PoolPropName || p > PoolNumProps { 605 | err = errors.New(fmt.Sprint("Unknown zpool property: ", 606 | PoolPropertyToName(p))) 607 | return 608 | } 609 | csPropName := C.CString(PoolPropertyToName(p)) 610 | csPropValue := C.CString(value) 611 | r := C.zpool_set_prop(pool.list.zph, csPropName, csPropValue) 612 | C.free(unsafe.Pointer(csPropName)) 613 | C.free(unsafe.Pointer(csPropValue)) 614 | if r != 0 { 615 | err = LastError() 616 | } else { 617 | // Update Properties member with change made 618 | if _, err = pool.GetProperty(p); err != nil { 619 | return 620 | } 621 | } 622 | return 623 | } 624 | return errors.New(msgPoolIsNil) 625 | } 626 | 627 | // Close ZFS pool handle and release associated memory. 628 | // Do not use Pool object after this. 629 | func (pool *Pool) Close() { 630 | if pool.list != nil { 631 | C.zpool_list_close(pool.list) 632 | pool.list = nil 633 | } 634 | } 635 | 636 | // Name gets (re-reads) the ZFS pool name property 637 | func (pool *Pool) Name() (name string, err error) { 638 | if pool.list == nil { 639 | err = errors.New(msgPoolIsNil) 640 | } else { 641 | name = C.GoString(C.zpool_get_name(pool.list.zph)) 642 | pool.Properties[PoolPropName] = Property{Value: name, Source: "none"} 643 | } 644 | return 645 | } 646 | 647 | // State gets the ZFS pool state. 648 | // Returns the state of the pool (ACTIVE or UNAVAILABLE) 649 | func (pool *Pool) State() (state PoolState, err error) { 650 | if pool.list == nil { 651 | err = errors.New(msgPoolIsNil) 652 | } else { 653 | state = PoolState(C.zpool_read_state(pool.list.zph)) 654 | } 655 | return 656 | } 657 | 658 | func (vdev *VDevTree) isGrouping() (grouping bool, mindevs, maxdevs int) { 659 | maxdevs = int(^uint(0) >> 1) 660 | if vdev.Type == VDevTypeRaidz { 661 | grouping = true 662 | if vdev.Parity == 0 { 663 | vdev.Parity = 1 664 | } 665 | if vdev.Parity > 254 { 666 | vdev.Parity = 254 667 | } 668 | mindevs = int(vdev.Parity) + 1 669 | maxdevs = 255 670 | } else if vdev.Type == VDevTypeMirror { 671 | grouping = true 672 | mindevs = 2 673 | } else if vdev.Type == VDevTypeLog || vdev.Type == VDevTypeSpare || vdev.Type == VDevTypeL2cache { 674 | grouping = true 675 | mindevs = 1 676 | } 677 | return 678 | } 679 | 680 | func (vdev *VDevTree) isLog() (r C.uint64_t) { 681 | r = 0 682 | if vdev.Type == VDevTypeLog { 683 | r = 1 684 | } 685 | return 686 | } 687 | 688 | func toCPoolProperties(props PoolProperties) (cprops C.nvlist_ptr) { 689 | cprops = C.new_property_nvlist() 690 | for prop, value := range props { 691 | name := C.zpool_prop_to_name(C.zpool_prop_t(prop)) 692 | csPropValue := C.CString(value) 693 | r := C.property_nvlist_add(cprops, name, csPropValue) 694 | C.free(unsafe.Pointer(csPropValue)) 695 | if r != 0 { 696 | if cprops != nil { 697 | C.nvlist_free(cprops) 698 | cprops = nil 699 | } 700 | return 701 | } 702 | } 703 | return 704 | } 705 | 706 | func toCDatasetProperties(props DatasetProperties) (cprops C.nvlist_ptr) { 707 | cprops = C.new_property_nvlist() 708 | for prop, value := range props { 709 | name := C.zfs_prop_to_name(C.zfs_prop_t(prop)) 710 | csPropValue := C.CString(value) 711 | r := C.property_nvlist_add(cprops, name, csPropValue) 712 | C.free(unsafe.Pointer(csPropValue)) 713 | if r != 0 { 714 | if cprops != nil { 715 | C.nvlist_free(cprops) 716 | cprops = nil 717 | } 718 | return 719 | } 720 | } 721 | return 722 | } 723 | 724 | func buildVdev(vdev 
VDevTree, ashift int) (nvvdev *C.struct_nvlist, err error) { 725 | if r := C.nvlist_alloc(&nvvdev, C.NV_UNIQUE_NAME, 0); r != 0 { 726 | err = errors.New("Failed to allocate vdev") 727 | return 728 | } 729 | csType := C.CString(string(vdev.Type)) 730 | r := C.nvlist_add_string(nvvdev, C.sZPOOL_CONFIG_TYPE, 731 | csType) 732 | C.free(unsafe.Pointer(csType)) 733 | if r != 0 { 734 | err = errors.New("Failed to set vdev type") 735 | return 736 | } 737 | if r := C.nvlist_add_uint64(nvvdev, C.sZPOOL_CONFIG_IS_LOG, 738 | vdev.isLog()); r != 0 { 739 | err = errors.New("Failed to allocate vdev (is_log)") 740 | return 741 | } 742 | if r := C.nvlist_add_uint64(nvvdev, 743 | C.sZPOOL_CONFIG_WHOLE_DISK, 1); r != 0 { 744 | err = errors.New("Failed to allocate vdev nvvdev (whdisk)") 745 | return 746 | } 747 | if len(vdev.Path) > 0 { 748 | csPath := C.CString(vdev.Path) 749 | r := C.nvlist_add_string( 750 | nvvdev, C.sZPOOL_CONFIG_PATH, 751 | csPath) 752 | C.free(unsafe.Pointer(csPath)) 753 | if r != 0 { 754 | err = errors.New("Failed to allocate vdev nvvdev (path)") 755 | return 756 | } 757 | if ashift > 0 { 758 | if r := C.nvlist_add_uint64(nvvdev, 759 | C.sZPOOL_CONFIG_ASHIFT, 760 | C.uint64_t(ashift)); r != 0 { 761 | err = errors.New("Failed to allocate vdev nvvdev (ashift)") 762 | return 763 | } 764 | } 765 | } 766 | return 767 | } 768 | 769 | func buildVDevTree(root *C.nvlist_t, rtype VDevType, vdevs, spares, l2cache []VDevTree, 770 | props PoolProperties) (err error) { 771 | count := len(vdevs) 772 | if count == 0 { 773 | return 774 | } 775 | childrens := C.nvlist_alloc_array(C.int(count)) 776 | if childrens == nil { 777 | err = errors.New("Not enough memory") 778 | return 779 | } 780 | defer C.nvlist_free_array(childrens) 781 | for i, vdev := range vdevs { 782 | grouping, mindevs, maxdevs := vdev.isGrouping() 783 | var child *C.struct_nvlist 784 | vcount := len(vdev.Devices) 785 | if vcount < mindevs || vcount > maxdevs { 786 | err = fmt.Errorf( 787 | "Invalid vdev specification: %s requires at least %d and at most %d devices", 788 | vdev.Type, mindevs, maxdevs) 789 | return 790 | } 791 | if grouping { 792 | if r := C.nvlist_alloc(&child, C.NV_UNIQUE_NAME, 0); r != 0 { 793 | err = errors.New("Failed to allocate vdev") 794 | return 795 | } 796 | csType := C.CString(string(vdev.Type)) 797 | r := C.nvlist_add_string(child, C.sZPOOL_CONFIG_TYPE, 798 | csType) 799 | C.free(unsafe.Pointer(csType)) 800 | if r != 0 { 801 | err = errors.New("Failed to set vdev type") 802 | return 803 | } 804 | if vdev.Type == VDevTypeRaidz { 805 | r := C.nvlist_add_uint64(child, 806 | C.sZPOOL_CONFIG_NPARITY, 807 | C.uint64_t(mindevs-1)) 808 | if r != 0 { 809 | err = errors.New("Failed to allocate vdev (parity)") 810 | return 811 | } 812 | } 813 | if err = buildVDevTree(child, vdev.Type, vdev.Devices, nil, nil, 814 | props); err != nil { 815 | return 816 | } 817 | } else { 818 | ashift, _ := strconv.Atoi(props[PoolPropAshift]) 819 | if child, err = buildVdev(vdev, ashift); err != nil { 820 | return 821 | } 822 | } 823 | C.nvlist_array_set(childrens, C.int(i), child) 824 | } 825 | if count > 0 { 826 | if r := C.nvlist_add_nvlist_array(root, 827 | C.sZPOOL_CONFIG_CHILDREN, childrens, 828 | C.uint_t(count)); r != 0 { 829 | err = errors.New("Failed to allocate vdev children") 830 | return 831 | } 832 | } 833 | if len(spares) > 0 { 834 | ashift, _ := strconv.Atoi(props[PoolPropAshift]) 835 | if err = buildVdevSpares(root, VDevTypeRoot, spares, ashift); err != nil { 836 | return 837 | } 838 | } 839 | if len(l2cache) > 0 { 840
| ashift, _ := strconv.Atoi(props[PoolPropAshift]) 841 | if err = buildVdevL2Cache(root, VDevTypeRoot, l2cache, ashift); err != nil { 842 | return 843 | } 844 | } 845 | return 846 | } 847 | 848 | func buildVdevSpares(root *C.nvlist_t, rtype VDevType, vdevs []VDevTree, ashift int) (err error) { 849 | count := len(vdevs) 850 | if count == 0 { 851 | return 852 | } 853 | spares := C.nvlist_alloc_array(C.int(count)) 854 | if spares == nil { 855 | err = errors.New("Not enough memory in buildVdevSpares") 856 | return 857 | } 858 | defer C.nvlist_free_array(spares) 859 | for i, vdev := range vdevs { 860 | var child *C.struct_nvlist 861 | if child, err = buildVdev(vdev, ashift); err != nil { 862 | return 863 | } 864 | C.nvlist_array_set(spares, C.int(i), child) 865 | } 866 | if r := C.nvlist_add_nvlist_array(root, 867 | C.sZPOOL_CONFIG_SPARES, spares, C.uint_t(len(vdevs))); r != 0 { 868 | err = errors.New("Failed to allocate vdev spare") 869 | } 870 | return 871 | } 872 | 873 | func buildVdevL2Cache(root *C.nvlist_t, rtype VDevType, vdevs []VDevTree, ashift int) (err error) { 874 | count := len(vdevs) 875 | if count == 0 { 876 | return 877 | } 878 | l2cache := C.nvlist_alloc_array(C.int(count)) 879 | if l2cache == nil { 880 | err = errors.New("Not enough memory in buildVdevL2Cache") 881 | return 882 | } 883 | defer C.nvlist_free_array(l2cache) 884 | for i, vdev := range vdevs { 885 | var child *C.struct_nvlist 886 | if child, err = buildVdev(vdev, ashift); err != nil { 887 | return 888 | } 889 | C.nvlist_array_set(l2cache, C.int(i), child) 890 | } 891 | if r := C.nvlist_add_nvlist_array(root, 892 | C.sZPOOL_CONFIG_L2CACHE, l2cache, C.uint_t(len(vdevs))); r != 0 { 893 | err = errors.New("Failed to allocate vdev l2cache") 894 | } 895 | return 896 | } 897 | 898 | // PoolCreate creates a ZFS pool per specs, features and properties of pool and root dataset 899 | func PoolCreate(name string, vdev VDevTree, features map[string]string, 900 | props PoolProperties, fsprops DatasetProperties) (pool Pool, err error) { 901 | // create root vdev nvroot 902 | var nvroot *C.struct_nvlist 903 | if r := C.nvlist_alloc(&nvroot, C.NV_UNIQUE_NAME, 0); r != 0 { 904 | err = errors.New("Failed to allocate root vdev") 905 | return 906 | } 907 | csTypeRoot := C.CString(string(VDevTypeRoot)) 908 | r := C.nvlist_add_string(nvroot, C.sZPOOL_CONFIG_TYPE, 909 | csTypeRoot) 910 | C.free(unsafe.Pointer(csTypeRoot)) 911 | if r != 0 { 912 | err = errors.New("Failed to allocate root vdev") 913 | return 914 | } 915 | defer C.nvlist_free(nvroot) 916 | 917 | // Now we need to build specs (vdev hierarchy) 918 | if err = buildVDevTree(nvroot, VDevTypeRoot, vdev.Devices, vdev.Spares, vdev.L2Cache, props); err != nil { 919 | return 920 | } 921 | 922 | // Enable 0.6.5 features by default 923 | features["spacemap_histogram"] = FENABLED 924 | features["enabled_txg"] = FENABLED 925 | features["hole_birth"] = FENABLED 926 | features["extensible_dataset"] = FENABLED 927 | features["embedded_data"] = FENABLED 928 | features["bookmarks"] = FENABLED 929 | features["filesystem_limits"] = FENABLED 930 | features["large_blocks"] = FENABLED 931 | 932 | // Enable 0.7.x features by default 933 | features["multi_vdev_crash_dump"] = FENABLED 934 | features["large_dnode"] = FENABLED 935 | features["sha512"] = FENABLED 936 | features["skein"] = FENABLED 937 | features["edonr"] = FENABLED 938 | features["userobj_accounting"] = FENABLED 939 | 940 | // convert properties 941 | cprops := toCPoolProperties(props) 942 | if cprops != nil { 943 | defer C.nvlist_free(cprops) 
944 | } else if len(props) > 0 { 945 | err = errors.New("Failed to allocate pool properties") 946 | return 947 | } 948 | cfsprops := toCDatasetProperties(fsprops) 949 | if cfsprops != nil { 950 | defer C.nvlist_free(cfsprops) 951 | } else if len(fsprops) > 0 { 952 | err = errors.New("Failed to allocate FS properties") 953 | return 954 | } 955 | for fname, fval := range features { 956 | csName := C.CString(fmt.Sprintf("feature@%s", fname)) 957 | csVal := C.CString(fval) 958 | r := C.property_nvlist_add(cprops, csName, csVal) 959 | C.free(unsafe.Pointer(csName)) 960 | C.free(unsafe.Pointer(csVal)) 961 | if r != 0 { 962 | // cprops is released by the deferred nvlist_free above 963 | err = errors.New("Failed to add feature to pool properties") 964 | return 965 | } 966 | } 967 | 968 | // Create actual pool then open 969 | csName := C.CString(name) 970 | defer C.free(unsafe.Pointer(csName)) 971 | if r := C.zpool_create(C.libzfsHandle, csName, nvroot, 972 | cprops, cfsprops); r != 0 { 973 | err = LastError() 974 | err = errors.New(err.Error() + " (zpool_create)") 975 | return 976 | } 977 | 978 | // Open created pool and return handle 979 | pool, err = PoolOpen(name) 980 | return 981 | } 982 | 983 | 984 | 985 | // Status gets pool status. Lets you check if the pool is healthy. 986 | func (pool *Pool) Status() (status PoolStatus, err error) { 987 | var msgid *C.char 988 | var reason C.zpool_status_t 989 | var errata C.zpool_errata_t 990 | if pool.list == nil { 991 | err = errors.New(msgPoolIsNil) 992 | return 993 | } 994 | reason = C.zpool_get_status(pool.list.zph, &msgid, &errata) 995 | status = PoolStatus(reason) 996 | return 997 | } 998 | 999 | // Destroy the pool. It is up to the caller to ensure that there are no 1000 | // datasets left in the pool. logStr is optional; if specified, it is 1001 | // appended to ZFS history 1002 | func (pool *Pool) Destroy(logStr string) (err error) { 1003 | if pool.list == nil { 1004 | err = errors.New(msgPoolIsNil) 1005 | return 1006 | } 1007 | csLog := C.CString(logStr) 1008 | defer C.free(unsafe.Pointer(csLog)) 1009 | retcode := C.zpool_destroy(pool.list.zph, csLog) 1010 | if retcode != 0 { 1011 | err = LastError() 1012 | } 1013 | return 1014 | } 1015 | 1016 | // Export exports the pool from the system. 1017 | // Before exporting the pool, all datasets within the pool are unmounted. 1018 | // A pool cannot be exported if it has a shared spare that is currently 1019 | // being used. 1020 | func (pool *Pool) Export(force bool, log string) (err error) { 1021 | var forcet C.boolean_t 1022 | if force { 1023 | forcet = 1 1024 | } 1025 | csLog := C.CString(log) 1026 | defer C.free(unsafe.Pointer(csLog)) 1027 | if rc := C.zpool_disable_datasets(pool.list.zph, forcet); rc != 0 { 1028 | err = LastError() 1029 | return 1030 | } 1031 | if rc := C.zpool_export(pool.list.zph, forcet, csLog); rc != 0 { 1032 | err = LastError() 1033 | } 1034 | return 1035 | } 1036 | 1037 | // ExportForce hard (force) exports the pool from the system. 
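//
// Illustrative sketch (not from the original source): try a regular export
// first and fall back to a hard export, assuming an open pool handle "pool":
//
//	if err := pool.Export(false, "routine maintenance"); err != nil {
//		err = pool.ExportForce("routine maintenance")
//	}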
1038 | func (pool *Pool) ExportForce(log string) (err error) { 1039 | csLog := C.CString(log) 1040 | defer C.free(unsafe.Pointer(csLog)) 1041 | if rc := C.zpool_export_force(pool.list.zph, csLog); rc != 0 { 1042 | err = LastError() 1043 | } 1044 | return 1045 | } 1046 | 1047 | // VDevTree - Fetch pool's current vdev tree configuration, state and stats 1048 | func (pool *Pool) VDevTree() (vdevs VDevTree, err error) { 1049 | var nvroot *C.struct_nvlist 1050 | var poolName string 1051 | config := C.zpool_get_config(pool.list.zph, nil) 1052 | if config == nil { 1053 | err = fmt.Errorf("Failed zpool_get_config") 1054 | return 1055 | } 1056 | if C.nvlist_lookup_nvlist(config, C.sZPOOL_CONFIG_VDEV_TREE, &nvroot) != 0 { 1057 | err = fmt.Errorf("Failed to fetch %s", C.ZPOOL_CONFIG_VDEV_TREE) 1058 | return 1059 | } 1060 | if poolName, err = pool.Name(); err != nil { 1061 | return 1062 | } 1063 | if vdevs, err = poolGetConfig(poolName, nvroot); err != nil { 1064 | return 1065 | } 1066 | if vdevs.Spares, err = poolGetSpares(poolName, nvroot); err != nil { return } 1067 | vdevs.L2Cache, err = poolGetL2Cache(poolName, nvroot) 1068 | return 1069 | } 1070 | 1071 | // Initialize - initializes the pool 1072 | func (pool *Pool) Initialize() (err error) { 1073 | return pool.initialize(PoolInitializeStart) 1074 | } 1075 | 1076 | // CancelInitialization - cancels ongoing initialization 1077 | func (pool *Pool) CancelInitialization() (err error) { 1078 | return pool.initialize(PoolInitializeCancel) 1079 | } 1080 | 1081 | // SuspendInitialization - suspends ongoing initialization 1082 | func (pool *Pool) SuspendInitialization() (err error) { 1083 | return pool.initialize(PoolInitializeSuspend) 1084 | } 1085 | 1086 | func (pool *Pool) initialize(action PoolInitializeAction) (err error) { 1087 | var nvroot *C.struct_nvlist 1088 | 1089 | config := C.zpool_get_config(pool.list.zph, nil) 1090 | if config == nil { 1091 | err = fmt.Errorf("Failed zpool_get_config") 1092 | return 1093 | } 1094 | if C.nvlist_lookup_nvlist(config, C.sZPOOL_CONFIG_VDEV_TREE, &nvroot) != 0 { 1095 | err = fmt.Errorf("Failed to fetch %s", C.ZPOOL_CONFIG_VDEV_TREE) 1096 | return 1097 | } 1098 | 1099 | var vds *C.nvlist_t 1100 | if r := C.nvlist_alloc(&vds, C.NV_UNIQUE_NAME, 0); r != 0 { 1101 | err = errors.New("Failed to allocate vdev") 1102 | return 1103 | } 1104 | defer C.nvlist_free(vds) 1105 | 1106 | C.collect_zpool_leaves(pool.list.zph, nvroot, vds) 1107 | 1108 | if C.zpool_initialize(pool.list.zph, C.pool_initialize_func_t(action), vds) != 0 { 1109 | err = fmt.Errorf("Initialization action %s failed", action.String()) 1110 | return 1111 | } 1112 | return 1113 | } 1114 | 1115 | func (s PoolState) String() string { 1116 | switch s { 1117 | case PoolStateActive: 1118 | return "ACTIVE" 1119 | case PoolStateExported: 1120 | return "EXPORTED" 1121 | case PoolStateDestroyed: 1122 | return "DESTROYED" 1123 | case PoolStateSpare: 1124 | return "SPARE" 1125 | case PoolStateL2cache: 1126 | return "L2CACHE" 1127 | case PoolStateUninitialized: 1128 | return "UNINITIALIZED" 1129 | case PoolStateUnavail: 1130 | return "UNAVAILABLE" 1131 | case PoolStatePotentiallyActive: 1132 | return "POTENTIALLYACTIVE" 1133 | default: 1134 | return "UNKNOWN" 1135 | } 1136 | } 1137 | 1138 | func (s VDevState) String() string { 1139 | switch s { 1140 | case VDevStateUnknown: 1141 | return "UNINITIALIZED" 1142 | case VDevStateClosed: 1143 | return "CLOSED" 1144 | case VDevStateOffline: 1145 | return "OFFLINE" 1146 | case VDevStateRemoved: 1147 | return "REMOVED" 1148 | case VDevStateCantOpen: 1149 | 
return "CANT_OPEN" 1150 | case VDevStateFaulted: 1151 | return "FAULTED" 1152 | case VDevStateDegraded: 1153 | return "DEGRADED" 1154 | case VDevStateHealthy: 1155 | return "ONLINE" 1156 | default: 1157 | return "UNKNOWN" 1158 | } 1159 | } 1160 | 1161 | func (s PoolStatus) String() string { 1162 | switch s { 1163 | case PoolStatusCorruptCache: 1164 | return "CORRUPT_CACHE" 1165 | case PoolStatusMissingDevR: 1166 | return "MISSING_DEV_R" /* missing device with replicas */ 1167 | case PoolStatusMissingDevNr: /* missing device with no replicas */ 1168 | return "MISSING_DEV_NR" 1169 | case PoolStatusCorruptLabelR: /* bad device label with replicas */ 1170 | return "CORRUPT_LABEL_R" 1171 | case PoolStatusCorruptLabelNr: /* bad device label with no replicas */ 1172 | return "CORRUPT_LABEL_NR" 1173 | case PoolStatusBadGUIDSum: /* sum of device guids didn't match */ 1174 | return "BAD_GUID_SUM" 1175 | case PoolStatusCorruptPool: /* pool metadata is corrupted */ 1176 | return "CORRUPT_POOL" 1177 | case PoolStatusCorruptData: /* data errors in user (meta)data */ 1178 | return "CORRUPT_DATA" 1179 | case PoolStatusFailingDev: /* device experiencing errors */ 1180 | return "FAILING_DEV" 1181 | case PoolStatusVersionNewer: /* newer on-disk version */ 1182 | return "VERSION_NEWER" 1183 | case PoolStatusHostidMismatch: /* last accessed by another system */ 1184 | return "HOSTID_MISMATCH" 1185 | case PoolStatusHosidActive: /* currently active on another system */ 1186 | return "HOSTID_ACTIVE" 1187 | case PoolStatusHostidRequired: /* multihost=on and hostid=0 */ 1188 | return "HOSTID_REQUIRED" 1189 | case PoolStatusIoFailureWait: /* failed I/O, failmode 'wait' */ 1190 | return "FAILURE_WAIT" 1191 | case PoolStatusIoFailureContinue: /* failed I/O, failmode 'continue' */ 1192 | return "FAILURE_CONTINUE" 1193 | case PoolStatusIOFailureMap: /* failed MMP, failmode not 'panic' */ 1194 | return "HOSTID_FAILURE_MAP" 1195 | case PoolStatusBadLog: /* cannot read log chain(s) */ 1196 | return "BAD_LOG" 1197 | case PoolStatusErrata: /* informational errata available */ 1198 | return "ERRATA" 1199 | 1200 | /* 1201 | * If the pool has unsupported features but can still be opened in 1202 | * read-only mode, its status is ZPOOL_STATUS_UNSUP_FEAT_WRITE. If the 1203 | * pool has unsupported features but cannot be opened at all, its 1204 | * status is ZPOOL_STATUS_UNSUP_FEAT_READ. 1205 | */ 1206 | case PoolStatusUnsupFeatRead: /* unsupported features for read */ 1207 | return "UNSUP_FEAT_READ" 1208 | case PoolStatusUnsupFeatWrite: /* unsupported features for write */ 1209 | return "UNSUP_FEAT_WRITE" 1210 | 1211 | /* 1212 | * These faults have no corresponding message ID. At the time we are 1213 | * checking the status, the original reason for the FMA fault (I/O or 1214 | * checksum errors) has been lost. 1215 | */ 1216 | case PoolStatusFaultedDevR: /* faulted device with replicas */ 1217 | return "FAULTED_DEV_R" 1218 | case PoolStatusFaultedDevNr: /* faulted device with no replicas */ 1219 | return "FAULTED_DEV_NR" 1220 | 1221 | /* 1222 | * The following are not faults per se, but still an error possibly 1223 | * requiring administrative attention. There is no corresponding 1224 | * message ID. 
*/ 1225 | case PoolStatusVersionOlder: /* older legacy on-disk version */ 1226 | return "VERSION_OLDER" 1227 | case PoolStatusFeatDisabled: /* supported features are disabled */ 1228 | return "FEAT_DISABLED" 1229 | case PoolStatusResilvering: /* device being resilvered */ 1230 | return "RESILVERING" 1231 | case PoolStatusOfflineDev: /* device offline */ 1232 | return "OFFLINE_DEV" 1233 | case PoolStatusRemovedDev: /* removed device */ 1234 | return "REMOVED_DEV" 1235 | 1236 | /* 1237 | * Finally, the following indicates a healthy pool. 1238 | */ 1239 | case PoolStatusOk: 1240 | return "OK" 1241 | default: 1242 | return "OK" 1243 | } 1244 | } 1245 | 1246 | func (s PoolInitializeAction) String() string { 1247 | switch s { 1248 | case PoolInitializeStart: 1249 | return "START" 1250 | case PoolInitializeCancel: 1251 | return "CANCEL" 1252 | case PoolInitializeSuspend: 1253 | return "SUSPEND" 1254 | default: 1255 | return "UNKNOWN" 1256 | } 1257 | } 1258 | 1259 | -------------------------------------------------------------------------------- /zpool.h: -------------------------------------------------------------------------------- 1 | /* C wrappers around some zfs calls and C in general that should simplify 2 | * using libzfs from the Go language and make Go code shorter and more readable. 3 | */ 4 | 5 | #ifndef SERVERWARE_ZPOOL_H 6 | #define SERVERWARE_ZPOOL_H 7 | 8 | /* Rewind request information */ 9 | #define ZPOOL_NO_REWIND 1 /* No policy - default behavior */ 10 | #define ZPOOL_NEVER_REWIND 2 /* Do not search for best txg or rewind */ 11 | #define ZPOOL_TRY_REWIND 4 /* Search for best txg, but do not rewind */ 12 | #define ZPOOL_DO_REWIND 8 /* Rewind to best txg w/in deferred frees */ 13 | #define ZPOOL_EXTREME_REWIND 16 /* Allow extreme measures to find best txg */ 14 | #define ZPOOL_REWIND_MASK 28 /* All the possible rewind bits */ 15 | #define ZPOOL_REWIND_POLICIES 31 /* All the possible policy bits */ 16 | 17 | struct zpool_list { 18 | zpool_handle_t *zph; 19 | void *pnext; 20 | }; 21 | 22 | struct vdev_children { 23 | nvlist_t **first; 24 | uint_t count; 25 | }; 26 | 27 | typedef struct zpool_list zpool_list_t; 28 | typedef struct zpool_list* zpool_list_ptr; 29 | typedef struct vdev_children vdev_children_t; 30 | typedef struct vdev_children* vdev_children_ptr; 31 | 32 | typedef struct pool_scan_stat* pool_scan_stat_ptr; 33 | 34 | zpool_list_t *create_zpool_list_item(); 35 | void zprop_source_tostr(char *dst, zprop_source_t source); 36 | 37 | zpool_list_t* zpool_list_open(const char *name); 38 | zpool_list_ptr zpool_list_openall(); 39 | zpool_list_t *zpool_next(zpool_list_t *pool); 40 | 41 | void zpool_list_free(zpool_list_t *list); 42 | void zpool_list_close(zpool_list_t *pool); 43 | 44 | property_list_ptr read_zpool_property(zpool_list_ptr pool, int prop); 45 | property_list_t *read_zpool_properties(zpool_list_ptr pool); 46 | property_list_t *next_property(property_list_t *list); 47 | 48 | pool_state_t zpool_read_state(zpool_handle_t *zh); 49 | 50 | 51 | const char *lasterr(void); 52 | 53 | // int 54 | // add_prop_list(const char *propname, char *propval, nvlist_t **props, 55 | // boolean_t poolprop); 56 | 57 | nvlist_t** nvlist_alloc_array(int count); 58 | void nvlist_array_set(nvlist_t** a, int i, nvlist_t *item); 59 | void nvlist_free_array(nvlist_t **a); 60 | nvlist_t *nvlist_array_at(nvlist_t **a, uint_t i); 61 | 62 | int refresh_stats(zpool_list_t *pool); 63 | 64 | const char *get_vdev_type(nvlist_ptr nv); 65 | uint64_t get_vdev_guid(nvlist_ptr nv); 66 | const vdev_stat_ptr 
get_vdev_stats(nvlist_ptr nv); 67 | pool_scan_stat_ptr get_vdev_scan_stats(nvlist_t *nv); 68 | vdev_children_ptr get_vdev_children(nvlist_t *nv); 69 | vdev_children_ptr get_vdev_spares(nvlist_t *nv); 70 | vdev_children_ptr get_vdev_l2cache(nvlist_t *nv); 71 | const char *get_vdev_path(nvlist_ptr nv); 72 | uint64_t get_vdev_is_log(nvlist_ptr nv); 73 | 74 | uint64_t get_zpool_state(nvlist_ptr nv); 75 | uint64_t get_zpool_guid(nvlist_ptr nv); 76 | const char *get_zpool_name(nvlist_ptr nv); 77 | const char *get_zpool_comment(nvlist_ptr nv); 78 | 79 | nvlist_ptr get_zpool_vdev_tree(nvlist_ptr nv); 80 | 81 | nvlist_ptr go_zpool_search_import(libzfs_handle_ptr zfsh, int paths, char **path, boolean_t do_scan); 82 | 83 | uint64_t set_zpool_vdev_online(zpool_list_t *pool, const char *path, int flags); 84 | int set_zpool_vdev_offline(zpool_list_t *pool, const char *path, boolean_t istmp, boolean_t force); 85 | int do_zpool_clear(zpool_list_t *pool, const char *device, u_int32_t rewind_policy); 86 | void collect_zpool_leaves(zpool_handle_t *zhp, nvlist_t *nvroot, nvlist_t *nv); 87 | 88 | 89 | extern char *sZPOOL_CONFIG_VERSION; 90 | extern char *sZPOOL_CONFIG_POOL_NAME; 91 | extern char *sZPOOL_CONFIG_POOL_STATE; 92 | extern char *sZPOOL_CONFIG_POOL_TXG; 93 | extern char *sZPOOL_CONFIG_POOL_GUID; 94 | extern char *sZPOOL_CONFIG_CREATE_TXG; 95 | extern char *sZPOOL_CONFIG_TOP_GUID; 96 | extern char *sZPOOL_CONFIG_VDEV_TREE; 97 | extern char *sZPOOL_CONFIG_TYPE; 98 | extern char *sZPOOL_CONFIG_CHILDREN; 99 | extern char *sZPOOL_CONFIG_ID; 100 | extern char *sZPOOL_CONFIG_GUID; 101 | extern char *sZPOOL_CONFIG_PATH; 102 | extern char *sZPOOL_CONFIG_DEVID; 103 | extern char *sZPOOL_CONFIG_METASLAB_ARRAY; 104 | extern char *sZPOOL_CONFIG_METASLAB_SHIFT; 105 | extern char *sZPOOL_CONFIG_ASHIFT; 106 | extern char *sZPOOL_CONFIG_ASIZE; 107 | extern char *sZPOOL_CONFIG_DTL; 108 | extern char *sZPOOL_CONFIG_SCAN_STATS; 109 | extern char *sZPOOL_CONFIG_VDEV_STATS; 110 | extern char *sZPOOL_CONFIG_WHOLE_DISK; 111 | extern char *sZPOOL_CONFIG_ERRCOUNT; 112 | extern char *sZPOOL_CONFIG_NOT_PRESENT; 113 | extern char *sZPOOL_CONFIG_SPARES; 114 | extern char *sZPOOL_CONFIG_IS_SPARE; 115 | extern char *sZPOOL_CONFIG_NPARITY; 116 | extern char *sZPOOL_CONFIG_HOSTID; 117 | extern char *sZPOOL_CONFIG_HOSTNAME; 118 | extern char *sZPOOL_CONFIG_LOADED_TIME; 119 | extern char *sZPOOL_CONFIG_UNSPARE; 120 | extern char *sZPOOL_CONFIG_PHYS_PATH; 121 | extern char *sZPOOL_CONFIG_IS_LOG; 122 | extern char *sZPOOL_CONFIG_L2CACHE; 123 | extern char *sZPOOL_CONFIG_HOLE_ARRAY; 124 | extern char *sZPOOL_CONFIG_VDEV_CHILDREN; 125 | extern char *sZPOOL_CONFIG_IS_HOLE; 126 | extern char *sZPOOL_CONFIG_DDT_HISTOGRAM; 127 | extern char *sZPOOL_CONFIG_DDT_OBJ_STATS; 128 | extern char *sZPOOL_CONFIG_DDT_STATS; 129 | extern char *sZPOOL_CONFIG_SPLIT; 130 | extern char *sZPOOL_CONFIG_ORIG_GUID; 131 | extern char *sZPOOL_CONFIG_SPLIT_GUID; 132 | extern char *sZPOOL_CONFIG_SPLIT_LIST; 133 | extern char *sZPOOL_CONFIG_REMOVING; 134 | extern char *sZPOOL_CONFIG_RESILVER_TXG; 135 | extern char *sZPOOL_CONFIG_COMMENT; 136 | extern char *sZPOOL_CONFIG_SUSPENDED; 137 | extern char *sZPOOL_CONFIG_TIMESTAMP; 138 | extern char *sZPOOL_CONFIG_BOOTFS; 139 | extern char *sZPOOL_CONFIG_MISSING_DEVICES; 140 | extern char *sZPOOL_CONFIG_LOAD_INFO; 141 | extern char *sZPOOL_CONFIG_REWIND_INFO; 142 | extern char *sZPOOL_CONFIG_UNSUP_FEAT; 143 | extern char *sZPOOL_CONFIG_ENABLED_FEAT; 144 | extern char *sZPOOL_CONFIG_CAN_RDONLY; 145 | extern char 
*sZPOOL_CONFIG_FEATURES_FOR_READ; 146 | extern char *sZPOOL_CONFIG_FEATURE_STATS; 147 | extern char *sZPOOL_CONFIG_ERRATA; 148 | extern char *sZPOOL_CONFIG_OFFLINE; 149 | extern char *sZPOOL_CONFIG_FAULTED; 150 | extern char *sZPOOL_CONFIG_DEGRADED; 151 | extern char *sZPOOL_CONFIG_REMOVED; 152 | extern char *sZPOOL_CONFIG_FRU; 153 | extern char *sZPOOL_CONFIG_AUX_STATE; 154 | extern char *sZPOOL_LOAD_POLICY; 155 | extern char *sZPOOL_LOAD_REWIND_POLICY; 156 | extern char *sZPOOL_LOAD_REQUEST_TXG; 157 | extern char *sZPOOL_LOAD_META_THRESH; 158 | extern char *sZPOOL_LOAD_DATA_THRESH; 159 | extern char *sZPOOL_CONFIG_LOAD_TIME; 160 | extern char *sZPOOL_CONFIG_LOAD_DATA_ERRORS; 161 | extern char *sZPOOL_CONFIG_REWIND_TIME; 162 | 163 | 164 | #endif 165 | /* SERVERWARE_ZPOOL_H */ 166 | -------------------------------------------------------------------------------- /zpool_test.go: -------------------------------------------------------------------------------- 1 | package zfs_test 2 | 3 | import ( 4 | "fmt" 5 | "io/ioutil" 6 | "os" 7 | "testing" 8 | "time" 9 | 10 | zfs "github.com/bicomsystems/go-libzfs" 11 | ) 12 | 13 | /* ------------------------------------------------------------------------- */ 14 | // HELPERS: 15 | 16 | var TSTPoolName = "TESTPOOL" 17 | var TSTPoolGUID string 18 | 19 | func CreateTmpSparse(prefix string, size int64) (path string, err error) { 20 | sf, err := ioutil.TempFile("/tmp", prefix) 21 | if err != nil { 22 | return 23 | } 24 | defer sf.Close() 25 | if err = sf.Truncate(size); err != nil { 26 | return 27 | } 28 | path = sf.Name() 29 | return 30 | } 31 | 32 | var s1path, s2path, s3path string 33 | 34 | // This will create sparse files in the tmp directory, 35 | // for the purpose of creating a test pool. 36 | func createTestpoolVdisks() (err error) { 37 | if s1path, err = CreateTmpSparse("zfs_test_", 0x140000000); err != nil { 38 | return 39 | } 40 | if s2path, err = CreateTmpSparse("zfs_test_", 0x140000000); err != nil { 41 | // try cleanup 42 | os.Remove(s1path) 43 | return 44 | } 45 | if s3path, err = CreateTmpSparse("zfs_test_", 0x140000000); err != nil { 46 | // try cleanup 47 | os.Remove(s1path) 48 | os.Remove(s2path) 49 | return 50 | } 51 | return 52 | } 53 | 54 | func removeVDisk(path string) { 55 | if err := os.Remove(path); err != nil { 56 | println("Error: ", err.Error()) 57 | } 58 | } 59 | 60 | // Cleanup sparse files used for tests 61 | func cleanupVDisks() { 62 | // try cleanup 63 | removeVDisk(s1path) 64 | removeVDisk(s2path) 65 | removeVDisk(s3path) 66 | } 67 | 68 | /* ------------------------------------------------------------------------- */ 69 | // TESTS: 70 | 71 | // Create 3 sparse files in the /tmp directory, each 5G in size, and use them to create 72 | // mirror TESTPOOL with one spare "disk" 73 | func zpoolTestPoolCreate(t *testing.T) { 74 | println("TEST PoolCreate ... 
") 75 | // first check if pool with same name already exist 76 | // we don't want conflict 77 | for { 78 | p, err := zfs.PoolOpen(TSTPoolName) 79 | if err != nil { 80 | break 81 | } 82 | p.Close() 83 | TSTPoolName += "0" 84 | } 85 | var err error 86 | 87 | if err = createTestpoolVdisks(); err != nil { 88 | t.Error(err) 89 | return 90 | } 91 | 92 | disks := [2]string{s1path, s2path} 93 | 94 | var vdev zfs.VDevTree 95 | var vdevs, mdevs, sdevs []zfs.VDevTree 96 | for _, d := range disks { 97 | mdevs = append(mdevs, 98 | zfs.VDevTree{Type: zfs.VDevTypeFile, Path: d}) 99 | } 100 | sdevs = []zfs.VDevTree{ 101 | {Type: zfs.VDevTypeFile, Path: s3path}} 102 | vdevs = []zfs.VDevTree{ 103 | zfs.VDevTree{Type: zfs.VDevTypeMirror, Devices: mdevs}, 104 | } 105 | vdev.Devices = vdevs 106 | vdev.Spares = sdevs 107 | 108 | props := make(map[zfs.Prop]string) 109 | fsprops := make(map[zfs.Prop]string) 110 | features := make(map[string]string) 111 | fsprops[zfs.DatasetPropMountpoint] = "none" 112 | features["async_destroy"] = zfs.FENABLED 113 | features["empty_bpobj"] = zfs.FENABLED 114 | features["lz4_compress"] = zfs.FENABLED 115 | 116 | pool, err := zfs.PoolCreate(TSTPoolName, vdev, features, props, fsprops) 117 | if err != nil { 118 | t.Error(err) 119 | // try cleanup 120 | os.Remove(s1path) 121 | os.Remove(s2path) 122 | os.Remove(s3path) 123 | return 124 | } 125 | defer pool.Close() 126 | 127 | pguid, _ := pool.GetProperty(zfs.PoolPropGUID) 128 | TSTPoolGUID = pguid.Value 129 | 130 | print("PASS\n\n") 131 | } 132 | 133 | // Open and list all pools and them state on the system 134 | // Then list properties of last pool in the list 135 | func zpoolTestPoolOpenAll(t *testing.T) { 136 | println("TEST PoolOpenAll() ... ") 137 | var pname string 138 | pools, err := zfs.PoolOpenAll() 139 | if err != nil { 140 | t.Error(err) 141 | return 142 | } 143 | println("\tThere is ", len(pools), " ZFS pools.") 144 | for _, p := range pools { 145 | pname, err = p.Name() 146 | if err != nil { 147 | t.Error(err) 148 | p.Close() 149 | return 150 | } 151 | pstate, err := p.State() 152 | if err != nil { 153 | t.Error(err) 154 | p.Close() 155 | return 156 | } 157 | println("\tPool: ", pname, " state: ", pstate) 158 | p.Close() 159 | } 160 | print("PASS\n\n") 161 | } 162 | 163 | func zpoolTestPoolDestroy(t *testing.T) { 164 | println("TEST POOL Destroy( ", TSTPoolName, " ) ... ") 165 | p, err := zfs.PoolOpen(TSTPoolName) 166 | if err != nil { 167 | t.Error(err) 168 | return 169 | } 170 | defer p.Close() 171 | if err = p.Destroy(TSTPoolName); err != nil { 172 | t.Error(err.Error()) 173 | return 174 | } 175 | print("PASS\n\n") 176 | } 177 | 178 | func zpoolTestFailPoolOpen(t *testing.T) { 179 | println("TEST open of non existing pool ... ") 180 | pname := "fail to open this pool" 181 | p, err := zfs.PoolOpen(pname) 182 | if err != nil { 183 | print("PASS\n\n") 184 | return 185 | } 186 | t.Error("PoolOpen pass when it should fail") 187 | p.Close() 188 | } 189 | 190 | func zpoolTestExport(t *testing.T) { 191 | println("TEST POOL Export( ", TSTPoolName, " ) ... ") 192 | p, err := zfs.PoolOpen(TSTPoolName) 193 | if err != nil { 194 | t.Error(err) 195 | return 196 | } 197 | p.Export(false, "Test exporting pool") 198 | defer p.Close() 199 | print("PASS\n\n") 200 | } 201 | 202 | func zpoolTestExportForce(t *testing.T) { 203 | println("TEST POOL ExportForce( ", TSTPoolName, " ) ... 
") 204 | p, err := zfs.PoolOpen(TSTPoolName) 205 | if err != nil { 206 | t.Error(err) 207 | return 208 | } 209 | p.ExportForce("Test force exporting pool") 210 | defer p.Close() 211 | print("PASS\n\n") 212 | } 213 | 214 | func zpoolTestImport(t *testing.T) { 215 | println("TEST POOL Import( ", TSTPoolName, " ) ... ") 216 | p, err := zfs.PoolImport(TSTPoolName, []string{"/tmp"}) 217 | if err != nil { 218 | t.Error(err) 219 | return 220 | } 221 | defer p.Close() 222 | print("PASS\n\n") 223 | } 224 | 225 | func zpoolTestImportByGUID(t *testing.T) { 226 | println("TEST POOL ImportByGUID( ", TSTPoolGUID, " ) ... ") 227 | p, err := zfs.PoolImportByGUID(TSTPoolGUID, []string{"/tmp"}) 228 | if err != nil { 229 | t.Error(err) 230 | return 231 | } 232 | defer p.Close() 233 | print("PASS\n\n") 234 | } 235 | 236 | func printVDevTree(vt zfs.VDevTree, pref string) { 237 | first := pref + vt.Name 238 | fmt.Printf("%-30s | %-10s | %-10s | %s\n", first, vt.Type, 239 | vt.Stat.State.String(), vt.Path) 240 | for _, v := range vt.Devices { 241 | printVDevTree(v, " "+pref) 242 | } 243 | if len(vt.Spares) > 0 { 244 | fmt.Println("spares:") 245 | for _, v := range vt.Spares { 246 | printVDevTree(v, " "+pref) 247 | } 248 | } 249 | 250 | if len(vt.L2Cache) > 0 { 251 | fmt.Println("l2cache:") 252 | for _, v := range vt.L2Cache { 253 | printVDevTree(v, " "+pref) 254 | } 255 | } 256 | } 257 | 258 | func zpoolTestPoolImportSearch(t *testing.T) { 259 | println("TEST PoolImportSearch") 260 | pools, err := zfs.PoolImportSearch([]string{"/tmp"}) 261 | if err != nil { 262 | t.Error(err.Error()) 263 | return 264 | } 265 | for _, p := range pools { 266 | println() 267 | println("---------------------------------------------------------------") 268 | println("pool: ", p.Name) 269 | println("guid: ", p.GUID) 270 | println("state: ", p.State.String()) 271 | fmt.Printf("%-30s | %-10s | %-10s | %s\n", "NAME", "TYPE", "STATE", "PATH") 272 | println("---------------------------------------------------------------") 273 | printVDevTree(p.VDevs, "") 274 | } 275 | print("PASS\n\n") 276 | } 277 | 278 | func zpoolTestPoolProp(t *testing.T) { 279 | println("TEST PoolProp on ", TSTPoolName, " ... 
") 280 | if pool, err := zfs.PoolOpen(TSTPoolName); err == nil { 281 | defer pool.Close() 282 | // Turn on snapshot listing for pool 283 | pool.SetProperty(zfs.PoolPropListsnaps, "off") 284 | // Verify change is succesfull 285 | if pool.Properties[zfs.PoolPropListsnaps].Value != "off" { 286 | t.Error(fmt.Errorf("Update of pool property failed")) 287 | return 288 | } 289 | 290 | // Test fetching property 291 | propHealth, err := pool.GetProperty(zfs.PoolPropHealth) 292 | if err != nil { 293 | t.Error(err) 294 | return 295 | } 296 | println("Pool property health: ", propHealth.Value) 297 | 298 | propGUID, err := pool.GetProperty(zfs.PoolPropGUID) 299 | if err != nil { 300 | t.Error(err) 301 | return 302 | } 303 | println("Pool property GUID: ", propGUID.Value) 304 | 305 | // this test pool should not be bootable 306 | prop, err := pool.GetProperty(zfs.PoolPropBootfs) 307 | if err != nil { 308 | t.Error(err) 309 | return 310 | } 311 | if prop.Value != "-" { 312 | t.Errorf("Failed at bootable fs property evaluation") 313 | return 314 | } 315 | 316 | // fetch all properties 317 | if err = pool.ReloadProperties(); err != nil { 318 | t.Error(err) 319 | return 320 | } 321 | } else { 322 | t.Error(err) 323 | return 324 | } 325 | print("PASS\n\n") 326 | } 327 | 328 | func zpoolTestPoolStatusAndState(t *testing.T) { 329 | println("TEST pool Status/State ( ", TSTPoolName, " ) ... ") 330 | pool, err := zfs.PoolOpen(TSTPoolName) 331 | if err != nil { 332 | t.Error(err.Error()) 333 | return 334 | } 335 | defer pool.Close() 336 | 337 | if _, err = pool.Status(); err != nil { 338 | t.Error(err.Error()) 339 | return 340 | } 341 | 342 | var pstate zfs.PoolState 343 | if pstate, err = pool.State(); err != nil { 344 | t.Error(err.Error()) 345 | return 346 | } 347 | println("POOL", TSTPoolName, "state:", zfs.PoolStateToName(pstate)) 348 | 349 | print("PASS\n\n") 350 | } 351 | 352 | func zpoolTestPoolVDevTree(t *testing.T) { 353 | var vdevs zfs.VDevTree 354 | println("TEST pool VDevTree ( ", TSTPoolName, " ) ... ") 355 | pool, err := zfs.PoolOpen(TSTPoolName) 356 | if err != nil { 357 | t.Error(err.Error()) 358 | return 359 | } 360 | defer pool.Close() 361 | vdevs, err = pool.VDevTree() 362 | if err != nil { 363 | t.Error(err.Error()) 364 | return 365 | } 366 | fmt.Printf("%-30s | %-10s | %-10s | %s\n", "NAME", "TYPE", "STATE", "PATH") 367 | println("---------------------------------------------------------------") 368 | printVDevTree(vdevs, "") 369 | print("PASS\n\n") 370 | } 371 | 372 | func zpoolTestInitialization(t *testing.T) { 373 | println("TEST POOL Initialization ( ", TSTPoolName, " ) ... 
") 374 | pool, err := zfs.PoolOpen(TSTPoolName) 375 | if err != nil { 376 | t.Error(err.Error()) 377 | return 378 | } 379 | defer pool.Close() 380 | 381 | err = pool.Initialize() 382 | if err != nil { 383 | t.Error(err.Error()) 384 | return 385 | } 386 | time.Sleep(1 * time.Second) 387 | err = pool.SuspendInitialization() 388 | if err != nil { 389 | t.Error(err.Error()) 390 | return 391 | } 392 | err = pool.Initialize() 393 | if err != nil { 394 | t.Error(err.Error()) 395 | return 396 | } 397 | time.Sleep(1 * time.Second) 398 | err = pool.CancelInitialization() 399 | if err != nil { 400 | t.Error(err.Error()) 401 | return 402 | } 403 | print("PASS\n\n") 404 | } 405 | 406 | /* ------------------------------------------------------------------------- */ 407 | // EXAMPLES: 408 | 409 | func ExamplePoolProp() { 410 | if pool, err := zfs.PoolOpen("SSD"); err == nil { 411 | print("Pool size is: ", pool.Properties[zfs.PoolPropSize].Value) 412 | // Turn on snapshot listing for pool 413 | pool.SetProperty(zfs.PoolPropListsnaps, "on") 414 | println("Changed property", 415 | zfs.PoolPropertyToName(zfs.PoolPropListsnaps), "to value:", 416 | pool.Properties[zfs.PoolPropListsnaps].Value) 417 | 418 | prop, err := pool.GetProperty(zfs.PoolPropHealth) 419 | if err != nil { 420 | panic(err) 421 | } 422 | println("Update and print out pool health:", prop.Value) 423 | } else { 424 | print("Error: ", err) 425 | } 426 | } 427 | 428 | // Open and list all pools on system with them properties 429 | func ExamplePoolOpenAll() { 430 | // Lets open handles to all active pools on system 431 | pools, err := zfs.PoolOpenAll() 432 | if err != nil { 433 | println(err) 434 | } 435 | 436 | // Print each pool name and properties 437 | for _, p := range pools { 438 | // Print fancy header 439 | fmt.Printf("\n -----------------------------------------------------------\n") 440 | fmt.Printf(" POOL: %49s \n", p.Properties[zfs.PoolPropName].Value) 441 | fmt.Printf("|-----------------------------------------------------------|\n") 442 | fmt.Printf("| PROPERTY | VALUE | SOURCE |\n") 443 | fmt.Printf("|-----------------------------------------------------------|\n") 444 | 445 | // Iterate pool properties and print name, value and source 446 | for key, prop := range p.Properties { 447 | pkey := zfs.Prop(key) 448 | if pkey == zfs.PoolPropName { 449 | continue // Skip name its already printed above 450 | } 451 | fmt.Printf("|%14s | %20s | %15s |\n", 452 | zfs.PoolPropertyToName(pkey), 453 | prop.Value, prop.Source) 454 | println("") 455 | } 456 | println("") 457 | 458 | // Close pool handle and free memory, since it will not be used anymore 459 | p.Close() 460 | } 461 | } 462 | 463 | func ExamplePoolCreate() { 464 | disks := [2]string{"/dev/disk/by-id/ATA-123", "/dev/disk/by-id/ATA-456"} 465 | 466 | var vdev zfs.VDevTree 467 | var vdevs, mdevs, sdevs []zfs.VDevTree 468 | 469 | // build mirror devices specs 470 | for _, d := range disks { 471 | mdevs = append(mdevs, 472 | zfs.VDevTree{Type: zfs.VDevTypeDisk, Path: d}) 473 | } 474 | 475 | // spare device specs 476 | sdevs = []zfs.VDevTree{ 477 | {Type: zfs.VDevTypeDisk, Path: "/dev/disk/by-id/ATA-789"}} 478 | 479 | // pool specs 480 | vdevs = []zfs.VDevTree{ 481 | zfs.VDevTree{Type: zfs.VDevTypeMirror, Devices: mdevs}, 482 | } 483 | 484 | vdev.Devices = vdevs 485 | vdev.Spares = sdevs 486 | 487 | // pool properties 488 | props := make(map[zfs.Prop]string) 489 | // root dataset filesystem properties 490 | fsprops := make(map[zfs.Prop]string) 491 | // pool features 492 | features := 
493 |
494 | 	// Turn off auto mounting by ZFS
495 | 	fsprops[zfs.DatasetPropMountpoint] = "none"
496 |
497 | 	// Enable some features
498 | 	features["async_destroy"] = "enabled"
499 | 	features["empty_bpobj"] = "enabled"
500 | 	features["lz4_compress"] = "enabled"
501 |
502 | 	// Based on the specs formed above, create the test pool as a 2-disk
503 | 	// mirror with one spare disk
504 | 	pool, err := zfs.PoolCreate("TESTPOOL", vdev, features, props, fsprops)
505 | 	if err != nil {
506 | 		println("Error: ", err.Error())
507 | 		return
508 | 	}
509 | 	defer pool.Close()
510 | }
511 |
512 | func ExamplePool_Destroy() {
513 | 	pname := "TESTPOOL"
514 |
515 | 	// We need a handle to the pool first
516 | 	p, err := zfs.PoolOpen(pname)
517 | 	if err != nil {
518 | 		println("Error: ", err.Error())
519 | 		return
520 | 	}
521 |
522 | 	// Make sure the pool handle is freed after we are done here
523 | 	defer p.Close()
524 |
525 | 	if err = p.Destroy("Example of pool destroy (TESTPOOL)"); err != nil {
526 | 		println("Error: ", err.Error())
527 | 		return
528 | 	}
529 | }
530 |
531 | func ExamplePoolImport() {
532 | 	p, err := zfs.PoolImport("TESTPOOL", []string{"/dev/disk/by-id"})
533 | 	if err != nil {
534 | 		panic(err)
535 | 	}
536 | 	p.Close()
537 | }
538 |
539 | func ExamplePool_Export() {
540 | 	p, err := zfs.PoolOpen("TESTPOOL")
541 | 	if err != nil {
542 | 		panic(err)
543 | 	}
544 | 	defer p.Close()
545 | 	if err = p.Export(false, "Example exporting pool"); err != nil {
546 | 		panic(err)
547 | 	}
548 | }
549 |
550 | func ExamplePool_ExportForce() {
551 | 	p, err := zfs.PoolOpen("TESTPOOL")
552 | 	if err != nil {
553 | 		panic(err)
554 | 	}
555 | 	defer p.Close()
556 | 	if err = p.ExportForce("Example exporting pool"); err != nil {
557 | 		panic(err)
558 | 	}
559 | }
560 |
561 | func ExamplePool_State() {
562 | 	p, err := zfs.PoolOpen("TESTPOOL")
563 | 	if err != nil {
564 | 		panic(err)
565 | 	}
566 | 	defer p.Close()
567 | 	pstate, err := p.State()
568 | 	if err != nil {
569 | 		panic(err)
570 | 	}
571 | 	println("POOL TESTPOOL state:", zfs.PoolStateToName(pstate))
572 | }
573 |
574 | // func TestPool_VDevTree(t *testing.T) {
575 | // 	type fields struct {
576 | // 		poolName string
577 | // 	}
578 | // 	tests := []struct {
579 | // 		name    string
580 | // 		fields  fields
581 | // 		wantErr bool
582 | // 	}{
583 | // 		// TODO: Add test cases.
584 | // 		{
585 | // 			name:    "test1",
586 | // 			fields:  fields{"TESTPOOL"},
587 | // 			wantErr: false,
588 | // 		},
589 | // 	}
590 | // 	for _, tt := range tests {
591 | // 		t.Run(tt.name, func(t *testing.T) {
592 | // 			pool, _ := zfs.PoolOpen(tt.fields.poolName)
593 | // 			defer pool.Close()
594 | // 			gotVdevs, err := pool.VDevTree()
595 | // 			if (err != nil) != tt.wantErr {
596 | // 				t.Errorf("Pool.VDevTree() error = %v, wantErr %v", err, tt.wantErr)
597 | // 				return
598 | // 			}
599 | // 			jsonData, _ := json.MarshalIndent(gotVdevs, "", "\t")
600 | // 			t.Logf("gotVdevs: %s", string(jsonData))
601 | // 		})
602 | // 	}
603 | // }
604 |
--------------------------------------------------------------------------------
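The commented-out `TestPool_VDevTree` draft above already sketches a table-driven test. For reference, here is a minimal runnable variant of the same idea; it is only a sketch, assuming a pool named `TESTPOOL` exists (as created by `zpoolTestPoolCreate`), and the test name `TestPoolVDevTreeTable` is hypothetical. It adds the `encoding/json` import the draft relies on.

```go
package zfs_test

import (
	"encoding/json"
	"testing"

	zfs "github.com/bicomsystems/go-libzfs"
)

// TestPoolVDevTreeTable is a runnable take on the commented-out
// table-driven draft above. It assumes TESTPOOL already exists and
// skips otherwise, so it can run on systems without the test pool.
func TestPoolVDevTreeTable(t *testing.T) {
	tests := []struct {
		name     string
		poolName string
		wantErr  bool
	}{
		{name: "test1", poolName: "TESTPOOL", wantErr: false},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			pool, err := zfs.PoolOpen(tt.poolName)
			if err != nil {
				t.Skipf("pool %q not available: %v", tt.poolName, err)
			}
			defer pool.Close()
			gotVdevs, err := pool.VDevTree()
			if (err != nil) != tt.wantErr {
				t.Errorf("Pool.VDevTree() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			// Dump the tree so the layout can be inspected in -v output
			jsonData, _ := json.MarshalIndent(gotVdevs, "", "\t")
			t.Logf("gotVdevs: %s", string(jsonData))
		})
	}
}
```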
/zpool_vdev.c:
--------------------------------------------------------------------------------
1 | #include
2 | #include
3 | #include
4 | #include
5 | #include
6 |
7 | #include "common.h"
8 | #include "zpool.h"
9 |
10 |
11 | uint64_t set_zpool_vdev_online(zpool_list_t *pool, const char *path, int flags) {
12 | 	vdev_state_t newstate = VDEV_STATE_UNKNOWN;
13 | 	zpool_vdev_online(pool->zph, path, flags, &newstate);
14 | 	return newstate;
15 | }
16 |
17 | int set_zpool_vdev_offline(zpool_list_t *pool, const char *path, boolean_t istmp, boolean_t force) {
18 | 	int ret = 0;
19 | 	// if (force) {
20 | 	// 	uint64_t guid = zpool_vdev_path_to_guid(pool->zph, path);
21 | 	// 	vdev_aux_t aux;
22 | 	// 	if (istmp == B_FALSE) {
23 | 	// 		/* Force the fault to persist across imports */
24 | 	// 		aux = VDEV_AUX_EXTERNAL_PERSIST;
25 | 	// 	} else {
26 | 	// 		aux = VDEV_AUX_EXTERNAL;
27 | 	// 	}
28 |
29 | 	// 	if (guid == 0 || zpool_vdev_fault(pool->zph, guid, aux) != 0)
30 | 	// 		ret = 1;
31 | 	// } else {
32 | 	if (zpool_vdev_offline(pool->zph, path, istmp) != 0)
33 | 		ret = 1;
34 | 	// }
35 | 	return ret;
36 | }
37 |
38 |
--------------------------------------------------------------------------------
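Note the asymmetry in the two C helpers above: `set_zpool_vdev_online` reports the device's new `vdev_state_t` through an out parameter, while `set_zpool_vdev_offline` only returns a success/failure code. The Go wrappers in `zpool_vdev.go` below expose both through the same error-returning style. A rough usage sketch follows; the pool name and device path are hypothetical placeholders for values produced by the tests above.

```go
package main

import (
	zfs "github.com/bicomsystems/go-libzfs"
)

func main() {
	// Open a handle to an existing pool; "TESTPOOL" is assumed to
	// have been created earlier, e.g. on sparse files under /tmp.
	pool, err := zfs.PoolOpen("TESTPOOL")
	if err != nil {
		panic(err)
	}
	defer pool.Close()

	dev := "/tmp/zfs_test_device" // hypothetical vdev path

	// Take the device offline temporarily; upon reboot it reverts
	// to its previous state.
	if err = pool.OfflineTemp(false, dev); err != nil {
		println("offline failed:", err.Error())
	}

	// Bring it back online, asking ZFS to expand into any newly
	// available capacity on the device.
	if err = pool.Online(true, dev); err != nil {
		println("online failed:", err.Error())
	}

	// Clear error counters for that device (an empty string would
	// clear errors for the whole pool).
	if err = pool.Clear(dev); err != nil {
		println("clear failed:", err.Error())
	}
}
```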
/zpool_vdev.go:
--------------------------------------------------------------------------------
1 | package zfs
2 |
3 | // #include <stdlib.h>
4 | // #include <libzfs.h>
5 | // #include "common.h"
6 | // #include "zpool.h"
7 | // #include "zfs.h"
8 | import "C"
9 | import (
10 | 	"fmt"
11 | 	"unsafe"
12 | )
13 |
14 | // Online tries to set the given devices online.
15 | // expand - expand storage
16 | func (pool *Pool) Online(expand bool, devs ...string) (err error) {
17 | 	cflags := C.int(0)
18 | 	if expand {
19 | 		cflags = C.ZFS_ONLINE_EXPAND
20 | 	}
21 | 	for _, dev := range devs {
22 | 		csdev := C.CString(dev)
23 | 		var newstate VDevState
24 | 		if newstate = VDevState(C.set_zpool_vdev_online(pool.list, csdev, cflags)); newstate != VDevStateUnknown {
25 | 			if newstate != VDevStateHealthy {
26 | 				err = fmt.Errorf(
27 | 				"Device '%s' onlined, but remains in faulted state",
28 | 					dev)
29 | 			}
30 | 		} else {
31 | 			err = LastError()
32 | 		}
33 | 		C.free(unsafe.Pointer(csdev))
34 | 	}
35 | 	return
36 | }
37 |
38 | // Offline takes the given device(s) offline.
39 | func (pool *Pool) Offline(force bool, devs ...string) (err error) {
40 | 	return pool.offline(false, force, devs...)
41 | }
42 |
43 | // OfflineTemp takes the given device(s) offline temporarily;
44 | // upon reboot, the specified physical device reverts to its previous state.
45 | // force - Force the device into a faulted state.
46 | func (pool *Pool) OfflineTemp(force bool, devs ...string) (err error) {
47 | 	return pool.offline(true, force, devs...)
48 | }
49 |
50 | // temp - Upon reboot, the specified physical device reverts to its previous state.
51 | // force - Force the device into a faulted state.
52 | func (pool *Pool) offline(temp, force bool, devs ...string) (err error) {
53 | 	for _, dev := range devs {
54 | 		csdev := C.CString(dev)
55 | 		// set_zpool_vdev_offline returns a plain status code
56 | 		// (0 on success, non-zero on failure) rather than a new
57 | 		// vdev state, unlike set_zpool_vdev_online, so any
58 | 		// non-zero return means the offline request failed
59 | 		rc := C.set_zpool_vdev_offline(pool.list, csdev,
60 | 			booleanT(temp), booleanT(force))
61 | 		if rc != 0 {
62 | 			err = LastError()
63 | 		}
64 | 		C.free(unsafe.Pointer(csdev))
65 | 	}
66 | 	return
67 | }
68 |
69 |
70 | // Clear - Clear all errors associated with a pool or a particular device.
71 | func (pool *Pool) Clear(device string) (err error) {
72 | 	var csdev *C.char
73 | 	if len(device) > 0 {
74 | 		csdev = C.CString(device)
75 | 	}
76 | 	if sc := C.do_zpool_clear(pool.list, csdev, C.ZPOOL_NO_REWIND); sc != 0 {
77 | 		err = fmt.Errorf("Pool clear failed")
78 | 	}
79 | 	C.free(unsafe.Pointer(csdev))
80 | 	return
81 | }
82 |
83 | // Attach test
84 | // func (pool *Pool) attach(props PoolProperties, devs ...string) (err error) {
85 | // 	cprops := toCPoolProperties(props)
86 | // 	if cprops != nil {
87 | // 		defer C.nvlist_free(cprops)
88 | // 	} else {
89 | // 		return fmt.Errorf("Out of memory [Pool Attach properties]")
90 | // 	}
91 | // 	cdevs := C.alloc_cstrings(C.int(len(devs)))
92 | // 	if cdevs != nil {
93 | // 		defer C.free(unsafe.Pointer(cdevs))
94 | // 	} else {
95 | // 		return fmt.Errorf("Out of memory [Pool Attach args]")
96 | // 	}
97 | // 	for i, dp := range devs {
98 | // 		tmp := C.CString(dp)
99 | // 		if tmp != nil {
100 | // 			defer C.free(unsafe.Pointer(tmp))
101 | // 		} else {
102 | // 			return fmt.Errorf("Out of memory [Pool Attach dev]")
103 | // 		}
104 | // 		C.strings_setat(cdevs, C.int(i), tmp)
105 | // 	}
106 | // 	// vroot := C.make_root_vdev(pool.list.zph, cprops, 0, 0, 0, 0, len(devs), cdevs)
107 | // 	var nvroot *C.struct_nvlist
108 | // 	if r := C.nvlist_alloc(&nvroot, C.NV_UNIQUE_NAME, 0); r != 0 {
109 | // 		err = errors.New("Failed to allocate root vdev")
110 | // 		return
111 | // 	}
112 | // 	csTypeRoot := C.CString(string(VDevTypeRoot))
113 | // 	r := C.nvlist_add_string(nvroot, C.sZPOOL_CONFIG_TYPE,
114 | // 		csTypeRoot)
115 | // 	C.free(unsafe.Pointer(csTypeRoot))
116 | // 	if r != 0 {
117 | // 		err = errors.New("Failed to allocate root vdev")
118 | // 		return
119 | // 	}
120 | // 	defer C.nvlist_free(nvroot)
121 |
122 | // 	// Now we need to build specs (vdev hierarchy)
123 | // 	if err = buildVDevTree(nvroot, VDevTypeRoot, vdev.Devices, vdev.Spares, vdev.L2Cache, props); err != nil {
124 | // 		return
125 | // 	}
126 |
127 | // 	return
128 | // }
129 |
130 | // func (pool *Pool) AttachForce(devs ...string) (err error) {
131 | // 	return
132 | // }
133 |
134 | // func (pool *Pool) Detach(devs ...string) (err error) {
135 | // 	return
136 | // }
137 |
138 | // func (pool *Pool) DetachForce(devs ...string) (err error) {
139 | // 	return
140 | // }
141 |
142 | // func (pool *Pool) Replace(devs ...string) (err error) {
143 | // 	return
144 | // }
145 |
--------------------------------------------------------------------------------
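Tying the pieces together, the tests above exercise an export/import round trip, both by name and by GUID. Below is a condensed sketch of that flow; it assumes `TESTPOOL` is backed by file vdevs under `/tmp` as in `zpoolTestPoolCreate`, and is illustrative rather than part of the library.

```go
package main

import (
	zfs "github.com/bicomsystems/go-libzfs"
)

func main() {
	// Grab the pool's GUID before exporting so we can re-import by it.
	p, err := zfs.PoolOpen("TESTPOOL")
	if err != nil {
		panic(err)
	}
	guid, err := p.GetProperty(zfs.PoolPropGUID)
	if err != nil {
		p.Close()
		panic(err)
	}
	if err = p.Export(false, "export/import round-trip example"); err != nil {
		p.Close()
		panic(err)
	}
	p.Close()

	// Re-import by GUID, searching /tmp where the file vdevs live.
	p, err = zfs.PoolImportByGUID(guid.Value, []string{"/tmp"})
	if err != nil {
		panic(err)
	}
	defer p.Close()
	println("re-imported pool with GUID", guid.Value)
}
```

Importing by GUID is the safer choice when several exported pools share a name, which is why the tests record `TSTPoolGUID` right after pool creation.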