├── .gitignore ├── pkg ├── devices │ ├── objstore │ │ ├── const.go │ │ ├── nbd_test.go │ │ ├── stowutil.go │ │ ├── compress │ │ │ ├── mode.go │ │ │ └── stow.go │ │ ├── helpers_test.go │ │ ├── persist_test.go │ │ ├── options.go │ │ ├── device_test.go │ │ ├── encrypt │ │ │ ├── mode.go │ │ │ └── stow.go │ │ ├── segment.go │ │ ├── device.go │ │ └── storeparams.go │ ├── dedupdisk │ │ ├── impls │ │ │ ├── const.go │ │ │ ├── pebbleidstore.go │ │ │ ├── mmaplunmap.go │ │ │ ├── fileblkstore.go │ │ │ ├── lz4blkstore.go │ │ │ └── lz4.h │ │ ├── interfaces.go │ │ ├── dedupdisk_test │ │ │ └── dedupdisk_test.go │ │ └── dedupdisk.go │ ├── ramdisk │ │ ├── ramdisk_test.go │ │ └── ramdisk.go │ ├── filedisk │ │ ├── filedisk_test │ │ │ └── filedisk_test.go │ │ └── filedisk.go │ └── testutil │ │ ├── suites.go │ │ └── helpers.go ├── usbdlib │ ├── usbdlib_test │ │ └── ramdisk_test.go │ ├── dev.go │ ├── resp.go │ ├── req.go │ ├── nbdutil_linux.go │ ├── req_test.go │ ├── reqproc.go │ ├── nbdkern_linux.go │ └── nbdstrm.go └── util │ ├── consterr │ └── consterr.go │ ├── strms │ ├── devzero.go │ ├── readfirstcloseall.go │ ├── wtratwtr.go │ └── rdratrdr.go │ ├── zeros.go │ └── zeros_test.go ├── cmd └── usbdsrvd │ ├── conf │ ├── bakdev.go │ ├── capacity.go │ ├── cfg.go │ └── parseargs.go │ ├── dedup.go │ ├── osbd.go │ └── main.go ├── go.mod └── LICENSE /.gitignore: -------------------------------------------------------------------------------- 1 | cmd/usbdsrvd/usbdsrvd 2 | key.* 3 | *.key 4 | *.aes 5 | 6 | -------------------------------------------------------------------------------- /pkg/devices/objstore/const.go: -------------------------------------------------------------------------------- 1 | package objstore 2 | 3 | const ( 4 | osbdPrefix = "osbd" 5 | manifestPrefix = "-manifest_" 6 | devicePrefix = "-dev_" 7 | blockPrefix = "-blk_" 8 | ) 9 | -------------------------------------------------------------------------------- /pkg/devices/dedupdisk/impls/const.go: -------------------------------------------------------------------------------- 1 | package impls 2 | 3 | import ( 4 | "math" 5 | 6 | "github.com/tarndt/usbd/pkg/util/consterr" 7 | ) 8 | 9 | const zeroBlockID = math.MaxUint64 10 | 11 | var errNotPresent = consterr.ConstErr("ID is not present in database") 12 | -------------------------------------------------------------------------------- /pkg/devices/ramdisk/ramdisk_test.go: -------------------------------------------------------------------------------- 1 | package ramdisk 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/tarndt/usbd/pkg/devices/testutil" 7 | ) 8 | 9 | func TestRamdisk(t *testing.T) { 10 | const sizeBytes = 128 * 1024 * 1024 //128 MB 11 | 12 | testutil.TestUserspace(t, NewRAMDisk(sizeBytes), sizeBytes) 13 | testutil.TestNBD(t, NewRAMDisk(sizeBytes), sizeBytes) 14 | } 15 | -------------------------------------------------------------------------------- /pkg/usbdlib/usbdlib_test/ramdisk_test.go: -------------------------------------------------------------------------------- 1 | package usbdlib_test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/tarndt/usbd/pkg/devices/ramdisk" 7 | "github.com/tarndt/usbd/pkg/devices/testutil" 8 | ) 9 | 10 | func TestReferenceImpl(t *testing.T) { 11 | const sizeBytes = 1024 * 1024 * 8 12 | testutil.TestUserspace(t, ramdisk.NewRAMDisk(1024*1024*8), sizeBytes) 13 | } 14 | -------------------------------------------------------------------------------- /pkg/usbdlib/dev.go: -------------------------------------------------------------------------------- 
1 | package usbdlib 2 | 3 | //Device represents the contract required of Go user-space device implementations 4 | type Device interface { 5 | Size() int64 6 | BlockSize() int64 7 | ReadAt(buf []byte, pos int64) (count int, err error) 8 | WriteAt(buf []byte, pos int64) (count int, err error) 9 | Trim(pos int64, count int) error 10 | Flush() error 11 | Close() error 12 | } 13 | -------------------------------------------------------------------------------- /pkg/util/consterr/consterr.go: -------------------------------------------------------------------------------- 1 | package consterr 2 | 3 | //ConstErr allows errors to be declared as string constants 4 | type ConstErr string 5 | 6 | //Error returns the value of the underlying string 7 | func (errstr ConstErr) Error() string { return string(errstr) } 8 | 9 | //ErrNotImplemented can be used while things are under construction and serves 10 | // as an example of how to use this simple package 11 | const ErrNotImplemented = ConstErr("This feature is not implemented") 12 | 13 | var _ error = ErrNotImplemented //compile time type check 14 | -------------------------------------------------------------------------------- /pkg/util/strms/devzero.go: -------------------------------------------------------------------------------- 1 | package strms 2 | 3 | import ( 4 | "bytes" 5 | "io" 6 | ) 7 | 8 | //EmptyRdr is a reader that will always be empty 9 | var EmptyRdr io.Reader = bytes.NewReader([]byte{}) 10 | 11 | //DevZero is like /dev/zero for reading and /dev/null for writing 12 | var DevZero devZero 13 | 14 | type devZero struct{} 15 | 16 | var _ io.ReadWriter = devZero{} 17 | 18 | func (devZero) Read(buf []byte) (int, error) { 19 | for i := range buf { 20 | buf[i] = 0 21 | } 22 | return len(buf), nil 23 | } 24 | 25 | func (devZero) Write(buf []byte) (int, error) { 26 | return len(buf), nil 27 | } 28 | -------------------------------------------------------------------------------- /pkg/util/strms/readfirstcloseall.go: -------------------------------------------------------------------------------- 1 | package strms 2 | 3 | import ( 4 | "io" 5 | ) 6 | 7 | type readFirstCloseList struct { 8 | io.Reader 9 | closers []io.Closer 10 | } 11 | 12 | var _ io.ReadCloser = readFirstCloseList{} 13 | 14 | //NewReadFirstCloseList is a wrapper that reads the provided reader but when 15 | // closed will close the provided closers. Useful when you have io.Readers 16 | // wrapping io.ReadClosers.
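// For example (an illustrative sketch, not code from this repository): a gzip
// reader layered over a file, where Close must release both layers:
//	f, _ := os.Open("data.gz")
//	zrdr, _ := gzip.NewReader(f)
//	rc := NewReadFirstCloseList(zrdr, zrdr, f) //reads decompress; Close() closes the gzip reader, then the file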
17 | func NewReadFirstCloseList(rdr io.Reader, closers ...io.Closer) io.ReadCloser { 18 | return readFirstCloseList{ 19 | Reader: rdr, 20 | closers: closers, 21 | } 22 | } 23 | 24 | func (rfca readFirstCloseList) Close() (err error) { 25 | for _, closer := range rfca.closers { 26 | if clsrErr := closer.Close(); clsrErr != nil && err == nil { 27 | err = clsrErr 28 | } 29 | } 30 | return err 31 | } 32 | -------------------------------------------------------------------------------- /pkg/devices/filedisk/filedisk_test/filedisk_test.go: -------------------------------------------------------------------------------- 1 | package filedisk_test //this is in its own pkg because testutil imports filedisk; this avoids a circular dependency 2 | 3 | import ( 4 | "path/filepath" 5 | "testing" 6 | 7 | "github.com/tarndt/usbd/pkg/devices/filedisk" 8 | "github.com/tarndt/usbd/pkg/devices/testutil" 9 | "github.com/tarndt/usbd/pkg/usbdlib" 10 | ) 11 | 12 | func TestFileDisk(t *testing.T) { 13 | const sizeBytes = 128 * 1024 * 1024 //128 MB 14 | 15 | testutil.TestUserspace(t, createDevice(t, sizeBytes), sizeBytes) 16 | testutil.TestNBD(t, createDevice(t, sizeBytes), sizeBytes) 17 | } 18 | 19 | func createDevice(t *testing.T, sizeBytes uint) usbdlib.Device { 20 | dev, err := filedisk.NewFileDisk(filepath.Join(t.TempDir(), "test.bin"), int64(sizeBytes)) 21 | if err != nil { 22 | t.Fatalf("Could not create file backed test device: %s", err) 23 | } 24 | return dev 25 | } 26 | -------------------------------------------------------------------------------- /pkg/util/strms/wtratwtr.go: -------------------------------------------------------------------------------- 1 | package strms 2 | 3 | import ( 4 | "io" 5 | ) 6 | 7 | //WriterAtWriter is io.WriterAt + io.Writer 8 | type WriterAtWriter interface { 9 | io.WriterAt 10 | io.Writer 11 | } 12 | 13 | type writeAtWriter struct { 14 | cur int64 //we protect underlying writer's position 15 | io.WriterAt 16 | } 17 | 18 | var _ io.Writer = (*writeAtWriter)(nil) 19 | 20 | //NewWriteAtWriter returns a writer that uses an io.WriterAt's WriteAt in conjunction 21 | // with its own position counter. See NewReadAtReader for a detailed explanation 22 | // of why this is often useful. 23 | func NewWriteAtWriter(wtrAt io.WriterAt) WriterAtWriter { 24 | return &writeAtWriter{ 25 | cur: 0, 26 | WriterAt: wtrAt, 27 | } 28 | } 29 | 30 | func (waw *writeAtWriter) Write(buf []byte) (n int, err error) { 31 | n, err = waw.WriteAt(buf, waw.cur) 32 | waw.cur += int64(n) 33 | return n, err 34 | } 35 | -------------------------------------------------------------------------------- /pkg/util/strms/rdratrdr.go: -------------------------------------------------------------------------------- 1 | package strms 2 | 3 | import ( 4 | "io" 5 | ) 6 | 7 | //ReadAtReader is io.ReaderAt + io.Reader 8 | type ReadAtReader interface { 9 | io.ReaderAt 10 | io.Reader 11 | } 12 | 13 | type readAtReader struct { 14 | cur int64 //we protect underlying reader's position 15 | io.ReaderAt 16 | } 17 | 18 | var _ io.Reader = (*readAtReader)(nil) 19 | 20 | //NewReadAtReader returns a reader that uses an io.ReaderAt's ReadAt in conjunction 21 | // with its own position counter. This is useful when you have an io.ReaderAt that 22 | // is not also an io.Reader, or you want to share an io.ReaderAt that is also an io.Reader 23 | // without modifying its position. For example, os.File's position is on the OS/kernel side, 24 | // meaning normally sharing it requires locking... or using ReadAt as we do here.
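// An illustrative sketch (not code from this repository): two independent
// sequential readers over one shared *os.File, neither touching the shared
// kernel-side offset:
//	f, _ := os.Open("blocks.bin")
//	r1, r2 := NewReadAtReader(f), NewReadAtReader(f)
//	io.CopyN(io.Discard, r1, 512) //advances only r1's private position
//	io.CopyN(io.Discard, r2, 512) //r2 still reads from offset 0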
25 | func NewReadAtReader(rdrAt io.ReaderAt) ReadAtReader { 26 | return &readAtReader{ 27 | cur: 0, 28 | ReaderAt: rdrAt, 29 | } 30 | } 31 | 32 | func (rar *readAtReader) Read(buf []byte) (n int, err error) { 33 | n, err = rar.ReadAt(buf, rar.cur) 34 | rar.cur += int64(n) 35 | return n, err 36 | } 37 | -------------------------------------------------------------------------------- /pkg/devices/dedupdisk/interfaces.go: -------------------------------------------------------------------------------- 1 | package dedupdisk 2 | 3 | import "io" 4 | 5 | //FlushClose combines Close() and Flush() in one interface 6 | type FlushClose interface { 7 | io.Closer 8 | Flush() error 9 | } 10 | 11 | //LUNMap describes the capability to map logical block addresses to deduplicated 12 | // blocks of data. Any series of blocks can then be seen as a series of dedup IDs 13 | type LUNMap interface { 14 | GetID(block uint64) (dedupID uint64, err error) 15 | GetIDs(startBlock uint64, dedupIDs []uint64) error 16 | PutID(block uint64, dedupID uint64) error 17 | Size() int64 18 | FlushClose 19 | } 20 | 21 | //IDStore describes the capability to map content (the hashes of blocks) to dedup IDs 22 | type IDStore interface { 23 | GetID(block []byte) (dedupID uint64, hash []byte, err error) 24 | PutID(hash []byte, dedupID uint64) error 25 | GetErrNotPresent() error 26 | FlushClose 27 | } 28 | 29 | //BlockStore describes the capability to store and retrieve the contents of blocks 30 | // by their dedup IDs 31 | type BlockStore interface { 32 | GetBlock(dedupID uint64, buf []byte) error 33 | PutBlock(buf []byte) (dedupID uint64, err error) 34 | FlushClose 35 | } 36 | -------------------------------------------------------------------------------- /pkg/util/zeros.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( //Dark magic to make simple things fast...
4 | "reflect" 5 | "unsafe" 6 | ) 7 | 8 | //IsZeros confirms the provided byte slice contains only zeros by returning true 9 | func IsZeros(block []byte) bool { 10 | const maxAlignedMagic int = 0xFFFFFFF8 11 | 12 | bytesHdr := *((*reflect.SliceHeader)(unsafe.Pointer(&block))) 13 | alignedCount := (bytesHdr.Len & maxAlignedMagic) / 8 14 | var remainingStart int 15 | 16 | if alignedCount > 0 { 17 | longsHdr := reflect.SliceHeader{Data: bytesHdr.Data, Len: alignedCount, Cap: alignedCount} 18 | longs := *(*[]uint64)(unsafe.Pointer(&longsHdr)) 19 | for _, long := range longs { 20 | if long != 0 { 21 | return false 22 | } 23 | } 24 | remainingStart = alignedCount * 8 25 | } 26 | 27 | for _, char := range block[remainingStart:] { 28 | if char != 0 { 29 | return false 30 | } 31 | } 32 | 33 | return true 34 | } 35 | 36 | //ZeroFill ensures the provided byte slice contains only zeros 37 | func ZeroFill(block []byte) { 38 | if len(block) < 1 { 39 | return 40 | } 41 | 42 | block[0] = 0 43 | for i := 1; i < len(block); i <<= 1 { 44 | copy(block[i:], block[:i]) 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /cmd/usbdsrvd/conf/bakdev.go: -------------------------------------------------------------------------------- 1 | package conf 2 | 3 | import ( 4 | "strings" 5 | ) 6 | 7 | //These are enums that map to each of the available device implementations 8 | const ( 9 | DevUnknown BackingDevice = iota 10 | DevMem 11 | DevFile 12 | DevDedupFile 13 | DevObjStore 14 | ) 15 | 16 | //BackingDevice type represents the available device implementations 17 | type BackingDevice uint8 18 | 19 | //NewBackingDevice constructs a BackingDevice from a human textual short name (from config) 20 | func NewBackingDevice(devDesc string) BackingDevice { 21 | switch strings.ToLower(devDesc) { 22 | case "mem", "memory": 23 | return DevMem 24 | case "file", "disk": 25 | return DevFile 26 | case "dedup", "dedup-file", "dedup-disk": 27 | return DevDedupFile 28 | case "osbd", "objstore": 29 | return DevObjStore 30 | default: 31 | return DevUnknown 32 | } 33 | } 34 | 35 | //String is a human readable description of the device for display 36 | func (bd BackingDevice) String() string { 37 | switch bd { 38 | case DevMem: 39 | return "ramdisk" 40 | case DevFile: 41 | return "filedisk" 42 | case DevDedupFile: 43 | return "deduplicated-filedisk" 44 | case DevObjStore: 45 | return "locally-cached objectstore" 46 | default: 47 | return "unknown" 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /cmd/usbdsrvd/conf/capacity.go: -------------------------------------------------------------------------------- 1 | package conf 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | 7 | "github.com/dustin/go-humanize" 8 | ) 9 | 10 | //Capacity is a count/size in bytes, its primary purpose is to allow the flags 11 | // package to parse human IEC values like "10 MiB" 12 | type Capacity int64 13 | 14 | //String returns capacity in human readable IEC units, see: flag.Value interface 15 | func (c *Capacity) String() string { 16 | return humanize.IBytes(uint64(*c)) 17 | } 18 | 19 | //Set is used by the flag package, see: flag.Value interface 20 | func (c *Capacity) Set(str string) error { 21 | val, err := humanize.ParseBytes(str) 22 | if err != nil { 23 | return fmt.Errorf("Parseing %q failed: %w", str, err) 24 | } 25 | 26 | *c = Capacity(val) 27 | return nil 28 | } 29 | 30 | //Get is used by the flag package, see: flag.Getter interface 31 | func (c *Capacity) Get() 
interface{} { return Capacity(*c) } 32 | 33 | //Analogous to methods like flag.DurationVar for use in parsing capacity command-line args 34 | func flagCapacityVar(p *Capacity, name string, value Capacity, usage string) { 35 | flag.CommandLine.Var(newCapacityValue(value, p), name, usage) 36 | } 37 | 38 | func newCapacityValue(val Capacity, p *Capacity) *Capacity { 39 | *p = val 40 | return p 41 | } 42 | -------------------------------------------------------------------------------- /cmd/usbdsrvd/dedup.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "path/filepath" 6 | 7 | "github.com/tarndt/usbd/cmd/usbdsrvd/conf" 8 | "github.com/tarndt/usbd/pkg/devices/dedupdisk" 9 | "github.com/tarndt/usbd/pkg/devices/dedupdisk/impls" 10 | "github.com/tarndt/usbd/pkg/usbdlib" 11 | ) 12 | 13 | func dedupDiskFromCfg(cfg *conf.Config) (usbdlib.Device, error) { 14 | blockSize := new(usbdlib.DefaultBlockSize).BlockSize() 15 | 16 | lunMap, err := impls.NewMmapLUNmap(filepath.Join(cfg.StorageDirectory, cfg.StorageName+".map"), blockSize, int64(cfg.StorageBytes)) 17 | if err != nil { 18 | return nil, fmt.Errorf("dedupDiskFromCfg: Could not create LUN map; Details: %w", err) 19 | } 20 | 21 | idStore, err := impls.NewPebbleIDStore(filepath.Join(cfg.StorageDirectory, cfg.StorageName+".ids"), int(cfg.DedupConfig.IDStoreMemoryCacheBytes), 5) //5 = bloom filter bits per entry 22 | if err != nil { 23 | return nil, fmt.Errorf("dedupDiskFromCfg: Could not create ID store; Details: %w", err) 24 | } 25 | 26 | blockStore, err := impls.NewFileBlockStore(filepath.Join(cfg.StorageDirectory, cfg.StorageName+".blks"), blockSize) 27 | if err != nil { 28 | return nil, fmt.Errorf("dedupDiskFromCfg: Could not create block store; Details: %w", err) 29 | } 30 | 31 | return dedupdisk.NewDedupDisk(lunMap, idStore, blockStore), nil 32 | } 33 | -------------------------------------------------------------------------------- /pkg/devices/objstore/nbd_test.go: -------------------------------------------------------------------------------- 1 | package objstore 2 | 3 | import ( 4 | "net/http/httptest" 5 | "os" 6 | "testing" 7 | 8 | "github.com/tarndt/usbd/pkg/devices/objstore/compress" 9 | "github.com/tarndt/usbd/pkg/devices/objstore/encrypt" 10 | "github.com/tarndt/usbd/pkg/devices/testutil" 11 | 12 | "github.com/graymeta/stow" 13 | "github.com/graymeta/stow/s3" 14 | "github.com/johannesboyne/gofakes3" 15 | "github.com/johannesboyne/gofakes3/backend/s3mem" 16 | ) 17 | 18 | func TestNBD(t *testing.T) { 19 | const rootUID = 0 20 | if os.Geteuid() != rootUID { 21 | t.Skip("Must be root for this test, try: go test -c -race && sudo ./objstore.test -test.v -test.timeout=120s && rm ./objstore.test") 22 | } 23 | 24 | srv := httptest.NewServer(gofakes3.New(s3mem.New()).Server()) 25 | defer srv.Close() 26 | cfg := stow.ConfigMap{ 27 | s3.ConfigEndpoint: srv.URL, 28 | s3.ConfigAccessKeyID: "fake", 29 | s3.ConfigSecretKey: "fake", 30 | } 31 | if err := ValidateConfig(KindS3, cfg); err != nil { 32 | t.Fatalf("Could not validate store config: %s", err) 33 | } 34 | store, err := NewStore(KindS3, cfg) 35 | if err != nil { 36 | t.Fatalf("Could not create s3 object store: %s", err) 37 | } 38 | 39 | key, err := encrypt.MakeRandomAESKey() 40 | if err != nil { 41 | t.Fatalf("Could not create AES key: %s", err) 42 | } 43 | const totalBytes = 256 * 1024 * 1024 //256 MB 44 | dev := createDevice( 45 | t, createContainer(t, store), "", totalBytes, totalBytes, 46 | OptCompressRemoteObjects(compress.ModeS2), 47 |
OptEncrypt{Mode: encrypt.ModeAESRec, Key: key}, 48 | ) 49 | 50 | testutil.TestNBD(t, dev, totalBytes) 51 | } 52 | -------------------------------------------------------------------------------- /pkg/usbdlib/resp.go: -------------------------------------------------------------------------------- 1 | package usbdlib 2 | 3 | import ( 4 | "bytes" 5 | "encoding/binary" 6 | "fmt" 7 | "io" 8 | ) 9 | 10 | //NBD protocol details: https://github.com/NetworkBlockDevice/nbd/blob/master/doc/proto.md#simple-reply-message 11 | type response struct { 12 | respBuffer *bytes.Buffer 13 | reqType uint32 14 | readBuffer []byte 15 | errCode nbdErr 16 | } 17 | 18 | func newResponse() interface{} { 19 | return &response{ 20 | respBuffer: bytes.NewBuffer(newNbdRawResp()[:0]), 21 | } 22 | } 23 | 24 | func (resp *response) Set(req *request, errCode nbdErr) { 25 | resp.reqType = req.reqType 26 | resp.errCode = errCode 27 | 28 | //Build response 29 | resp.respBuffer.Truncate(0) 30 | binary.Write(resp.respBuffer, binary.LittleEndian, &nbdReplyMagic) //TODO use binary.LittleEndian to do this once and cache []bytes 31 | binary.Write(resp.respBuffer, binary.BigEndian, &errCode) 32 | resp.respBuffer.Write(req.handle) 33 | } 34 | 35 | func (resp *response) GetReadBuffer(req *request) []byte { 36 | if cap(resp.readBuffer) < req.count { 37 | resp.readBuffer = make([]byte, req.count) 38 | } else { 39 | resp.readBuffer = resp.readBuffer[:req.count] 40 | } 41 | return resp.readBuffer 42 | } 43 | 44 | func (resp *response) Write(strm io.Writer) error { 45 | _, err := resp.respBuffer.WriteTo(strm) 46 | if err != nil { 47 | return fmt.Errorf("Could not write response to response stream: %w", err) 48 | } 49 | if resp.reqType == nbdRead { 50 | if _, err = strm.Write(resp.readBuffer); err != nil { 51 | return fmt.Errorf("Could not write data read to response stream: %w", err) 52 | } 53 | } 54 | return nil 55 | } 56 | -------------------------------------------------------------------------------- /pkg/devices/filedisk/filedisk.go: -------------------------------------------------------------------------------- 1 | package filedisk 2 | 3 | import ( 4 | "bufio" 5 | "fmt" 6 | "os" 7 | 8 | "github.com/tarndt/usbd/pkg/usbdlib" 9 | ) 10 | 11 | //FileDisk is a simple file backed user-space block device 12 | type FileDisk struct { 13 | *os.File 14 | SizeBytes int64 15 | usbdlib.DefaultBlockSize 16 | } 17 | 18 | //NewFileDisk is the constructor for simple file backed devices 19 | func NewFileDisk(filename string, ifCreateSize int64) (*FileDisk, error) { 20 | fdsk := new(FileDisk) 21 | var err error 22 | if fdsk.File, err = os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0); err != nil { 23 | return nil, fmt.Errorf("Could not open backing file %q: %w", filename, err) 24 | } 25 | if info, err := fdsk.Stat(); err != nil { 26 | return nil, fmt.Errorf("Could not stat backing file %q: %w", filename, err) 27 | } else if size := info.Size(); size < 1 { //Create disk file 28 | strm := bufio.NewWriter(fdsk) 29 | for i := int64(0); i < ifCreateSize; i++ { 30 | if err = strm.WriteByte(0); err != nil { 31 | return nil, fmt.Errorf("Could not zero fill backing file %q, write failed: %w", filename, err) 32 | } 33 | } 34 | if err = strm.Flush(); err != nil { 35 | return nil, fmt.Errorf("Could not zero fill backing file %q, flush failed: %w", filename, err) 36 | } 37 | if err = fdsk.Sync(); err != nil { 38 | return nil, fmt.Errorf("Could not zero fill backing file %q, sync failed: %w", filename, err) 39 | } 40 | fdsk.SizeBytes = ifCreateSize 41 | } else {
//disk file exists 42 | fdsk.SizeBytes = size 43 | } 44 | return fdsk, nil 45 | } 46 | 47 | //Size of this device in bytes 48 | func (fdsk *FileDisk) Size() int64 { 49 | return fdsk.SizeBytes 50 | } 51 | 52 | //Trim fulfills part of usbdlib.Device 53 | func (*FileDisk) Trim(pos int64, count int) error { 54 | return nil 55 | } 56 | 57 | //Flush fulfills part of usbdlib.Device 58 | func (fdsk *FileDisk) Flush() error { 59 | return fdsk.Sync() 60 | } 61 | -------------------------------------------------------------------------------- /pkg/devices/objstore/stowutil.go: -------------------------------------------------------------------------------- 1 | package objstore 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/graymeta/stow" 7 | 8 | //Load drivers 9 | "github.com/graymeta/stow/azure" //Azure storage 10 | "github.com/graymeta/stow/b2" //Backblaze storage 11 | "github.com/graymeta/stow/google" //Google storage 12 | "github.com/graymeta/stow/local" //local storage 13 | "github.com/graymeta/stow/oracle" //oracle storage 14 | "github.com/graymeta/stow/s3" //s3 storage 15 | "github.com/graymeta/stow/sftp" //sftp storage 16 | "github.com/graymeta/stow/swift" //swift storage 17 | ) 18 | 19 | //The list of all the known ObjectStore (stow.Location) kinds without having 20 | // to import the driver package for each. 21 | const ( 22 | KindAzure = azure.Kind 23 | KindBackBlazeB2 = b2.Kind 24 | KindGoogleCloudStorage = google.Kind 25 | KindLocalTest = local.Kind 26 | KindS3 = s3.Kind 27 | KindOracleObjectStorage = oracle.Kind 28 | KindSFTP = sftp.Kind 29 | KindSwift = swift.Kind 30 | ) 31 | 32 | //SupportsMetaData returns false if the provided ObjectStore kind is known to 33 | // not support metadata 34 | func SupportsMetaData(kind string) bool { 35 | switch kind { 36 | case KindLocalTest, KindSFTP: 37 | return false 38 | } 39 | return true 40 | } 41 | 42 | // NewStore dials stow storage. See stow.Dial for more info 43 | func NewStore(kind string, config stow.Config) (stow.Location, error) { 44 | return stow.Dial(kind, config) 45 | } 46 | 47 | // ValidateConfig verifies config parameters.
See stow.Validate for more info 48 | func ValidateConfig(kind string, config stow.Config) error { 49 | return stow.Validate(kind, config) 50 | } 51 | 52 | func describeContainer(container stow.Container) string { 53 | return fmt.Sprintf("remote container %q (%q)", container.ID(), container.Name()) 54 | } 55 | 56 | func describeItem(item stow.Item) string { 57 | return fmt.Sprintf("remote object %q (%q at %q)", item.ID(), item.Name(), item.URL()) 58 | } 59 | -------------------------------------------------------------------------------- /cmd/usbdsrvd/osbd.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | 8 | "github.com/tarndt/usbd/cmd/usbdsrvd/conf" 9 | "github.com/tarndt/usbd/pkg/devices/objstore" 10 | "github.com/tarndt/usbd/pkg/devices/objstore/compress" 11 | "github.com/tarndt/usbd/pkg/devices/objstore/encrypt" 12 | "github.com/tarndt/usbd/pkg/usbdlib" 13 | 14 | "github.com/graymeta/stow" 15 | ) 16 | 17 | func osbdFromCfg(cfg *conf.Config) (usbdlib.Device, error) { 18 | oscfg := cfg.ObjStoreConfig 19 | 20 | store, err := objstore.NewStore(oscfg.Kind, oscfg.Config) 21 | if err != nil { 22 | return nil, fmt.Errorf("Could not create object store: %w", err) 23 | } 24 | 25 | container, err := store.Container(cfg.StorageName) 26 | if err != nil { 27 | if !errors.Is(err, stow.ErrNotFound) { 28 | return nil, fmt.Errorf("Could not open remote container %q: %w", cfg.StorageName, err) 29 | } 30 | if container, err = store.CreateContainer(cfg.StorageName); err != nil { 31 | return nil, fmt.Errorf("Could not create remote container %q: %w", cfg.StorageName, err) 32 | } 33 | } 34 | 35 | opts := []objstore.Option{objstore.OptConcurFlushCount(oscfg.ConcurFlush)} 36 | if oscfg.LocalDiskCacheBytes > 0 { 37 | opts = append(opts, objstore.OptQuotaBytes(oscfg.LocalDiskCacheBytes)) 38 | } 39 | if oscfg.PersistCache { 40 | opts = append(opts, objstore.OptPersistCache(true)) 41 | } 42 | if oscfg.AESMode != encrypt.ModeIdentity { 43 | opts = append(opts, objstore.OptEncrypt{Mode: oscfg.AESMode, Key: oscfg.AESKey}) 44 | } 45 | if oscfg.CompressMode != compress.ModeIdentity { 46 | opts = append(opts, objstore.OptCompressRemoteObjects(oscfg.CompressMode)) 47 | } 48 | if oscfg.FlushInterval > 0 { 49 | opts = append(opts, objstore.OptAutoflushIval(oscfg.FlushInterval)) 50 | } 51 | if !objstore.SupportsMetaData(oscfg.Kind) { 52 | opts = append(opts, objstore.OptNoMetadataSupport(true)) 53 | } 54 | 55 | return objstore.NewDevice( 56 | context.Background(), container, cfg.StorageDirectory, 57 | uint(cfg.StorageBytes), uint(oscfg.ObjectBytes), opts..., 58 | ) 59 | } 60 | -------------------------------------------------------------------------------- /pkg/devices/objstore/compress/mode.go: -------------------------------------------------------------------------------- 1 | package compress 2 | 3 | import ( 4 | "compress/gzip" 5 | "fmt" 6 | "io" 7 | 8 | "github.com/klauspost/compress/s2" 9 | ) 10 | 11 | //Mode represents a compression mode 12 | type Mode uint8 13 | 14 | //Enumerate available modes and their textual names 15 | const ( 16 | ModeIdentity Mode = iota 17 | ModeUnknown 18 | ModeS2 19 | ModeGzip 20 | 21 | ModeIdentityName = "identity" 22 | ModeS2Name = "s2" 23 | ModeGzipName = "gzip" 24 | ModeUknownName = "unknown" 25 | ) 26 | 27 | //ModeFromName constructs a Mode from a textual name 28 | func ModeFromName(name string) Mode { 29 | switch name { 30 | case "", ModeIdentityName: 31 | return
ModeIdentity 32 | case ModeS2Name: 33 | return ModeS2 34 | case ModeGzipName: 35 | return ModeGzip 36 | } 37 | return ModeUnknown 38 | } 39 | 40 | //AlgoName returns the textual name of a Mode 41 | func (m Mode) AlgoName() string { 42 | switch m { 43 | case ModeIdentity: 44 | return ModeIdentityName 45 | case ModeS2: 46 | return ModeS2Name 47 | case ModeGzip: 48 | return ModeGzipName 49 | } 50 | return ModeUknownName 51 | } 52 | 53 | //String is a synonym for AlgoName 54 | func (m Mode) String() string { 55 | return m.AlgoName() 56 | } 57 | 58 | //NewReader constructs a reader wrapper that applies this mode's decompression 59 | func (m Mode) NewReader(rdr io.Reader) (io.Reader, error) { 60 | switch m { 61 | case ModeIdentity: 62 | return rdr, nil 63 | case ModeGzip: 64 | return gzip.NewReader(rdr) 65 | case ModeS2: 66 | return s2.NewReader(rdr), nil 67 | } 68 | return nil, fmt.Errorf("Cannot create decompressor for unknown compression mode") 69 | } 70 | 71 | //NewWriter constructs a writer wrapper that applies this mode's compression 72 | func (m Mode) NewWriter(wtr io.WriteCloser) (io.WriteCloser, error) { 73 | switch m { 74 | case ModeIdentity: 75 | return wtr, nil 76 | case ModeGzip: 77 | return gzip.NewWriter(wtr), nil 78 | case ModeS2: 79 | return s2.NewWriter(wtr), nil 80 | } 81 | return nil, fmt.Errorf("Cannot create compressor for unknown compression mode") 82 | } 83 | -------------------------------------------------------------------------------- /pkg/devices/ramdisk/ramdisk.go: -------------------------------------------------------------------------------- 1 | package ramdisk 2 | 3 | import ( 4 | "io" 5 | "sync/atomic" 6 | 7 | "github.com/tarndt/usbd/pkg/usbdlib" 8 | "github.com/tarndt/usbd/pkg/util/consterr" 9 | ) 10 | 11 | const errClosed = consterr.ConstErr("Device is shut down") 12 | 13 | //RAMDisk is a simple memory (heap) backed user-space block device 14 | type RAMDisk struct { 15 | disk []byte 16 | size int 17 | usbdlib.DefaultBlockSize 18 | 19 | atomicOnline uint64 20 | } 21 | 22 | //NewRAMDisk constructs a memory backed user-space device of the provided size 23 | func NewRAMDisk(size int64) *RAMDisk { 24 | return &RAMDisk{ 25 | disk: make([]byte, int(size)), 26 | size: int(size), 27 | atomicOnline: 1, 28 | } 29 | } 30 | 31 | //Size of this device in bytes 32 | func (rdsk *RAMDisk) Size() int64 { 33 | return int64(rdsk.size) 34 | } 35 | 36 | //ReadAt fulfills io.ReaderAt and in turn part of usbdlib.Device 37 | func (rdsk *RAMDisk) ReadAt(buf []byte, pos int64) (count int, err error) { 38 | if atomic.LoadUint64(&rdsk.atomicOnline) != 1 { 39 | return 0, errClosed 40 | } 41 | 42 | count = len(buf) 43 | end := int(pos) + count 44 | if end > rdsk.size { 45 | return 0, io.EOF 46 | } 47 | copy(buf, rdsk.disk[pos:end]) 48 | return 49 | } 50 | 51 | //WriteAt fulfills io.WriterAt and in turn part of usbdlib.Device 52 | func (rdsk *RAMDisk) WriteAt(buf []byte, pos int64) (count int, err error) { 53 | if atomic.LoadUint64(&rdsk.atomicOnline) != 1 { 54 | return 0, errClosed 55 | } 56 | 57 | count = len(buf) 58 | end := int(pos) + count 59 | if end > rdsk.size { 60 | return 0, io.ErrUnexpectedEOF 61 | } 62 | copy(rdsk.disk[pos:end], buf) 63 | return 64 | } 65 | 66 | //Trim fulfills part of usbdlib.Device 67 | func (rdsk *RAMDisk) Trim(pos int64, count int) error { 68 | if atomic.LoadUint64(&rdsk.atomicOnline) != 1 { 69 | return errClosed 70 | } 71 | 72 | return nil 73 | } 74 | 75 | //Flush fulfills part of usbdlib.Device 76 | func (rdsk *RAMDisk) Flush() error { 77 | if
atomic.LoadUint64(&rdsk.atomicOnline) != 1 { 78 | return errClosed 79 | } 80 | 81 | return nil 82 | } 83 | 84 | //Close fulfills io.Closer and in turn part of usbdlib.Device 85 | func (rdsk *RAMDisk) Close() error { 86 | atomic.StoreUint64(&rdsk.atomicOnline, 0) 87 | return nil 88 | } 89 | -------------------------------------------------------------------------------- /pkg/devices/objstore/helpers_test.go: -------------------------------------------------------------------------------- 1 | package objstore 2 | 3 | import ( 4 | "net/http/httptest" 5 | "strconv" 6 | "testing" 7 | "time" 8 | 9 | "github.com/tarndt/usbd/pkg/devices/testutil" 10 | "github.com/tarndt/usbd/pkg/usbdlib" 11 | 12 | "github.com/graymeta/stow" 13 | "github.com/graymeta/stow/s3" 14 | "github.com/johannesboyne/gofakes3" 15 | "github.com/johannesboyne/gofakes3/backend/s3mem" 16 | ) 17 | 18 | func s3Server() *httptest.Server { 19 | return httptest.NewServer(gofakes3.New(s3mem.New()).Server()) 20 | } 21 | 22 | func s3Store(t *testing.T, srv *httptest.Server) stow.Location { 23 | cfg := stow.ConfigMap{ 24 | s3.ConfigEndpoint: srv.URL, 25 | s3.ConfigAccessKeyID: "fake", 26 | s3.ConfigSecretKey: "fake", 27 | } 28 | if err := ValidateConfig(KindS3, cfg); err != nil { 29 | t.Fatalf("Could not validate store config: %s", err) 30 | } 31 | 32 | store, err := NewStore(KindS3, cfg) 33 | if err != nil { 34 | t.Fatalf("Could not create s3 object store: %s", err) 35 | } 36 | 37 | return store 38 | } 39 | 40 | func createContainer(t *testing.T, store stow.Location) stow.Container { 41 | container, err := store.CreateContainer(strconv.FormatInt(time.Now().UnixNano(), 36)) 42 | if err != nil { 43 | t.Fatalf("Could not create container: %s", err) 44 | } 45 | return container 46 | } 47 | 48 | func createDevice(t *testing.T, container stow.Container, optCacheDir string, totalBytes, objectBytes uint, options ...Option) usbdlib.Device { 49 | if optCacheDir == "" { 50 | optCacheDir = t.TempDir() 51 | } 52 | 53 | dev, err := NewDevice(testutil.CreateContext(t), container, optCacheDir, totalBytes, objectBytes, options...) 54 | if err != nil { 55 | t.Fatalf("Could not create device: %s", err) 56 | } 57 | return dev 58 | } 59 | 60 | func testExistingRemote(t *testing.T, container stow.Container, optReUseCacheDir string, totalBytes, objectBytes uint, expectedHash []byte, options ...Option) { 61 | t.Run("existing-remote-data", func(t *testing.T) { 62 | dev := createDevice(t, container, optReUseCacheDir, totalBytes, objectBytes, options...)
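//Confirm a device rebuilt from the same remote container (and, optionally, the same cache dir) serves back the previously written pattern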
63 | testutil.TestReadHash(t, dev, expectedHash) 64 | if dev.Size() > 0 { //Double check device is still writable 65 | if _, err := dev.WriteAt([]byte{7}, 0); err != nil { 66 | t.Fatalf("Reconstructed device was not still writable: %s", err) 67 | } 68 | } 69 | testutil.TestClose(t, dev) 70 | }) 71 | } 72 | -------------------------------------------------------------------------------- /pkg/devices/objstore/persist_test.go: -------------------------------------------------------------------------------- 1 | package objstore 2 | 3 | import ( 4 | "io" 5 | "testing" 6 | 7 | "github.com/tarndt/usbd/pkg/devices/objstore/compress" 8 | "github.com/tarndt/usbd/pkg/devices/testutil" 9 | "github.com/tarndt/usbd/pkg/util/consterr" 10 | 11 | "github.com/graymeta/stow" 12 | ) 13 | 14 | const errNoDownloadAllowed = consterr.ConstErr("Downloads are disabled") 15 | 16 | func TestCachePersistence(t *testing.T) { 17 | t.Parallel() 18 | 19 | srv := s3Server() 20 | defer srv.Close() 21 | 22 | store := s3Store(t, srv) 23 | options := []Option{ 24 | OptCompressRemoteObjects(compress.ModeS2), 25 | OptPersistCache(true), 26 | } 27 | 28 | const ( 29 | totalBytes = 8 * 1024 * 1024 //8 MB 30 | objectBytes = 512 * 1024 //512 KB 31 | ) 32 | 33 | container := createContainer(t, store) 34 | cacheDir := t.TempDir() 35 | dev := createDevice(t, container, cacheDir, totalBytes, objectBytes, options...) 36 | 37 | if !dev.(*device).persistCache { 38 | t.Fatal("Cache persistence is off when it should be on!") 39 | } 40 | 41 | //Some standard tests 42 | testutil.TestDevSize(t, dev, totalBytes) 43 | testutil.TestReadEmpty(t, dev, objectBytes) 44 | 45 | //Test persistence specifically 46 | devHash := testutil.TestWriteReadPattern(t, dev) 47 | testutil.TestClose(t, dev) 48 | 49 | //noRemoteDownloadContainer will explode if an item's Open is called. 50 | // This lets us know that all of the items were serviced from local cache 51 | // and no downloads of remote objects occurred 52 | testExistingRemote(t, noRemoteDownloadContainer{container}, cacheDir, totalBytes, objectBytes, devHash, options...)
53 | } 54 | 55 | type noRemoteDownloadContainer struct { 56 | stow.Container 57 | } 58 | 59 | func (cont noRemoteDownloadContainer) Item(id string) (stow.Item, error) { 60 | item, err := cont.Container.Item(id) 61 | if item != nil { 62 | item = noDownloadItem{item} 63 | } 64 | return item, err 65 | } 66 | 67 | func (cont noRemoteDownloadContainer) Items(prefix, cursor string, count int) ([]stow.Item, string, error) { 68 | items, cursor, err := cont.Container.Items(prefix, cursor, count) 69 | for i := range items { 70 | items[i] = noDownloadItem{items[i]} 71 | } 72 | return items, cursor, err 73 | } 74 | 75 | type noDownloadItem struct { 76 | stow.Item 77 | } 78 | 79 | func (item noDownloadItem) Open() (io.ReadCloser, error) { 80 | return nil, errNoDownloadAllowed 81 | } 82 | -------------------------------------------------------------------------------- /pkg/devices/dedupdisk/dedupdisk_test/dedupdisk_test.go: -------------------------------------------------------------------------------- 1 | package dedupdisk_test 2 | 3 | import ( 4 | "errors" 5 | "path/filepath" 6 | "testing" 7 | 8 | "github.com/tarndt/usbd/pkg/devices/dedupdisk" 9 | "github.com/tarndt/usbd/pkg/devices/dedupdisk/impls" 10 | "github.com/tarndt/usbd/pkg/devices/testutil" 11 | "github.com/tarndt/usbd/pkg/usbdlib" 12 | "github.com/tarndt/usbd/pkg/util/consterr" 13 | ) 14 | 15 | func TestDedupDisk(t *testing.T) { 16 | const sizeBytes = 128 * 1024 * 1024 //128 MB 17 | 18 | for _, bs := range blockStoreConstructors() { 19 | t.Run("with-"+bs.name, func(t *testing.T) { 20 | testutil.TestUserspace(t, createDevice(t, sizeBytes, bs.newBlockStore), sizeBytes) 21 | testutil.TestNBD(t, createDevice(t, sizeBytes, bs.newBlockStore), sizeBytes) 22 | }) 23 | } 24 | } 25 | 26 | type blockStoreConstructor func(filename string, blockSize int64) (dedupdisk.BlockStore, error) 27 | 28 | func createDevice(t *testing.T, sizeBytes uint, newBlockStore blockStoreConstructor) usbdlib.Device { 29 | cacheSize := sizeBytes / 4 30 | if cacheSize < 1 { 31 | cacheSize = sizeBytes 32 | } 33 | blockSize := new(usbdlib.DefaultBlockSize).BlockSize() 34 | storeDir := t.TempDir() 35 | 36 | lunMap, err := impls.NewMmapLUNmap(filepath.Join(storeDir, "test.map"), blockSize, int64(sizeBytes)) 37 | if err != nil { 38 | t.Fatalf("Could not create LUN map; Details: %s", err) 39 | } 40 | 41 | idStore, err := impls.NewPebbleIDStore(filepath.Join(storeDir, "test.ids"), int(cacheSize), 5) 42 | if err != nil { 43 | t.Fatalf("Could not create ID store; Details: %s", err) 44 | } 45 | 46 | blockStore, err := newBlockStore(filepath.Join(storeDir, "test.blks"), blockSize) 47 | if err != nil { 48 | if errors.Is(err, consterr.ErrNotImplemented) { 49 | t.Skip("Skipping experimental block store") 50 | } 51 | t.Fatalf("Could not create block store; Details: %s", err) 52 | } 53 | 54 | return dedupdisk.NewDedupDisk(lunMap, idStore, blockStore) 55 | } 56 | 57 | type blockStore struct { 58 | newBlockStore blockStoreConstructor 59 | name string 60 | } 61 | 62 | func blockStoreConstructors() []blockStore { 63 | return []blockStore{ 64 | blockStore{ 65 | newBlockStore: func(filename string, blockSize int64) (dedupdisk.BlockStore, error) { 66 | return impls.NewFileBlockStore(filename, blockSize) 67 | }, 68 | name: "FileBlockStore", 69 | }, 70 | blockStore{ 71 | newBlockStore: func(filename string, blockSize int64) (dedupdisk.BlockStore, error) { 72 | return impls.NewLZ4BlockStore(filename, blockSize) 73 | }, 74 | name: "LZ4BlockStore", 75 | }, 76 | } 77 | } 78 |
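Aside — a minimal sketch (not a file in this repository; names like scratchDisk are hypothetical) of the smallest usbdlib.Device the shared test suites above can drive: a RAM-backed slice plus no-op control methods, mirroring ramdisk.go:

package scratch

import (
	"io"

	"github.com/tarndt/usbd/pkg/usbdlib"
)

//scratchDisk is a minimal Device: a byte slice plus the embedded default block size
type scratchDisk struct {
	data []byte
	usbdlib.DefaultBlockSize //provides BlockSize()
}

var _ usbdlib.Device = (*scratchDisk)(nil) //compile time interface check

func (d *scratchDisk) Size() int64 { return int64(len(d.data)) }

func (d *scratchDisk) ReadAt(buf []byte, pos int64) (int, error) {
	if int(pos)+len(buf) > len(d.data) {
		return 0, io.EOF
	}
	return copy(buf, d.data[pos:]), nil
}

func (d *scratchDisk) WriteAt(buf []byte, pos int64) (int, error) {
	if int(pos)+len(buf) > len(d.data) {
		return 0, io.ErrUnexpectedEOF
	}
	return copy(d.data[pos:], buf), nil
}

func (d *scratchDisk) Trim(pos int64, count int) error { return nil } //nothing to reclaim
func (d *scratchDisk) Flush() error                    { return nil } //nothing buffered
func (d *scratchDisk) Close() error                    { return nil }

Such a device could then be passed to testutil.TestUserspace and testutil.TestNBD exactly as the ramdisk, filedisk, and dedupdisk tests above do.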
-------------------------------------------------------------------------------- /pkg/usbdlib/req.go: -------------------------------------------------------------------------------- 1 | package usbdlib 2 | 3 | import ( 4 | "bytes" 5 | "encoding/binary" 6 | "fmt" 7 | "io" 8 | "sync" 9 | ) 10 | 11 | //NBD protocol details: https://github.com/NetworkBlockDevice/nbd/blob/master/doc/proto.md#request-message 12 | type request struct { 13 | rawReq []byte 14 | //Request fields 15 | reqType uint32 16 | handle []byte 17 | pos int64 18 | count int 19 | //Resources to execute requests 20 | writeBuffer []byte 21 | //Flush mutex; all write ops must RLock, flushes Lock to ensure all previous 22 | //writes are committed before flushing 23 | flushMu *sync.RWMutex 24 | } 25 | 26 | func newRequest() interface{} { 27 | req := &request{ 28 | rawReq: newNbdRawReq(), 29 | handle: newNbdHandle(), 30 | } 31 | return req 32 | } 33 | 34 | func (req *request) Decode(strm io.Reader) error { 35 | _, err := io.ReadFull(strm, req.rawReq) 36 | if err != nil { 37 | return fmt.Errorf("Could not read request data: %w", err) 38 | } 39 | rdr := bytes.NewReader(req.rawReq) 40 | 41 | //Check magic number 42 | var magicNumber uint32 43 | if err = binary.Read(rdr, binary.LittleEndian, &magicNumber); err != nil { //TODO use binary.LittleEndian to do this once and cache []bytes 44 | return err 45 | } else if magicNumber != nbdReqMagic { 46 | return fmt.Errorf("Request did not have correct magic number: got %#x", magicNumber) 47 | } 48 | 49 | //Read request type 50 | if err = binary.Read(rdr, binary.BigEndian, &req.reqType); err != nil { 51 | return fmt.Errorf("Could not decode request type: %w", err) 52 | } 53 | 54 | //Read handle 55 | if _, err = io.ReadFull(rdr, req.handle); err != nil { 56 | return fmt.Errorf("Could not read operation handle: %w", err) 57 | } 58 | 59 | //Read pos 60 | var pos uint64 61 | if err = binary.Read(rdr, binary.BigEndian, &pos); err != nil { 62 | return fmt.Errorf("Could not decode position: %w", err) 63 | } 64 | req.pos = int64(pos) 65 | 66 | //Read count 67 | var count uint32 68 | if err = binary.Read(rdr, binary.BigEndian, &count); err != nil { 69 | return fmt.Errorf("Could not decode count: %w", err) 70 | } 71 | req.count = int(count) 72 | 73 | //Do we need to read our data to be written?
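// (Summary of the decode above: the fixed 28-byte NBD request header is
//  magic u32, request type u32, an opaque 8-byte handle the server echoes back,
//  position u64, and count u32; a write request is then followed by count
//  bytes of payload, consumed below.)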
74 | if req.reqType == nbdWrite { 75 | if _, err = io.ReadFull(strm, req.getWriteBuffer()); err != nil { 76 | return fmt.Errorf("Could not read data to be written: %w", err) 77 | } 78 | } 79 | 80 | return nil 81 | } 82 | 83 | func (req *request) getWriteBuffer() []byte { 84 | if cap(req.writeBuffer) < req.count { 85 | req.writeBuffer = make([]byte, req.count) 86 | } else { 87 | req.writeBuffer = req.writeBuffer[:req.count] 88 | } 89 | return req.writeBuffer 90 | } 91 | -------------------------------------------------------------------------------- /pkg/devices/objstore/options.go: -------------------------------------------------------------------------------- 1 | package objstore 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/tarndt/usbd/pkg/devices/objstore/compress" 7 | "github.com/tarndt/usbd/pkg/devices/objstore/encrypt" 8 | ) 9 | 10 | //Option is an ObjectStore device option 11 | type Option interface { 12 | apply(*device) 13 | } 14 | 15 | //OptAutoflushIval instructs an ObjectStore device to flush every interval 16 | type OptAutoflushIval time.Duration 17 | 18 | func (interval OptAutoflushIval) apply(dev *device) { 19 | dev.autoflush = time.Duration(interval) 20 | } 21 | 22 | //OptConcurFlushCount instructs an ObjectStore device to use this many concurrent workers; 23 | // this has memory usage implications (workers * object size) 24 | type OptConcurFlushCount uint 25 | 26 | func (count OptConcurFlushCount) apply(dev *device) { 27 | dev.concurFlush = uint(count) 28 | } 29 | 30 | //OptCompressRemoteObjects instructs an ObjectStore device to use the provided compression mode 31 | type OptCompressRemoteObjects compress.Mode 32 | 33 | func (compressMode OptCompressRemoteObjects) apply(dev *device) { 34 | dev.compressMode = compress.Mode(compressMode) 35 | } 36 | 37 | //OptNoMetadataSupport instructs an ObjectStore device to reject configurations that 38 | // require the provided stow.Location to support metadata (e.g. compression) 39 | type OptNoMetadataSupport bool 40 | 41 | func (noMeta OptNoMetadataSupport) apply(dev *device) { 42 | dev.noMetadata = bool(noMeta) 43 | } 44 | 45 | //OptThickProvisionLocalFiles instructs an ObjectStore device to not use sparse 46 | // files for local caching 47 | type OptThickProvisionLocalFiles bool 48 | 49 | func (thickProv OptThickProvisionLocalFiles) apply(dev *device) { 50 | dev.thickProvision = bool(thickProv) 51 | } 52 | 53 | //OptEncrypt instructs an ObjectStore device to use the provided AES encryption mode 54 | type OptEncrypt struct { 55 | Mode encrypt.Mode 56 | Key []byte 57 | } 58 | 59 | func (optEncrypt OptEncrypt) apply(dev *device) { 60 | dev.encryptMode = optEncrypt.Mode 61 | dev.encryptKey = optEncrypt.Key 62 | } 63 | 64 | //OptQuotaBytes instructs an ObjectStore device to limit the local disk cache backing it 65 | // to an amount less than the full size of the device 66 | type OptQuotaBytes int64 67 | 68 | func (quota OptQuotaBytes) apply(dev *device) { 69 | dev.quotaBytes = int64(quota) 70 | } 71 | 72 | //OptPersistCache instructs an ObjectStore device to not clean up its local disk cache 73 | // files. Primarily for debugging, but also a performance optimization if no other 74 | // machines are allowed to mutate the remote objects.
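// Illustrative use of the options above (a sketch mirroring cmd/usbdsrvd/osbd.go; values hypothetical):
//	dev, err := objstore.NewDevice(ctx, container, cacheDir, totalBytes, objectBytes,
//		objstore.OptPersistCache(true),
//		objstore.OptCompressRemoteObjects(compress.ModeS2),
//		objstore.OptConcurFlushCount(4))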
75 | type OptPersistCache bool 76 | 77 | func (keepUseCache OptPersistCache) apply(dev *device) { 78 | dev.persistCache = bool(keepUseCache) 79 | } 80 | -------------------------------------------------------------------------------- /cmd/usbdsrvd/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "log" 7 | "os" 8 | "os/signal" 9 | "path/filepath" 10 | 11 | "github.com/tarndt/usbd/cmd/usbdsrvd/conf" 12 | "github.com/tarndt/usbd/pkg/devices/filedisk" 13 | "github.com/tarndt/usbd/pkg/devices/ramdisk" 14 | "github.com/tarndt/usbd/pkg/usbdlib" 15 | ) 16 | 17 | var daemonName = fmt.Sprintf("USBD Server (%s)", os.Args[0]) 18 | 19 | //Simple usage: go build && sudo ./usbdsrvd 20 | func main() { 21 | cfg := conf.MustGetConfig() 22 | 23 | log.Println(daemonName + " started.") 24 | log.Printf(daemonName+" using config: %s", cfg) 25 | defer log.Println(daemonName + " terminated normally.") 26 | 27 | device := mustGetDevice(cfg) 28 | 29 | ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt, os.Kill) 30 | defer cancel() 31 | 32 | var ( 33 | nbdStream *usbdlib.NbdStream 34 | err error 35 | ) 36 | if cfg.NBDDevName != "" { 37 | if nbdStream, _, err = usbdlib.NewNbdHandler(ctx, device, cfg.NBDDevName); err != nil { 38 | err = fmt.Errorf("Could not use existing NBD device %q: %w", cfg.NBDDevName, err) 39 | } 40 | } else { 41 | if nbdStream, cfg.NBDDevName, err = usbdlib.NewNbdHandler(ctx, device, cfg.NBDDevCount); err != nil { 42 | err = fmt.Errorf("Could not create new NBD device: %w", err) 43 | } else { 44 | defer func() { 45 | if err := os.Remove(cfg.NBDDevName); err != nil { 46 | log.Printf("Warning: Could not remove autocreated NBD device %q: %s", cfg.NBDDevName, err) 47 | } 48 | }() 49 | } 50 | } 51 | if err != nil { 52 | log.Fatalf("Could not create NBD user-space device: %s", err) 53 | } 54 | 55 | log.Printf(daemonName+" is processing requests for %q.", cfg.NBDDevName) 56 | if err := nbdStream.ProcessRequests(); err != nil { 57 | log.Fatalf("Request processing failed: %s", err) 58 | } 59 | } 60 | 61 | func mustGetDevice(cfg *conf.Config) (device usbdlib.Device) { 62 | size := int64(cfg.StorageBytes) 63 | var err error 64 | 65 | switch cfg.BackingMode { 66 | case conf.DevMem: 67 | device = ramdisk.NewRAMDisk(size) 68 | 69 | case conf.DevFile: 70 | device, err = filedisk.NewFileDisk(filepath.Join(cfg.StorageDirectory, cfg.StorageName+".bin"), size) 71 | if err != nil { 72 | log.Fatalf("Could not create file backed virtual disk: %s", err) 73 | } 74 | 75 | case conf.DevDedupFile: 76 | device, err = dedupDiskFromCfg(cfg) 77 | if err != nil { 78 | log.Fatalf("Could not create pebbleDB backed deduplicating virtual disk: %s", err) 79 | } 80 | 81 | case conf.DevObjStore: 82 | device, err = osbdFromCfg(cfg) 83 | if err != nil { 84 | log.Fatalf("Could not create object storage backed virtual disk: %s", err) 85 | } 86 | 87 | default: 88 | log.Fatalf("Bug: Could not create store: unknown backing device mode enum: %d", cfg.BackingMode) 89 | } 90 | 91 | return device 92 | } 93 | -------------------------------------------------------------------------------- /pkg/devices/dedupdisk/impls/pebbleidstore.go: -------------------------------------------------------------------------------- 1 | package impls 2 | 3 | import ( 4 | "crypto/sha1" 5 | "encoding/binary" 6 | "encoding/hex" 7 | "fmt" 8 | "hash" 9 | "sync" 10 | 11 | "github.com/tarndt/usbd/pkg/devices/dedupdisk" 12 |
"github.com/tarndt/usbd/pkg/util" 13 | 14 | "github.com/cockroachdb/pebble" 15 | "github.com/cockroachdb/pebble/bloom" 16 | ) 17 | 18 | type pebbleIDStore struct { 19 | dbPath string 20 | db *pebble.DB 21 | hashPool sync.Pool 22 | writeOpts *pebble.WriteOptions 23 | closeOnce sync.Once 24 | } 25 | 26 | //NewPebbleIDStore constructs a dedupdisk.IDStore implemented using PebbleBD 27 | func NewPebbleIDStore(dbPath string, cacheBytes, bloomBitsPerEntry int) (dedupdisk.IDStore, error) { 28 | opts := &pebble.Options{Cache: pebble.NewCache(int64(cacheBytes))} 29 | if bloomBitsPerEntry > 0 { 30 | levelOpts := new(pebble.LevelOptions).EnsureDefaults() 31 | levelOpts.FilterPolicy = bloom.FilterPolicy(bloomBitsPerEntry) 32 | opts.Levels = append(opts.Levels, *levelOpts) 33 | } 34 | 35 | db, err := pebble.Open(dbPath, opts) 36 | if err != nil { 37 | return nil, fmt.Errorf("Could not open database %q: %w", dbPath, err) 38 | } 39 | 40 | return &pebbleIDStore{ 41 | dbPath: dbPath, 42 | db: db, 43 | hashPool: sync.Pool{New: newSHA1}, 44 | writeOpts: &pebble.WriteOptions{Sync: false}, 45 | }, nil 46 | } 47 | 48 | func newSHA1() interface{} { 49 | return sha1.New() 50 | } 51 | 52 | func (ps *pebbleIDStore) GetID(block []byte) (dedupID uint64, sha1Hash []byte, err error) { 53 | if util.IsZeros(block) { 54 | return zeroBlockID, nil, nil 55 | } 56 | hashGen := ps.hashPool.Get().(hash.Hash) 57 | hashGen.Write(block) 58 | sha1Hash = hashGen.Sum(nil) 59 | hashGen.Reset() 60 | 61 | rawVal, closeVal, err := ps.db.Get(sha1Hash) 62 | switch err { 63 | case pebble.ErrNotFound: 64 | return 0, sha1Hash, errNotPresent 65 | case nil: 66 | dedupID = binary.LittleEndian.Uint64(rawVal) 67 | closeVal.Close() 68 | ps.hashPool.Put(hashGen) 69 | 70 | default: 71 | ps.hashPool.Put(hashGen) 72 | return 0, nil, fmt.Errorf("Database get %q failed: %w", hex.EncodeToString(sha1Hash), err) 73 | } 74 | 75 | return dedupID, nil, nil 76 | } 77 | 78 | func (ps *pebbleIDStore) PutID(sha1Hash []byte, dedupID uint64) error { 79 | rawVal := make([]byte, 8) 80 | binary.LittleEndian.PutUint64(rawVal, dedupID) 81 | if err := ps.db.Set(sha1Hash, rawVal, ps.writeOpts); err != nil { 82 | return fmt.Errorf("Database put %q failed: %w", hex.EncodeToString(sha1Hash), err) 83 | } 84 | return nil 85 | } 86 | 87 | func (ps *pebbleIDStore) GetErrNotPresent() error { 88 | return errNotPresent 89 | } 90 | 91 | func (ps *pebbleIDStore) Flush() error { 92 | return ps.db.Flush() 93 | } 94 | 95 | func (ps *pebbleIDStore) Close() (err error) { 96 | ps.closeOnce.Do(func() { 97 | err = ps.db.Close() 98 | }) 99 | return err 100 | } 101 | -------------------------------------------------------------------------------- /pkg/devices/dedupdisk/impls/mmaplunmap.go: -------------------------------------------------------------------------------- 1 | package impls 2 | 3 | import ( 4 | "bufio" 5 | "encoding/binary" 6 | "fmt" 7 | "io" 8 | "os" 9 | "reflect" 10 | "unsafe" 11 | 12 | "github.com/tarndt/usbd/pkg/devices/dedupdisk" 13 | 14 | "launchpad.net/gommap" 15 | ) 16 | 17 | type mmapLUNmap struct { 18 | file *os.File 19 | rawBytes gommap.MMap 20 | ids []uint64 21 | size int64 22 | } 23 | 24 | //NewMmapLUNmap contructs a dedupdisk.LUNMap implemented using a mem-mapped file 25 | func NewMmapLUNmap(filename string, lunBlockSize int64, ifCreateSize int64) (dedupdisk.LUNMap, error) { 26 | idCount := ifCreateSize / lunBlockSize 27 | //Create or open backing file 28 | file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0666) 29 | if err != nil { 30 | return nil, 
fmt.Errorf("Could not open backing file %q: %w", filename, err) 31 | } 32 | if info, err := file.Stat(); err != nil { 33 | return nil, fmt.Errorf("Could not stat backing file %q: %w", filename, err) 34 | } else if size := info.Size(); size < 1 { //Create disk file 35 | strm := bufio.NewWriter(file) 36 | zeroIDBytes := make([]byte, 8) 37 | binary.LittleEndian.PutUint64(zeroIDBytes, zeroBlockID) 38 | for i := int64(0); i < idCount; i++ { 39 | if _, err = strm.Write(zeroIDBytes); err != nil { 40 | return nil, fmt.Errorf("Could not zero fill backing file %q, write failed: %w", filename, err) 41 | } 42 | } 43 | if err = strm.Flush(); err != nil { 44 | return nil, fmt.Errorf("Could not zero fill backing file %q, flush failed: %w", filename, err) 45 | } 46 | if err = file.Sync(); err != nil { 47 | return nil, fmt.Errorf("Could not zero fill backing file %q, sync failed: %w", filename, err) 48 | } 49 | } 50 | mmap, err := gommap.Map(file.Fd(), gommap.PROT_READ|gommap.PROT_WRITE, gommap.MAP_SHARED) 51 | if err != nil { 52 | return nil, fmt.Errorf("Could not mmap backing file %q (fd %d): %w", filename, file.Fd(), err) 53 | } 54 | bytesHdr := *((*reflect.SliceHeader)(unsafe.Pointer(&mmap))) 55 | longsHdr := reflect.SliceHeader{Data: bytesHdr.Data, Len: bytesHdr.Len / 8, Cap: bytesHdr.Cap / 8} 56 | return &mmapLUNmap{file, mmap, *(*[]uint64)(unsafe.Pointer(&longsHdr)), idCount * lunBlockSize}, nil 57 | } 58 | 59 | func (lm *mmapLUNmap) GetID(block uint64) (dedupID uint64, err error) { 60 | if block > uint64(len(lm.ids)) { 61 | return 0, fmt.Errorf("Out of bounds read: %w", io.EOF) 62 | } 63 | return lm.ids[block], nil 64 | } 65 | 66 | func (lm *mmapLUNmap) GetIDs(startBlock uint64, dedupIds []uint64) error { 67 | end := startBlock + uint64(len(dedupIds)) 68 | if end > uint64(len(lm.ids)) { 69 | return fmt.Errorf("Out of bounds read: %w", io.EOF) 70 | } 71 | copy(dedupIds, lm.ids[startBlock:end]) 72 | return nil 73 | } 74 | 75 | func (lm *mmapLUNmap) PutID(block uint64, dedupID uint64) error { 76 | if block > uint64(lm.size) { 77 | return fmt.Errorf("Out of bounds write: %w", io.ErrUnexpectedEOF) 78 | } 79 | lm.ids[block] = dedupID 80 | lm.rawBytes.Sync(gommap.MS_ASYNC) 81 | return nil 82 | } 83 | 84 | func (lm *mmapLUNmap) Size() int64 { 85 | return lm.size 86 | } 87 | 88 | func (lm *mmapLUNmap) Close() error { 89 | flushErr := lm.Flush() 90 | err := lm.file.Close() 91 | if err == nil && flushErr != nil { 92 | err = flushErr 93 | } 94 | if err != nil { 95 | return fmt.Errorf("Could not close backing file: %w", err) 96 | } 97 | return nil 98 | } 99 | 100 | func (lm *mmapLUNmap) Flush() error { 101 | return lm.rawBytes.Sync(gommap.MS_SYNC) 102 | } 103 | -------------------------------------------------------------------------------- /pkg/util/zeros_test.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | ) 7 | 8 | func TestIsZeros(t *testing.T) { 9 | cases := []struct { 10 | desc string 11 | data []byte 12 | isZeros bool 13 | }{ 14 | { 15 | desc: "nil", 16 | data: nil, 17 | isZeros: true, 18 | }, 19 | { 20 | desc: "empty", 21 | data: []byte{}, 22 | isZeros: true, 23 | }, 24 | { 25 | desc: "single-zero", 26 | data: []byte{0}, 27 | isZeros: true, 28 | }, 29 | { 30 | desc: "two-zero", 31 | data: make([]byte, 2), 32 | isZeros: true, 33 | }, 34 | { 35 | desc: "seven-zero", 36 | data: make([]byte, 7), 37 | isZeros: true, 38 | }, 39 | { 40 | desc: "eight-zero", 41 | data: make([]byte, 8), 42 | isZeros: true, 43 | }, 
44 | { 45 | desc: "nine-zero", 46 | data: make([]byte, 9), 47 | isZeros: true, 48 | }, 49 | { 50 | desc: "huge-zero-aligned", 51 | data: make([]byte, 256), 52 | isZeros: true, 53 | }, 54 | { 55 | desc: "huge-zero-unaligned", 56 | data: make([]byte, 257), 57 | isZeros: true, 58 | }, 59 | { 60 | desc: "two-one", 61 | data: oneFill(make([]byte, 2)), 62 | isZeros: false, 63 | }, 64 | { 65 | desc: "seven-one", 66 | data: oneFill(make([]byte, 7)), 67 | isZeros: false, 68 | }, 69 | { 70 | desc: "eight-one", 71 | data: oneFill(make([]byte, 8)), 72 | isZeros: false, 73 | }, 74 | { 75 | desc: "nine-one", 76 | data: oneFill(make([]byte, 9)), 77 | isZeros: false, 78 | }, 79 | { 80 | desc: "huge-one-aligned", 81 | data: oneFill(make([]byte, 256)), 82 | isZeros: false, 83 | }, 84 | { 85 | desc: "huge-one-unaligned", 86 | data: oneFill(make([]byte, 257)), 87 | isZeros: false, 88 | }, 89 | { 90 | desc: "zero-one", 91 | data: []byte{0, 1}, 92 | isZeros: false, 93 | }, 94 | { 95 | desc: "eight-zero-single-one", 96 | data: []byte{0, 0, 0, 0, 0, 0, 0, 0, 1}, 97 | isZeros: false, 98 | }, 99 | { 100 | desc: "nine-zero-single-one", 101 | data: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 1}, 102 | isZeros: false, 103 | }, 104 | } 105 | 106 | for _, tcase := range cases { 107 | t.Run(tcase.desc, func(t *testing.T) { 108 | if actual := IsZeros(tcase.data); actual != tcase.isZeros { 109 | t.Fatalf("isZero(%v) returned %t rather than %t", tcase.data, actual, tcase.isZeros) 110 | } 111 | }) 112 | } 113 | } 114 | 115 | func oneFill(data []byte) []byte { 116 | for i := range data { 117 | data[i] = 1 118 | } 119 | return data 120 | } 121 | 122 | func TestZeroFill(t *testing.T) { 123 | t.Run("count-nil", func(t *testing.T) { 124 | if ZeroFill(nil); !IsZeros(nil) { 125 | t.Fatal("Empty block was not correctly zero-filled") 126 | } 127 | }) 128 | 129 | for count := 1; count <= 8192*4; count <<= 1 { 130 | t.Run(fmt.Sprintf("count-%d", count), func(t *testing.T) { 131 | block := oneFill(make([]byte, count)) 132 | if ZeroFill(block); !IsZeros(block) { 133 | t.Fatalf("Block of %d ones was not correctly zero-filled", count) 134 | } 135 | }) 136 | } 137 | } 138 | 139 | func BenchmarkIsZeros(b *testing.B) { 140 | block := make([]byte, b.N) 141 | b.ReportAllocs() 142 | b.ResetTimer() 143 | 144 | IsZeros(block) 145 | } 146 | 147 | func BenchmarkZerofill(b *testing.B) { 148 | block := make([]byte, b.N) 149 | b.ReportAllocs() 150 | b.ResetTimer() 151 | 152 | ZeroFill(block) 153 | } 154 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/tarndt/usbd 2 | 3 | go 1.17 4 | 5 | replace github.com/graymeta/stow => github.com/tarndt/stow v0.2.8-0.20220119010100-e5705480d170 6 | 7 | require ( 8 | github.com/cockroachdb/pebble v0.0.0-20220120225425-b510eb6b70c9 9 | github.com/dustin/go-humanize v1.0.0 10 | github.com/graymeta/stow v0.0.0-00010101000000-000000000000 11 | github.com/johannesboyne/gofakes3 v0.0.0-20210819161434-5c8dfcfe5310 12 | github.com/klauspost/compress v1.14.1 13 | github.com/pmorjan/kmod v1.0.0 14 | github.com/tarndt/sema v0.0.0-20220219212302-c67df5cf21d0 15 | golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 16 | launchpad.net/gommap v0.0.0-20121012075617-000000000015 17 | ) 18 | 19 | require ( 20 | cloud.google.com/go v0.38.0 // indirect 21 | github.com/Azure/azure-sdk-for-go v32.5.0+incompatible // indirect 22 | github.com/Azure/go-autorest/autorest v0.9.0 // indirect 23 | 
github.com/Azure/go-autorest/autorest/adal v0.5.0 // indirect 24 | github.com/Azure/go-autorest/autorest/date v0.1.0 // indirect 25 | github.com/Azure/go-autorest/logger v0.1.0 // indirect 26 | github.com/Azure/go-autorest/tracing v0.5.0 // indirect 27 | github.com/DataDog/zstd v1.4.5 // indirect 28 | github.com/aws/aws-sdk-go v1.23.4 // indirect 29 | github.com/cespare/xxhash/v2 v2.1.1 // indirect 30 | github.com/cockroachdb/errors v1.8.1 // indirect 31 | github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f // indirect 32 | github.com/cockroachdb/redact v1.0.8 // indirect 33 | github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2 // indirect 34 | github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect 35 | github.com/gogo/protobuf v1.3.2 // indirect 36 | github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect 37 | github.com/golang/protobuf v1.4.2 // indirect 38 | github.com/golang/snappy v0.0.3 // indirect 39 | github.com/google/readahead v0.0.0-20161222183148-eaceba169032 // indirect 40 | github.com/googleapis/gax-go/v2 v2.0.5 // indirect 41 | github.com/hashicorp/errwrap v1.0.0 // indirect 42 | github.com/hashicorp/go-multierror v1.0.0 // indirect 43 | github.com/hashicorp/golang-lru v0.5.1 // indirect 44 | github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af // indirect 45 | github.com/kr/fs v0.1.0 // indirect 46 | github.com/kr/pretty v0.1.0 // indirect 47 | github.com/kr/text v0.1.0 // indirect 48 | github.com/ncw/swift v1.0.49 // indirect 49 | github.com/pkg/errors v0.9.1 // indirect 50 | github.com/pkg/sftp v1.10.0 // indirect 51 | github.com/pquerna/ffjson v0.0.0-20190813045741-dac163c6c0a9 // indirect 52 | github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 // indirect 53 | github.com/satori/go.uuid v1.2.0 // indirect 54 | github.com/shabbyrobe/gocovmerge v0.0.0-20180507124511-f6ea450bfb63 // indirect 55 | go.opencensus.io v0.21.0 // indirect 56 | golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 // indirect 57 | golang.org/x/exp v0.0.0-20200513190911-00229845015e // indirect 58 | golang.org/x/net v0.0.0-20201021035429-f5854403a974 // indirect 59 | golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 // indirect 60 | golang.org/x/text v0.3.3 // indirect 61 | golang.org/x/tools v0.0.0-20210106214847-113979e3529a // indirect 62 | google.golang.org/api v0.8.0 // indirect 63 | google.golang.org/appengine v1.5.0 // indirect 64 | google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 // indirect 65 | google.golang.org/grpc v1.29.1 // indirect 66 | google.golang.org/protobuf v1.23.0 // indirect 67 | gopkg.in/kothar/go-backblaze.v0 v0.0.0-20190520213052-702d4e7eb465 // indirect 68 | launchpad.net/gocheck v0.0.0-20140225173054-000000000087 // indirect 69 | ) 70 | -------------------------------------------------------------------------------- /pkg/devices/testutil/suites.go: -------------------------------------------------------------------------------- 1 | package testutil 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "os" 7 | "path" 8 | "strings" 9 | "sync" 10 | "testing" 11 | "time" 12 | 13 | "github.com/tarndt/usbd/pkg/devices/filedisk" 14 | "github.com/tarndt/usbd/pkg/usbdlib" 15 | ) 16 | 17 | //TestUserspace runs a suite of basic sanity tests on the provided device directly 18 | // without using Linux kernel facilities such as NBD 19 | func TestUserspace(t *testing.T, dev usbdlib.Device, expectedSize uint) { 20 | t.Run("user-space", func(t *testing.T) { 21 | t.Parallel() 22 | 23 | if _, err := 
dev.ReadAt([]byte{0}, 0); err != nil { 24 | t.Fatalf("1 byte test read to %#v failed: %s", dev, err) 25 | } 26 | 27 | TestDevSize(t, dev, expectedSize) 28 | TestReadEmpty(t, dev, uint(dev.Size())) 29 | 30 | if _, err := dev.WriteAt([]byte{0}, 0); err != nil { 31 | t.Fatalf("1 byte test write to %#v failed: %s", dev, err) 32 | } 33 | 34 | TestReadHash(t, dev, TestWriteReadPattern(t, dev)) 35 | TestClose(t, dev) 36 | }) 37 | } 38 | 39 | //TestNBD runs a similar test suite as TestUserspace but puts kernel-space NBD 40 | // in the middle for an end-to-end test. This requires privilege execution! 41 | func TestNBD(t *testing.T, dev usbdlib.Device, expectedSize uint) { 42 | t.Run("with-nbd", func(t *testing.T) { 43 | t.Parallel() 44 | 45 | const rootUID = 0 46 | if os.Geteuid() != rootUID { 47 | pkg := pkgName(dev) 48 | t.Skipf("Must be root for this test, try: go test -c [-race] && sudo ./%s.test -test.v -test.timeout=120s && rm ./%s.test", pkg, pkg) 49 | } 50 | 51 | ctx, cancel := context.WithCancel(CreateContext(t)) 52 | defer cancel() 53 | 54 | ndbStream, devPath, err := usbdlib.NewNbdHandler(ctx, dev) 55 | if err != nil { 56 | t.Fatalf("Could not find a usable NBD: %s", err) 57 | } 58 | t.Logf("Using NBD: %q", devPath) 59 | 60 | f, err := os.OpenFile(devPath, os.O_RDWR, 0) 61 | if err != nil { 62 | t.Skipf("Could not open NBD file %q: %s", devPath, err) 63 | } 64 | defer f.Close() 65 | devFile := &filedisk.FileDisk{File: f, SizeBytes: int64(expectedSize)} 66 | 67 | var processingStarted, processingDone sync.WaitGroup 68 | processingStarted.Add(1) 69 | processingDone.Add(1) 70 | var processErr error 71 | go func() { 72 | defer processingDone.Done() 73 | 74 | processingStarted.Done() 75 | err := ndbStream.ProcessRequests() 76 | if err != nil { 77 | cancel() 78 | processErr = fmt.Errorf("Request processing failed: %w", err) 79 | } 80 | }() 81 | 82 | processingStarted.Wait() 83 | time.Sleep(time.Millisecond * 10) 84 | 85 | if _, err := f.ReadAt([]byte{0}, 0); err != nil { 86 | t.Fatalf("1 byte test read to %q failed: %s", devPath, err) 87 | } 88 | TestDevSize(t, devFile, expectedSize) 89 | TestReadEmpty(t, devFile, uint(devFile.Size())) 90 | 91 | if _, err := f.WriteAt([]byte{0}, 0); err != nil { 92 | t.Fatalf("1 byte test write to %q failed: %s", devPath, err) 93 | } 94 | TestReadHash(t, devFile, TestWriteReadPattern(t, devFile)) 95 | 96 | t.Run("shutdown", func(t *testing.T) { 97 | if err := ndbStream.Close(); err != nil { 98 | t.Fatalf("Failed to close NBD stream: %s", err) 99 | } 100 | processingDone.Wait() 101 | if err := processErr; err != nil { 102 | t.Fatalf("Errors occurred while processing NBD stream: %s", err) 103 | } 104 | }) 105 | }) 106 | } 107 | 108 | func pkgName(x interface{}) string { 109 | varType := strings.TrimPrefix(fmt.Sprintf("%T", x), "*") 110 | return strings.TrimSuffix(varType, path.Ext(varType)) 111 | } 112 | -------------------------------------------------------------------------------- /cmd/usbdsrvd/conf/cfg.go: -------------------------------------------------------------------------------- 1 | package conf 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "strings" 7 | "time" 8 | 9 | "github.com/tarndt/usbd/pkg/devices/objstore/compress" 10 | "github.com/tarndt/usbd/pkg/devices/objstore/encrypt" 11 | 12 | "github.com/dustin/go-humanize" 13 | "github.com/graymeta/stow" 14 | ) 15 | 16 | //Config is a representation of command line config parameters 17 | type Config struct { 18 | NBDDevName string 19 | NBDDevCount uint 20 | BackingMode BackingDevice 21 | StorageDirectory 
string 22 | StorageName string 23 | StorageBytes Capacity 24 | DedupConfig 25 | ObjStoreConfig 26 | } 27 | 28 | //String generates human-readable prose describing a configuration 29 | func (cfg *Config) String() string { 30 | devName := "next available NBD device" 31 | if cfg.NBDDevName != "" { 32 | devName = cfg.NBDDevName 33 | } 34 | 35 | driverParams := "" 36 | switch cfg.BackingMode { 37 | case DevDedupFile: 38 | driverParams = " " + cfg.DedupConfig.String() 39 | case DevObjStore: 40 | driverParams = " " + cfg.ObjStoreConfig.String() 41 | } 42 | 43 | return fmt.Sprintf("Exporting %s volume %q as %s with local storage at %q using driver %s%s.", 44 | humanize.IBytes(uint64(cfg.StorageBytes)), cfg.StorageName, 45 | devName, cfg.StorageDirectory, cfg.BackingMode, driverParams, 46 | ) 47 | } 48 | 49 | //DedupConfig holds the dedup-disk-specific configuration parameters 50 | type DedupConfig struct { 51 | IDStoreMemoryCacheBytes int64 52 | } 53 | 54 | //String generates human-readable prose describing a DedupConfig 55 | func (dc *DedupConfig) String() string { 56 | return fmt.Sprintf("using up to %s memory for ID Store cache", humanize.IBytes(uint64(dc.IDStoreMemoryCacheBytes))) 57 | } 58 | 59 | //ObjStoreConfig holds the object-store-specific configuration parameters 60 | type ObjStoreConfig struct { 61 | Kind string 62 | Config stow.ConfigMap 63 | LocalDiskCacheBytes Capacity 64 | PersistCache bool 65 | ObjectBytes Capacity 66 | AESMode encrypt.Mode 67 | AESKey []byte 68 | CompressMode compress.Mode 69 | ConcurFlush uint 70 | FlushInterval time.Duration 71 | } 72 | 73 | //String generates human-readable prose describing an ObjStoreConfig 74 | func (c *ObjStoreConfig) String() string { 75 | size := "as much as total device size" 76 | if c.LocalDiskCacheBytes > 0 { 77 | size = humanize.IBytes(uint64(c.LocalDiskCacheBytes)) 78 | } 79 | 80 | var flushDesc string 81 | if c.FlushInterval > 0 { 82 | flushDesc = fmt.Sprintf(", flushing to remote store every %s using %d workers", c.FlushInterval, c.ConcurFlush) 83 | } 84 | 85 | persistMode := "non-persistent" 86 | if c.PersistCache { 87 | persistMode = "persistent" 88 | } 89 | 90 | return fmt.Sprintf("using a %s remote object store (%s), with %s objects, using up to %s of %s local storage for cache, %s compression, %s encryption%s", 91 | c.Kind, stowCfgStr(c.Config), humanize.IBytes(uint64(c.ObjectBytes)), 92 | size, persistMode, c.CompressMode, c.AESMode, flushDesc, 93 | ) 94 | } 95 | 96 | func stowCfgStr(cm stow.ConfigMap) string { 97 | var str bytes.Buffer 98 | for k, v := range cm { 99 | str.WriteString(k) 100 | str.WriteByte('=') 101 | 102 | if mayBeSecret(k) { 103 | str.WriteString("<hidden>") //do not print values whose key suggests a secret 104 | } else { 105 | str.WriteByte('"') 106 | str.WriteString(v) 107 | str.WriteByte('"') 108 | } 109 | str.WriteString(", ") 110 | } 111 | if str.Len() > 2 { 112 | str.Truncate(str.Len() - 2) 113 | } 114 | return str.String() 115 | } 116 | 117 | func mayBeSecret(s string) bool { 118 | s = strings.ToLower(s) 119 | for _, candidate := range []string{"secret", "cred", "pass", "token"} { 120 | if strings.Contains(s, candidate) { 121 | return true 122 | } 123 | } 124 | return false 125 | }
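conf/capacity.go and conf/parseargs.go are not included in this excerpt. Given the go-humanize usage above, capacity flags are presumably parsed along these lines (a sketch; the function name and exact behavior are assumptions):

//parseCapacitySketch converts user input such as "64 MiB" or "1GB" into a
//byte count; humanize.ParseBytes understands both SI and IEC suffixes.
func parseCapacitySketch(arg string) (Capacity, error) {
	byteCount, err := humanize.ParseBytes(arg)
	if err != nil {
		return 0, err
	}
	return Capacity(byteCount), nil
}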
-------------------------------------------------------------------------------- /pkg/usbdlib/nbdutil_linux.go: -------------------------------------------------------------------------------- 1 | package usbdlib 2 | 3 | import ( 4 | "fmt" 5 | ) 6 | 7 | /* 8 | #include <linux/ioctl.h> //needed for _IO macro in nbd.h 9 | #include <linux/nbd.h> //struct nbd_request, struct nbd_reply 10 | #include <stddef.h> //size_t 11 | #include <arpa/inet.h> //htonl 12 | 13 | const size_t sizeofNdbReq = sizeof(struct nbd_request); 14 | const size_t sizeofNdbResp = sizeof(struct nbd_reply); 15 | struct nbd_reply dummyReply; 16 | const int ndbHandleLen = sizeof(dummyReply.handle); 17 | 18 | 19 | #if defined NBD_SET_FLAGS && defined NBD_FLAG_SEND_TRIM 20 | const int ndbTrimSupported = 1; 21 | #else 22 | const int ndbTrimSupported = 0; 23 | #endif 24 | 25 | */ 26 | import "C" 27 | 28 | const ( 29 | //ioctl constants 30 | nbdSetBlockSize = uintptr(C.NBD_SET_BLKSIZE) 31 | nbdSetSizeBlocks = uintptr(C.NBD_SET_SIZE_BLOCKS) 32 | ndbClearSock = uintptr(C.NBD_CLEAR_SOCK) 33 | nbdSetSock = uintptr(C.NBD_SET_SOCK) 34 | nbdSetFlags = uintptr(C.NBD_SET_FLAGS) 35 | nbdFlagSendTrim = uintptr(C.NBD_FLAG_SEND_TRIM) 36 | nbdDoIt = uintptr(C.NBD_DO_IT) 37 | nbdClearQueue = uintptr(C.NBD_CLEAR_QUE) 38 | ndbDisconnect = uintptr(C.NBD_DISCONNECT) 39 | 40 | //NBD commands 41 | nbdRead = uint32(C.NBD_CMD_READ) 42 | nbdWrite = uint32(C.NBD_CMD_WRITE) 43 | nbdDisconnect = uint32(C.NBD_CMD_DISC) 44 | nbdFlush = uint32(C.NBD_CMD_FLUSH) 45 | nbdTrim = uint32(C.NBD_CMD_TRIM) 46 | 47 | //NBD response error numbers (#defines in nbd.h) 48 | nbdRespSuccess = nbdErr(0) 49 | ndbRespErrPerms = nbdErr(1) 50 | ndbRespErrIO = nbdErr(5) 51 | ndbRespErrMem = nbdErr(12) 52 | ndbRespErrInvalid = nbdErr(22) 53 | ndbRespErrNoSpace = nbdErr(28) 54 | ndbRespErrTooLarge = nbdErr(75) 55 | ndbRespErrUnsupportedOp = nbdErr(95) 56 | ndbRespErrShuttingDown = nbdErr(108) 57 | ) 58 | 59 | type nbdErr uint32 60 | 61 | func newRespErr(errNo uint32) error { 62 | if errNo == 0 { 63 | return nil 64 | } 65 | return nbdErr(errNo) 66 | } 67 | 68 | func (err nbdErr) Error() string { 69 | var msg = fmt.Sprintf("NBD Error #%d: ", err) 70 | switch err { 71 | case nbdRespSuccess: 72 | msg = "Not an error!"
73 | case ndbRespErrPerms: 74 | msg += "Operation not permitted" 75 | case ndbRespErrIO: 76 | msg += "Input/output error" 77 | case ndbRespErrMem: 78 | msg += "Cannot allocate memory" 79 | case ndbRespErrInvalid: 80 | msg += "Invalid argument" 81 | case ndbRespErrNoSpace: 82 | msg += "No space left on device" 83 | case ndbRespErrTooLarge: 84 | msg += "Value too large" 85 | case ndbRespErrUnsupportedOp: 86 | msg += "Operation not supported" 87 | case ndbRespErrShuttingDown: 88 | msg += "Server is in the process of being shut down" 89 | default: 90 | msg += "Unknown (likely invalid) error" 91 | } 92 | return msg 93 | } 94 | 95 | var ( 96 | //Sizes of communication structs 97 | ndbReqBytes = int(C.sizeofNdbReq) 98 | ndbRespBytes = int(C.sizeofNdbResp) 99 | ndbHandleLen = int(C.ndbHandleLen) 100 | //Magic numbers 101 | nbdReqMagic = uint32(C.htonl(C.uint32_t(C.NBD_REQUEST_MAGIC))) 102 | nbdReplyMagic = uint32(C.htonl(C.uint32_t(C.NBD_REPLY_MAGIC))) 103 | //NBD features 104 | ndbTrimSupported bool = C.ndbTrimSupported == 1 105 | ) 106 | 107 | func newNbdRawReq() []byte { 108 | return make([]byte, ndbReqBytes) 109 | } 110 | 111 | func newNbdRawResp() []byte { 112 | return make([]byte, ndbRespBytes) //a raw reply buffer should be reply-sized 113 | } 114 | 115 | func newNbdHandle() []byte { 116 | return make([]byte, ndbHandleLen) 117 | } 118 | 119 | //DefaultBlockSize is a type meant to be embedded in implementations to easily provide 120 | // the BlockSize() method 121 | type DefaultBlockSize int64 122 | 123 | //DefaultBlockSizeBytes is 4096 (4KB) 124 | const DefaultBlockSizeBytes DefaultBlockSize = 4096 125 | 126 | //BlockSize always returns DefaultBlockSizeBytes 127 | func (DefaultBlockSize) BlockSize() int64 { 128 | return int64(DefaultBlockSizeBytes) 129 | } 130 | -------------------------------------------------------------------------------- /pkg/usbdlib/req_test.go: -------------------------------------------------------------------------------- 1 | package usbdlib 2 | 3 | import ( 4 | "bytes" 5 | "encoding/binary" 6 | "fmt" 7 | "io" 8 | "testing" 9 | ) 10 | 11 | func TestDecode(t *testing.T) { 12 | var buf bytes.Buffer 13 | for i := int64(0); i < int64(DefaultBlockSizeBytes*2); i++ { 14 | in := newRequest().(*request) 15 | 16 | in.count = int(i) 17 | if i%2 == 0 { 18 | in.reqType = nbdRead 19 | } else { 20 | in.reqType = nbdWrite 21 | in.writeBuffer = make([]byte, in.count) 22 | for i := range in.writeBuffer { 23 | in.writeBuffer[i] = 7 24 | } 25 | } 26 | 27 | in.handle = encodeHandle(i) 28 | in.pos = i 29 | 30 | buf.Reset() 31 | if err := in.Encode(&buf); err != nil { 32 | t.Fatalf("Failed to encode request: %s", err) 33 | } 34 | in.rawReq = buf.Bytes()[:ndbReqBytes] 35 | 36 | out := newRequest().(*request) 37 | if err := out.Decode(&buf); err != nil { 38 | t.Fatalf("Failed to decode request: %s", err) 39 | } 40 | 41 | switch { 42 | case !bytes.Equal(in.rawReq, out.rawReq): 43 | t.Fatalf("Output of encoding and data read from stream did not match:\n\tin: %v\n\tvs\n\tout: %v", in.rawReq, out.rawReq) 44 | case in.reqType != out.reqType: 45 | t.Fatalf("Wrong request type: in %d vs out %d", in.reqType, out.reqType) 46 | case !bytes.Equal(in.handle, out.handle): 47 | t.Fatalf("Wrong request handle: in %v vs out %v", in.handle, out.handle) 48 | case in.pos != out.pos: 49 | t.Fatalf("Wrong request position: in %d vs out %d", in.pos, out.pos) 50 | case in.count != out.count: 51 | t.Fatalf("Wrong request count: in %d vs out %d", in.count, out.count) 52 | case in.reqType == nbdWrite && !bytes.Equal(in.writeBuffer, out.writeBuffer): 53 |
t.Fatal("Input and output write buffers did not match") 54 | } 55 | } 56 | } 57 | 58 | func BenchmarkDecode(b *testing.B) { 59 | in := newRequest().(*request) 60 | in.reqType = nbdRead 61 | in.count = int(1) 62 | in.handle = encodeHandle(1) 63 | in.pos = 1 64 | 65 | var buf bytes.Buffer 66 | if err := in.Encode(&buf); err != nil { 67 | b.Fatalf("Failed to encode request: %s", err) 68 | } 69 | rawReq := buf.Bytes() 70 | 71 | req := newRequest().(*request) 72 | var err error 73 | 74 | b.ResetTimer() 75 | b.ReportAllocs() 76 | for i := 0; i < b.N; i++ { 77 | if err = req.Decode(bytes.NewBuffer(rawReq)); err != nil { 78 | b.Fatalf("Failed to decode request: %s", err) 79 | } 80 | } 81 | } 82 | 83 | func encodeHandle(x int64) []byte { 84 | buf := make([]byte, 8) 85 | binary.BigEndian.PutUint64(buf, uint64(x)) 86 | return buf 87 | } 88 | 89 | func (req *request) Encode(wtr io.Writer) error { 90 | //Write magic number 91 | var err error 92 | if err = binary.Write(wtr, binary.LittleEndian, nbdReqMagic); err != nil { 93 | return fmt.Errorf("Could not encode request type: %w", err) 94 | } 95 | 96 | //Write request type 97 | if err = binary.Write(wtr, binary.BigEndian, &req.reqType); err != nil { 98 | return fmt.Errorf("Could not encode request type: %w", err) 99 | } 100 | 101 | //Write handle 102 | if _, err = wtr.Write(req.handle); err != nil { 103 | return fmt.Errorf("Could not write operation handle: %w", err) 104 | } 105 | 106 | //Write pos 107 | pos := uint64(req.pos) 108 | if err = binary.Write(wtr, binary.BigEndian, &pos); err != nil { 109 | return fmt.Errorf("Could not encode position: %w", err) 110 | } 111 | 112 | //Write count 113 | count := uint32(req.count) 114 | if err = binary.Write(wtr, binary.BigEndian, &count); err != nil { 115 | return fmt.Errorf("Could not encode count: %w", err) 116 | } 117 | 118 | if req.reqType == nbdWrite { 119 | if len(req.writeBuffer) != int(count) { 120 | return fmt.Errorf("Write count %d did not match size of write buffer %d", count, len(req.writeBuffer)) 121 | } 122 | if _, err = wtr.Write(req.writeBuffer); err != nil { 123 | return fmt.Errorf("Failed to write contents of write buffer: %w", err) 124 | } 125 | } 126 | 127 | return nil 128 | } 129 | -------------------------------------------------------------------------------- /pkg/devices/dedupdisk/impls/fileblkstore.go: -------------------------------------------------------------------------------- 1 | package impls 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "os" 7 | "sync" 8 | "time" 9 | 10 | "github.com/tarndt/usbd/pkg/devices/dedupdisk" 11 | ) 12 | 13 | const syncDelay = time.Second * 5 14 | 15 | type fileBlockStore struct { 16 | file *os.File 17 | 18 | pendingWrites map[uint64]*sync.Cond 19 | nextID uint64 20 | nextPos int64 21 | writeState sync.RWMutex 22 | 23 | syncCh chan bool 24 | 25 | blockSize uint64 26 | zeroBlock []byte 27 | } 28 | 29 | //NewFileBlockStore constructs a dedupdisk.BlockStore backed by a simple file 30 | func NewFileBlockStore(filename string, blockSize int64) (dedupdisk.BlockStore, error) { 31 | fbs := &fileBlockStore{ 32 | blockSize: uint64(blockSize), 33 | syncCh: make(chan bool, 1), 34 | zeroBlock: make([]byte, blockSize), 35 | pendingWrites: make(map[uint64]*sync.Cond, 757), 36 | } 37 | var err error 38 | if fbs.file, err = os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0666); err != nil { 39 | return nil, fmt.Errorf("Could not open backing file %q: %w", filename, err) 40 | } 41 | info, err := fbs.file.Stat() 42 | if err != nil { 43 | return nil, fmt.Errorf("Could not stat 
backing file %q: %w", filename, err) 44 | } 45 | fbs.nextID = uint64(info.Size() / blockSize) 46 | go fbs.fsyncWorker() 47 | return fbs, nil 48 | } 49 | 50 | func (fbs *fileBlockStore) GetBlock(dedupID uint64, buf []byte) (err error) { 51 | //Check if fbs is the zero block 52 | if dedupID == zeroBlockID { 53 | copy(buf, fbs.zeroBlock) 54 | return nil 55 | } 56 | //Ensure we are safe to read 57 | var written *sync.Cond 58 | fbs.writeState.RLock() 59 | for written = fbs.pendingWrites[dedupID]; written != nil; written = fbs.pendingWrites[dedupID] { 60 | written.Wait() 61 | } 62 | fbs.writeState.RUnlock() 63 | //Read, we don't need the lock because once written, blocks are immutable 64 | _, err = fbs.file.ReadAt(buf, int64(dedupID*fbs.blockSize)) 65 | return 66 | } 67 | 68 | func (fbs *fileBlockStore) PutBlock(buf []byte) (dedupID uint64, err error) { 69 | //Update write state 70 | var pos int64 71 | var writeWaiters = sync.NewCond(&fbs.writeState) 72 | fbs.writeState.Lock() 73 | pos, dedupID = fbs.nextPos, fbs.nextID 74 | fbs.nextPos += int64(fbs.blockSize) 75 | fbs.nextID++ 76 | fbs.pendingWrites[dedupID] = writeWaiters 77 | fbs.writeState.Unlock() 78 | //Perform write 79 | _, err = fbs.file.WriteAt(buf, pos) 80 | fbs.writeState.Lock() 81 | delete(fbs.pendingWrites, dedupID) 82 | fbs.writeState.Unlock() 83 | writeWaiters.Broadcast() 84 | if err != nil { 85 | return 0, err 86 | } 87 | select { 88 | case fbs.syncCh <- true: 89 | default: 90 | } 91 | return dedupID, nil 92 | } 93 | 94 | func (fbs *fileBlockStore) PutBlockAt(buf []byte, blockID uint64) (dedupID uint64, err error) { 95 | //Update write state 96 | var writeWaiters = sync.NewCond(&fbs.writeState) 97 | fbs.writeState.Lock() 98 | fbs.pendingWrites[dedupID] = writeWaiters 99 | fbs.writeState.Unlock() 100 | //Perform write 101 | _, err = fbs.file.WriteAt(buf, int64(blockID*fbs.blockSize)) 102 | fbs.writeState.Lock() 103 | delete(fbs.pendingWrites, dedupID) 104 | fbs.writeState.Unlock() 105 | writeWaiters.Broadcast() 106 | if err != nil { 107 | return 0, err 108 | } 109 | select { 110 | case fbs.syncCh <- true: 111 | default: 112 | } 113 | return dedupID, nil 114 | } 115 | 116 | func (fbs *fileBlockStore) Flush() error { 117 | return fbs.file.Sync() 118 | } 119 | 120 | func (fbs *fileBlockStore) Close() error { 121 | return fbs.file.Close() 122 | } 123 | 124 | func (fbs *fileBlockStore) fsyncWorker() { 125 | var err error 126 | var looping bool 127 | for { 128 | <-fbs.syncCh 129 | time.Sleep(syncDelay) 130 | if err = fbs.Flush(); err != nil { 131 | log.Printf("FileBlockStore::fsyncWorker(): WARNING: Error syncing file; Details: %s", err) 132 | } 133 | looping = true 134 | for looping { 135 | select { 136 | case <-fbs.syncCh: 137 | default: 138 | looping = false 139 | } 140 | } 141 | } 142 | } 143 | -------------------------------------------------------------------------------- /pkg/devices/objstore/device_test.go: -------------------------------------------------------------------------------- 1 | package objstore 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/tarndt/usbd/pkg/devices/objstore/compress" 8 | "github.com/tarndt/usbd/pkg/devices/objstore/encrypt" 9 | "github.com/tarndt/usbd/pkg/devices/testutil" 10 | 11 | "github.com/graymeta/stow" 12 | "github.com/graymeta/stow/local" 13 | ) 14 | 15 | const testDirPrefix = "osbd-test-" 16 | 17 | func TestNewDevices(t *testing.T) { 18 | testLocalFS(t) 19 | testLocalS3(t) 20 | } 21 | 22 | func testLocalFS(t *testing.T) { 23 | t.Run("local-fs", func(t *testing.T) { 24 | 
-------------------------------------------------------------------------------- /pkg/devices/objstore/device_test.go: -------------------------------------------------------------------------------- 1 | package objstore 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/tarndt/usbd/pkg/devices/objstore/compress" 8 | "github.com/tarndt/usbd/pkg/devices/objstore/encrypt" 9 | "github.com/tarndt/usbd/pkg/devices/testutil" 10 | 11 | "github.com/graymeta/stow" 12 | "github.com/graymeta/stow/local" 13 | ) 14 | 15 | const testDirPrefix = "osbd-test-" 16 | 17 | func TestNewDevices(t *testing.T) { 18 | testLocalFS(t) 19 | testLocalS3(t) 20 | } 21 | 22 | func testLocalFS(t *testing.T) { 23 | t.Run("local-fs", func(t *testing.T) { 24 | t.Parallel() 25 | 26 | cfg := stow.ConfigMap{local.ConfigKeyPath: t.TempDir()} 27 | if err := ValidateConfig(local.Kind, cfg); err != nil { 28 | t.Fatalf("Could not validate store config: %s", err) 29 | } 30 | 31 | store, err := NewStore(KindLocalTest, cfg) 32 | if err != nil { 33 | t.Fatalf("Could not create local object store: %s", err) 34 | } 35 | 36 | t.Run("no-quota", func(t *testing.T) { 37 | testDeviceSizes(t, store, OptNoMetadataSupport(true)) 38 | }) 39 | t.Run("with-quota", func(t *testing.T) { 40 | testDeviceSizes(t, store, OptQuotaBytes(32*1024*1024)) 41 | }) 42 | }) 43 | } 44 | 45 | func testLocalS3(t *testing.T) { 46 | t.Run("local-s3", func(t *testing.T) { 47 | t.Parallel() 48 | 49 | srv := s3Server() 50 | defer srv.Close() 51 | 52 | store := s3Store(t, srv) 53 | 54 | t.Run("compress-none", func(t *testing.T) { 55 | testDeviceSizes(t, store) 56 | }) 57 | t.Run("compress-s2", func(t *testing.T) { 58 | testDeviceSizes(t, store, OptCompressRemoteObjects(compress.ModeS2)) 59 | }) 60 | t.Run("compress-gz", func(t *testing.T) { 61 | testDeviceSizes(t, store, OptCompressRemoteObjects(compress.ModeGzip)) 62 | }) 63 | t.Run("encrypt", func(t *testing.T) { 64 | key, err := encrypt.MakeRandomAESKey() 65 | if err != nil { 66 | t.Fatalf("Could not create AES key: %s", err) 67 | } 68 | t.Run("compress-none", func(t *testing.T) { 69 | testDeviceSizes(t, store, OptEncrypt{Mode: encrypt.ModeAESRec, Key: key}) 70 | }) 71 | t.Run("compress-s2", func(t *testing.T) { 72 | testDeviceSizes(t, store, OptCompressRemoteObjects(compress.ModeS2), OptEncrypt{Mode: encrypt.ModeAESRec, Key: key}) 73 | }) 74 | }) 75 | }) 76 | } 77 | 78 | func testDeviceSizes(t *testing.T, store stow.Location, options ...Option) { 79 | t.Run("mono-object", func(t *testing.T) { 80 | const ( 81 | totalBytes = 2 * 1024 * 1024 //2 MB 82 | objectBytes = totalBytes 83 | ) 84 | t.Run("thick", func(t *testing.T) { 85 | testSuite(t, store, totalBytes, objectBytes, append(options, OptAutoflushIval(time.Millisecond*50), OptThickProvisionLocalFiles(true))...) 86 | }) 87 | t.Run("thin", func(t *testing.T) { 88 | testSuite(t, store, totalBytes, objectBytes, append(options, OptAutoflushIval(time.Millisecond*50))...) 89 | }) 90 | }) 91 | 92 | t.Run("micro-objects", func(t *testing.T) { 93 | const ( 94 | totalBytes = 16 * 1024 * 1024 //16 MB 95 | objectBytes = 512 * 1024 //512 KB 96 | ) 97 | testSuite(t, store, totalBytes, objectBytes, options...) 98 | }) 99 | 100 | if testing.Short() { 101 | t.Skipf("In short mode, skipping larger tests") 102 | } 103 | 104 | t.Run("small-objects", func(t *testing.T) { 105 | const ( 106 | totalBytes = 64 * 1024 * 1024 //64 MB 107 | objectBytes = 2 * 1024 * 1024 //2 MB 108 | ) 109 | 110 | testSuite(t, store, totalBytes, objectBytes, append(options, OptConcurFlushCount(2))...) 111 | }) 112 | 113 | t.Run("medium-objects", func(t *testing.T) { 114 | const ( 115 | totalBytes = 256 * 1024 * 1024 //256 MB 116 | objectBytes = 16 * 1024 * 1024 //16 MB 117 | ) 118 | 119 | testSuite(t, store, totalBytes, objectBytes, append(options, OptConcurFlushCount(totalBytes/objectBytes))...) 120 | }) 121 | } 122 | 123 | func testSuite(t *testing.T, store stow.Location, totalBytes, objectBytes uint, options ...Option) { 124 | t.Parallel() 125 | 126 | container := createContainer(t, store) 127 | dev := createDevice(t, container, "", totalBytes, objectBytes, options...)
128 | 129 | testutil.TestDevSize(t, dev, totalBytes) 130 | testutil.TestReadEmpty(t, dev, objectBytes) 131 | devHash := testutil.TestWriteReadPattern(t, dev) 132 | testExistingRemote(t, container, "", totalBytes, objectBytes, devHash, options...) 133 | testutil.TestClose(t, dev) 134 | } 135 | -------------------------------------------------------------------------------- /pkg/devices/objstore/compress/stow.go: -------------------------------------------------------------------------------- 1 | package compress 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | "strconv" 7 | 8 | "github.com/tarndt/usbd/pkg/util/strms" 9 | 10 | "github.com/graymeta/stow" 11 | ) 12 | 13 | const ( 14 | compressMetaAlgoHeader = "x-osbd-cmp-alg" 15 | compressMetaSizeHeader = "x-osbd-cmp-size" 16 | ) 17 | 18 | type compressedContainer struct { 19 | stow.Container 20 | Mode 21 | } 22 | 23 | var _ stow.Container = (*compressedContainer)(nil) 24 | 25 | //NewCompressedContainer wraps the provided container in transparent compression and decompression 26 | func NewCompressedContainer(container stow.Container, mode Mode) stow.Container { 27 | return compressedContainer{ 28 | Container: container, 29 | Mode: mode, 30 | } 31 | } 32 | 33 | func (cont compressedContainer) Item(id string) (stow.Item, error) { 34 | item, err := cont.Container.Item(id) 35 | if item != nil { 36 | item = compressedItem{item} 37 | } 38 | return item, err 39 | } 40 | 41 | func (cont compressedContainer) Items(prefix, cursor string, count int) ([]stow.Item, string, error) { 42 | items, cursor, err := cont.Container.Items(prefix, cursor, count) 43 | for i := range items { 44 | items[i] = compressedItem{items[i]} 45 | } 46 | return items, cursor, err 47 | } 48 | 49 | func (cont compressedContainer) Put(name string, rdr io.Reader, size int64, metadata map[string]interface{}) (stow.Item, error) { 50 | if cont.Mode == ModeIdentity { 51 | return cont.Container.Put(name, rdr, size, metadata) 52 | } 53 | 54 | pipeRdr, pipeWtr := io.Pipe() 55 | wtr, err := cont.NewWriter(pipeWtr) 56 | if err != nil { 57 | return nil, fmt.Errorf("Could not create stream compressor: %w", err) 58 | } 59 | 60 | go func() { 61 | _, err := io.Copy(wtr, rdr) 62 | if err != nil { 63 | pipeWtr.CloseWithError(fmt.Errorf("Copy failed during %s stream compression: %w", cont.Mode, err)) 64 | wtr.Close() 65 | return 66 | } 67 | 68 | if err = wtr.Close(); err != nil { 69 | pipeWtr.CloseWithError(fmt.Errorf("Close failed during %s stream compression: %w", cont.Mode, err)) 70 | return 71 | } 72 | 73 | pipeWtr.Close() 74 | }() 75 | 76 | mdWithCmp := make(map[string]interface{}, len(metadata)+2) 77 | for key, val := range metadata { 78 | mdWithCmp[key] = val 79 | } 80 | mdWithCmp[compressMetaAlgoHeader] = cont.AlgoName() 81 | mdWithCmp[compressMetaSizeHeader] = strconv.FormatInt(size, 36) 82 | 83 | return cont.Container.Put(name, pipeRdr, stow.SizeUnknown, mdWithCmp) 84 | } 85 | 86 | type compressedItem struct { 87 | stow.Item 88 | } 89 | 90 | var _ stow.Item = (*compressedItem)(nil) 91 | 92 | func (item compressedItem) Size() (int64, error) { 93 | md, err := item.Metadata() 94 | if err != nil { 95 | return 0, fmt.Errorf("Opening item metadata to check for compression failed: %w", err) 96 | } 97 | 98 | hdrAlgoVal, hdrExists := md[compressMetaAlgoHeader] 99 | if !hdrExists { 100 | return item.Item.Size() 101 | } 102 | 103 | hdrSizeVal, hdrExists := md[compressMetaSizeHeader] 104 | if !hdrExists { 105 | return 0, fmt.Errorf("Item metadata indicated (%v) compression but no size was recorded", hdrAlgoVal) 
106 | } 107 | 108 | hdrSizeValStr, isString := hdrSizeVal.(string) 109 | if !isString { 110 | return 0, fmt.Errorf("Item metadata indicated (%v) compression but size (#%v) was a %T not a string", hdrAlgoVal, hdrSizeVal, hdrSizeVal) 111 | } 112 | 113 | size, err := strconv.ParseInt(hdrSizeValStr, 36, 64) 114 | if err != nil { 115 | return 0, fmt.Errorf("Item metadata size %q was not parseable as base36 integer: %w", hdrSizeValStr, err) 116 | } 117 | return size, nil 118 | } 119 | 120 | func (item compressedItem) Open() (io.ReadCloser, error) { 121 | md, err := item.Metadata() 122 | if err != nil { 123 | return nil, fmt.Errorf("Opening item metadata to check for compression failed: %w", err) 124 | } 125 | 126 | mode := ModeIdentity 127 | if hdrAlgoVal, hdrExists := md[compressMetaAlgoHeader]; hdrExists { 128 | hdrAlgoStr, isString := hdrAlgoVal.(string) 129 | if !isString { 130 | return nil, fmt.Errorf("Item metadata indicated compression but value (#%v) was a %T not a string", hdrAlgoVal, hdrAlgoVal) 131 | } 132 | 133 | if mode = ModeFromName(hdrAlgoStr); mode == ModeUnknown { 134 | return nil, fmt.Errorf("Item metadata specified unsupported compression mode: %q", hdrAlgoStr) 135 | } 136 | } 137 | 138 | itemRdr, err := item.Item.Open() 139 | if mode == ModeIdentity || err != nil { 140 | return itemRdr, err 141 | } 142 | decompRdr, err := mode.NewReader(itemRdr) 143 | if err != nil { 144 | itemRdr.Close() 145 | return nil, fmt.Errorf("Item decompressor could not be created: %w", err) 146 | } 147 | return strms.NewReadFirstCloseList(decompRdr, itemRdr), nil 148 | } 149 | -------------------------------------------------------------------------------- /pkg/devices/objstore/encrypt/mode.go: -------------------------------------------------------------------------------- 1 | package encrypt 2 | 3 | import ( 4 | "crypto/aes" 5 | "crypto/cipher" 6 | "crypto/rand" 7 | "fmt" 8 | "io" 9 | 10 | "github.com/tarndt/usbd/pkg/util" 11 | ) 12 | 13 | //Mode represents an encryption mode 14 | type Mode uint8 15 | 16 | //Enumerate available modes and their textual names 17 | const ( 18 | ModeIdentity Mode = iota 19 | ModeUnknown 20 | ModeAESCTR 21 | ModeAESCFB 22 | ModeAESOFB 23 | ModeAESRec = ModeAESCTR 24 | 25 | ModeIdentityName = "identity" 26 | ModeAESCTRName = "aes-ctr" 27 | ModeAESCFBName = "aes-cfb" 28 | ModeAESOFBName = "aes-ofb" 29 | ModeAESRecName = "aes-rec" 30 | ModeUknownName = "unknown" 31 | ) 32 | 33 | //ModeFromName constructs a Mode from a textual name 34 | func ModeFromName(name string) Mode { 35 | switch name { 36 | case "", ModeIdentityName: 37 | return ModeIdentity 38 | case ModeAESCTRName, ModeAESRecName: 39 | return ModeAESCTR 40 | case ModeAESCFBName: 41 | return ModeAESCFB 42 | case ModeAESOFBName: 43 | return ModeAESOFB 44 | } 45 | return ModeUnknown 46 | } 47 | 48 | //AlgoName returns the textual name of a Mode 49 | func (m Mode) AlgoName() string { 50 | switch m { 51 | case ModeIdentity: 52 | return ModeIdentityName 53 | case ModeAESCTR: 54 | return ModeAESCTRName 55 | case ModeAESCFB: 56 | return ModeAESCFBName 57 | case ModeAESOFB: 58 | return ModeAESOFBName 59 | } 60 | return ModeUknownName 61 | } 62 | 63 | //String is a synonym for AlgoName 64 | func (m Mode) String() string { 65 | return m.AlgoName() 66 | } 67 | 68 | //NewReader constructs a reader wrapper that applies this mode's decryption 69 | func (m Mode) NewReader(rdr io.Reader, key, initVect []byte) (io.Reader, error) { 70 | var cipherConstructor func(cipher.Block, []byte) cipher.Stream 71 | switch m { 72 | case 
ModeIdentity: 73 | return rdr, nil 74 | case ModeAESCTR: 75 | cipherConstructor = cipher.NewCTR 76 | case ModeAESCFB: 77 | cipherConstructor = cipher.NewCFBDecrypter 78 | case ModeAESOFB: 79 | cipherConstructor = cipher.NewOFB 80 | default: 81 | return nil, fmt.Errorf("Cannot create decryptor for unknown cipher") 82 | } 83 | 84 | aesEnc, err := aes.NewCipher(key) 85 | if err != nil { 86 | return nil, fmt.Errorf("Could not create AES decryptor: %w", err) 87 | } 88 | 89 | if len(initVect) != aes.BlockSize { 90 | return nil, fmt.Errorf("Provided AES initialization vector has: %d bytes, rather than the required: %d bytes", len(initVect), aes.BlockSize) 91 | } 92 | 93 | return cipher.StreamReader{ 94 | S: cipherConstructor(aesEnc, initVect), 95 | R: rdr, 96 | }, nil 97 | } 98 | 99 | //NewWriter constructs a writer wrapper that applies this mode's encryption 100 | func (m Mode) NewWriter(wtr io.WriteCloser, key []byte) (encryptor io.WriteCloser, initVect []byte, err error) { 101 | if m == ModeIdentity { 102 | return wtr, nil, nil 103 | } 104 | 105 | aesEnc, err := aes.NewCipher(key) 106 | if err != nil { 107 | return nil, nil, fmt.Errorf("Could not create AES encryptor: %w", err) 108 | } 109 | 110 | initVect = make([]byte, aes.BlockSize) 111 | if _, err = rand.Read(initVect); err != nil { 112 | return nil, nil, fmt.Errorf("Could not read entropy source to populate AES initialization vector: %w", err) 113 | } 114 | 115 | var cipherConstructor func(cipher.Block, []byte) cipher.Stream 116 | switch m { 117 | case ModeAESCTR: 118 | cipherConstructor = cipher.NewCTR 119 | case ModeAESCFB: 120 | cipherConstructor = cipher.NewCFBEncrypter 121 | case ModeAESOFB: 122 | cipherConstructor = cipher.NewOFB 123 | default: 124 | return nil, nil, fmt.Errorf("Cannot create encryptor for unknown cipher") 125 | } 126 | 127 | return cipher.StreamWriter{ 128 | S: cipherConstructor(aesEnc, initVect), 129 | W: noopWtrCloser{wtr}, //Sheild our writer from being closed 130 | }, initVect, nil 131 | } 132 | 133 | type noopWtrCloser struct { 134 | io.Writer 135 | } 136 | 137 | func (noopWtrCloser) Close() error { return nil } 138 | 139 | //MakeRandomAESKey generates AES-256 (32 byte) key securely by using a cryptographic entropy source 140 | func MakeRandomAESKey() ([]byte, error) { 141 | key := make([]byte, 32) 142 | if _, err := rand.Read(key); err != nil { 143 | return nil, fmt.Errorf("Could not read entropy source to populate key: %w", err) 144 | } 145 | return key, nil 146 | } 147 | 148 | //ValidAESKey confirms the provided key is the correct length for AES/128/224/256 149 | // and is not all zeros 150 | func ValidAESKey(key []byte) error { 151 | switch len(key) { 152 | case 16, 24, 32: 153 | case 0: 154 | return fmt.Errorf("AES key is empty") 155 | default: 156 | return fmt.Errorf("AES key is of an unexpected length: %d bytes (Use 16 bytes for AES-128, 24 bytes for AES-192, or 32 bytes AES-256 [recommended])", len(key)) 157 | } 158 | 159 | if util.IsZeros(key) { 160 | return fmt.Errorf("Provided AES key is all zeros (likely mistake)") 161 | } 162 | return nil 163 | } 164 | -------------------------------------------------------------------------------- /pkg/devices/objstore/encrypt/stow.go: -------------------------------------------------------------------------------- 1 | package encrypt 2 | 3 | import ( 4 | "encoding/hex" 5 | "fmt" 6 | "io" 7 | "strconv" 8 | 9 | "github.com/tarndt/usbd/pkg/util/strms" 10 | 11 | "github.com/graymeta/stow" 12 | ) 13 | 14 | const ( 15 | encryptMetaAlgoHeader = "x-osbd-crypt-alg" 16 | 
encryptMetaIVHeader = "x-osbd-crypt-iv" 17 | encryptMetaSizeHeader = "x-osbd-crypt-size" 18 | ) 19 | 20 | type encryptedContainer struct { 21 | stow.Container 22 | Mode 23 | key []byte 24 | } 25 | 26 | var _ stow.Container = (*encryptedContainer)(nil) 27 | 28 | //NewEncryptedContainer wraps the provided container in transparent encryption and decryption 29 | func NewEncryptedContainer(container stow.Container, mode Mode, key []byte) stow.Container { 30 | return encryptedContainer{ 31 | Container: container, 32 | Mode: mode, 33 | key: key, 34 | } 35 | } 36 | 37 | func (cont encryptedContainer) Item(id string) (stow.Item, error) { 38 | item, err := cont.Container.Item(id) 39 | if item != nil { 40 | item = encryptedItem{Item: item, key: cont.key} 41 | } 42 | return item, err 43 | } 44 | 45 | func (cont encryptedContainer) Items(prefix, cursor string, count int) ([]stow.Item, string, error) { 46 | items, cursor, err := cont.Container.Items(prefix, cursor, count) 47 | for i := range items { 48 | items[i] = encryptedItem{Item: items[i], key: cont.key} 49 | } 50 | return items, cursor, err 51 | } 52 | 53 | func (cont encryptedContainer) Put(name string, rdr io.Reader, size int64, metadata map[string]interface{}) (stow.Item, error) { 54 | if cont.Mode == ModeIdentity { 55 | return cont.Container.Put(name, rdr, size, metadata) 56 | } 57 | 58 | pipeRdr, pipeWtr := io.Pipe() 59 | wtr, initVect, err := cont.NewWriter(pipeWtr, cont.key) 60 | if err != nil { 61 | return nil, fmt.Errorf("Could not create stream encryptor: %w", err) 62 | } 63 | 64 | go func() { 65 | _, err := io.Copy(wtr, rdr) 66 | if err != nil { 67 | pipeWtr.CloseWithError(fmt.Errorf("Copy failed during %s stream encryption: %w", cont.Mode, err)) 68 | wtr.Close() 69 | return 70 | } 71 | 72 | if err = wtr.Close(); err != nil { 73 | pipeWtr.CloseWithError(fmt.Errorf("Close failed during %s stream encryption: %w", cont.Mode, err)) 74 | return 75 | } 76 | 77 | pipeWtr.Close() 78 | }() 79 | 80 | mdWithCrypt := make(map[string]interface{}, len(metadata)+2) 81 | for key, val := range metadata { 82 | mdWithCrypt[key] = val 83 | } 84 | mdWithCrypt[encryptMetaAlgoHeader] = cont.AlgoName() 85 | mdWithCrypt[encryptMetaIVHeader] = hex.EncodeToString(initVect) 86 | mdWithCrypt[encryptMetaSizeHeader] = strconv.FormatInt(size, 36) 87 | 88 | return cont.Container.Put(name, pipeRdr, stow.SizeUnknown, mdWithCrypt) 89 | } 90 | 91 | type encryptedItem struct { 92 | stow.Item 93 | key []byte 94 | } 95 | 96 | var _ stow.Item = (*encryptedItem)(nil) 97 | 98 | func (item encryptedItem) Size() (int64, error) { 99 | md, err := item.Metadata() 100 | if err != nil { 101 | return 0, fmt.Errorf("Opening item metadata to check for encryption failed: %w", err) 102 | } 103 | 104 | hdrAlgoVal, hdrExists := md[encryptMetaAlgoHeader] 105 | if !hdrExists { 106 | return item.Item.Size() 107 | } 108 | 109 | hdrSizeVal, hdrExists := md[encryptMetaSizeHeader] 110 | if !hdrExists { 111 | return 0, fmt.Errorf("Item metadata indicated (%v) encryption but no size was recorded", hdrAlgoVal) 112 | } 113 | 114 | hdrSizeValStr, isString := hdrSizeVal.(string) 115 | if !isString { 116 | return 0, fmt.Errorf("Item metadata indicated (%v) encryption but size (#%v) was a %T not a string", hdrAlgoVal, hdrSizeVal, hdrSizeVal) 117 | } 118 | 119 | size, err := strconv.ParseInt(hdrSizeValStr, 36, 64) 120 | if err != nil { 121 | return 0, fmt.Errorf("Item metadata size %q was not parseable as base36 integer: %w", hdrSizeValStr, err) 122 | } 123 | return size, nil 124 | } 125 | 126 | func (item 
encryptedItem) Open() (io.ReadCloser, error) { 127 | md, err := item.Metadata() 128 | if err != nil { 129 | return nil, fmt.Errorf("Opening item metadata to check for encryption failed: %w", err) 130 | } 131 | 132 | mode := ModeIdentity 133 | var initVect []byte 134 | if hdrAlgoVal, hdrExists := md[encryptMetaAlgoHeader]; hdrExists { 135 | hdrAlgoStr, isString := hdrAlgoVal.(string) 136 | if !isString { 137 | return nil, fmt.Errorf("Item metadata indicated encryption but value (#%v) was a %T, not a string", hdrAlgoVal, hdrAlgoVal) 138 | } 139 | 140 | if mode = ModeFromName(hdrAlgoStr); mode == ModeUnknown { 141 | return nil, fmt.Errorf("Item metadata specified unsupported encryption mode: %q", hdrAlgoStr) 142 | } 143 | 144 | if hdrInitVecVal, hdrExists := md[encryptMetaIVHeader]; !hdrExists { 145 | return nil, fmt.Errorf("Item metadata indicated %s encryption but no IV (initialization vector) was recorded", mode) 146 | } else if hdrInitVecStr, isString := hdrInitVecVal.(string); !isString { 147 | return nil, fmt.Errorf("Item metadata indicated %s encryption but the IV (initialization vector) (#%v) was a %T, not a string", mode, hdrInitVecVal, hdrInitVecVal) 148 | } else if initVect, err = hex.DecodeString(hdrInitVecStr); err != nil { 149 | return nil, fmt.Errorf("Item metadata IV (initialization vector) %q was not parseable as a base16 value: %w", hdrInitVecStr, err) 150 | } 151 | } 152 | 153 | itemRdr, err := item.Item.Open() 154 | if mode == ModeIdentity || err != nil { 155 | return itemRdr, err 156 | } 157 | decryptRdr, err := mode.NewReader(itemRdr, item.key, initVect) 158 | if err != nil { 159 | itemRdr.Close() 160 | return nil, fmt.Errorf("Item decryptor could not be created: %w", err) 161 | } 162 | return strms.NewReadFirstCloseList(decryptRdr, itemRdr), nil 163 | } 164 | -------------------------------------------------------------------------------- /pkg/usbdlib/reqproc.go: -------------------------------------------------------------------------------- 1 | package usbdlib 2 | 3 | import ( 4 | "bufio" 5 | "context" 6 | "fmt" 7 | "io" 8 | "log" 9 | "runtime" 10 | "sync" 11 | ) 12 | 13 | //RecommendWorkerCount returns an empirically derived heuristic for the optimal 14 | // number of work goroutines on this machine 15 | func RecommendWorkerCount() int { 16 | return runtime.NumCPU() * 6 17 | } 18 |
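Note the recycling pattern the processor below depends on: requests and responses are drawn from sync.Pool instances so steady-state request processing allocates almost nothing. A standalone illustration of the same pattern (not the package's actual types):

type frame struct{ payload []byte }

var framePool = sync.Pool{New: func() interface{} { return new(frame) }}

func handle(n int) {
	f := framePool.Get().(*frame) //reuse a prior frame when one is available
	f.payload = f.payload[:0]     //reset state before use; pooled values keep old contents
	f.payload = append(f.payload, byte(n))
	process(f.payload)
	framePool.Put(f) //return it for the next caller
}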
19 | //reqProcessor reads requests from the NBD command stream (an io.ReadWriter, but 20 | // typically a *NbdStream), executes them against the provided Device implementation, 21 | // and then writes responses back to the command stream. 22 | type reqProcessor struct { 23 | blockSize int64 24 | cmdStrm io.ReadWriteCloser 25 | dev Device 26 | reqQueue chan *request 27 | respQueue chan *response 28 | reqPool, respPool sync.Pool 29 | 30 | ctx context.Context 31 | ctxCancel context.CancelFunc 32 | workersWg sync.WaitGroup 33 | } 34 | 35 | func processRequests(ctx context.Context, cmdStrm io.ReadWriteCloser, device Device, workerCount int) error { 36 | ctx, cancel := context.WithCancel(ctx) 37 | 38 | this := &reqProcessor{ 39 | blockSize: device.BlockSize(), 40 | cmdStrm: cmdStrm, 41 | dev: device, 42 | reqQueue: make(chan *request, 64), 43 | respQueue: make(chan *response, 32), 44 | reqPool: sync.Pool{New: newRequest}, 45 | respPool: sync.Pool{New: newResponse}, 46 | ctx: ctx, 47 | ctxCancel: cancel, 48 | } 49 | go this.readStrmWorker() 50 | go this.writeStrmWorker() 51 | 52 | if workerCount < 1 { 53 | workerCount = RecommendWorkerCount() 54 | } 55 | if workerCount -= 2; workerCount < 1 { 56 | workerCount = 1 57 | } 58 | this.workersWg.Add(workerCount) 59 | for i := 0; i < workerCount; i++ { 60 | go this.reqIOWorker() 61 | } 62 | 63 | //Shutdown 64 | <-ctx.Done() //readStrmWorker is checking this and will close this.reqQueue killing IO workers 65 | this.workersWg.Wait() //All IO workers have shutdown 66 | close(this.respQueue) //Kills writeStrmWorker 67 | if err := this.dev.Close(); err != nil { 68 | return fmt.Errorf("Could not close userspace device: %w", err) 69 | } 70 | return nil 71 | } 72 | 73 | func (proc *reqProcessor) readStrmWorker() { 74 | defer close(proc.reqQueue) 75 | 76 | bufStrm := bufio.NewReaderSize(proc.cmdStrm, 16*1024*1024) 77 | var req *request 78 | var err error 79 | flushMu := new(sync.RWMutex) 80 | for { 81 | if err = proc.ctx.Err(); err != nil { 82 | return 83 | } 84 | 85 | req = proc.reqPool.Get().(*request) 86 | if err = req.Decode(bufStrm); err != nil { 87 | if proc.ctx.Err() != nil { 88 | return 89 | } 90 | log.Printf("ReqProcessor::readWorker(): Decode failed; Details: %s", err) 91 | if err = proc.cmdStrm.Close(); err != nil { 92 | log.Printf("ReqProcessor::readWorker(): Could not close command stream; Details: %s", err) 93 | } 94 | proc.ctxCancel() 95 | return 96 | } 97 | switch req.reqType { 98 | case nbdWrite, nbdTrim: 99 | flushMu.RLock() 100 | req.flushMu = flushMu 101 | 102 | case nbdFlush: 103 | req.flushMu = flushMu 104 | flushMu = new(sync.RWMutex) 105 | 106 | case nbdDisconnect: 107 | proc.ctxCancel() 108 | return 109 | } 110 | proc.reqQueue <- req 111 | } 112 | } 113 | 114 | func (proc *reqProcessor) writeStrmWorker() { 115 | var resp *response 116 | var open bool 117 | var err error 118 | 119 | bufStrm := bufio.NewWriterSize(proc.cmdStrm, 16*1024*1024) 120 | defer bufStrm.Flush() 121 | 122 | for { 123 | select { 124 | case resp, open = <-proc.respQueue: 125 | if !open { 126 | return 127 | } 128 | 129 | if err = resp.Write(bufStrm); err != nil { 130 | log.Printf("ReqProcessor::writeWorker(): Response reply failed; Details: %s", err) 131 | proc.ctxCancel() 132 | } 133 | proc.respPool.Put(resp) 134 | 135 | default: 136 | if err = bufStrm.Flush(); err != nil { 137 | log.Printf("ReqProcessor::writeWorker(): Flushing buffered replies failed; Details: %s", err) 138 | proc.ctxCancel() 139 | } 140 | 141 | select { 142 | case resp, open = <-proc.respQueue: 143 | if !open { 144 | return 145 | } 146 | 147 | if err = resp.Write(bufStrm); err != nil { 148 | log.Printf("ReqProcessor::writeWorker(): Response reply failed; Details: %s", err) 149 | proc.ctxCancel() 150 | } 151 | proc.respPool.Put(resp)
152 | } 153 | } 154 | } 155 | 156 | 157 | func (proc *reqProcessor) reqIOWorker() { 158 | defer proc.workersWg.Done() 159 | 160 | var req *request 161 | var resp *response 162 | for req = range proc.reqQueue { 163 | resp = proc.execute(req, proc.respPool.Get().(*response)) 164 | select { 165 | case proc.respQueue <- resp: 166 | proc.reqPool.Put(req) 167 | default: 168 | proc.reqPool.Put(req) 169 | proc.respQueue <- resp 170 | } 171 | } 172 | } 173 | 174 | func (proc *reqProcessor) execute(req *request, resp *response) *response { 175 | if resp == nil { 176 | resp = new(response) 177 | } 178 | 179 | var err error 180 | var errCode nbdErr 181 | 182 | switch req.reqType { 183 | case nbdRead: 184 | if req.pos%proc.blockSize != 0 || int64(req.count)%proc.blockSize != 0 { 185 | err = fmt.Errorf("Assertion failed: Read request was not block aligned (pos=%d,len=%d)", req.pos, req.count) 186 | errCode = ndbRespErrInvalid 187 | break 188 | } 189 | 190 | _, err = proc.dev.ReadAt(resp.GetReadBuffer(req), req.pos) 191 | if err != nil { 192 | errCode = ndbRespErrIO 193 | } 194 | 195 | case nbdWrite: 196 | if req.pos%proc.blockSize != 0 || int64(req.count)%proc.blockSize != 0 { 197 | err = fmt.Errorf("Assertion failed: Write request was not block aligned (pos=%d,len=%d)", req.pos, req.count) 198 | errCode = ndbRespErrInvalid 199 | break 200 | } 201 | 202 | defer req.flushMu.RUnlock() 203 | _, err = proc.dev.WriteAt(req.writeBuffer, req.pos) 204 | if err != nil { 205 | errCode = ndbRespErrIO 206 | } 207 | 208 | case nbdFlush: 209 | req.flushMu.Lock() 210 | req.flushMu.Unlock() //We can release right away, we just need to ensure previous writes finished 211 | err = proc.dev.Flush() 212 | if err != nil { 213 | errCode = ndbRespErrIO 214 | } 215 | 216 | case nbdTrim: 217 | defer req.flushMu.RUnlock() 218 | err = proc.dev.Trim(req.pos, req.count) 219 | if err != nil { 220 | errCode = ndbRespErrIO 221 | } 222 | 223 | default: 224 | errCode = ndbRespErrInvalid 225 | err = fmt.Errorf("Assertion failed: Unknown NBD request type: %d", req.reqType) 226 | } 227 | 228 | if err != nil { 229 | log.Printf("ReqProcessor::execute(): WARNING: Request(type = %d, pos = %d, count = %d) failed and will return %s. Failure was: %s", req.reqType, req.pos, req.count, errCode, err) 230 | } 231 | resp.Set(req, errCode) 232 | return resp 233 | }
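nbdkern_linux.go below manages the kernel module side. For orientation, attaching a served device is a short ioctl sequence against /dev/nbdX using the constants defined earlier (nbdSetSock, nbdSetBlockSize, nbdSetSizeBlocks, nbdDoIt). A hedged sketch of that canonical sequence follows; the package's real attach path lives in nbdstrm.go, which is not shown, and this assumes golang.org/x/sys/unix:

//attachSketch hands the kernel one end of a socketpair and sizes the device;
//nbdDoIt then blocks servicing the device until disconnect.
func attachSketch(devFd, sockFd uintptr, blockSize, sizeBytes uint64) error {
	for _, call := range []struct {
		op  uintptr
		arg uintptr
	}{
		{nbdSetSock, sockFd},
		{nbdSetBlockSize, uintptr(blockSize)},
		{nbdSetSizeBlocks, uintptr(sizeBytes / blockSize)},
	} {
		if _, _, errno := unix.Syscall(unix.SYS_IOCTL, devFd, call.op, call.arg); errno != 0 {
			return errno
		}
	}
	if _, _, errno := unix.Syscall(unix.SYS_IOCTL, devFd, nbdDoIt, 0); errno != 0 { //blocks until NBD_DISCONNECT
		return errno
	}
	return nil
}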
Failure was: %s", req.reqType, req.pos, req.count, errCode, err) 230 | } 231 | resp.Set(req, errCode) 232 | return resp 233 | } 234 | -------------------------------------------------------------------------------- /pkg/usbdlib/nbdkern_linux.go: -------------------------------------------------------------------------------- 1 | package usbdlib 2 | 3 | import ( 4 | "bufio" 5 | "bytes" 6 | "errors" 7 | "fmt" 8 | "io/ioutil" 9 | "os" 10 | "path/filepath" 11 | "strconv" 12 | "strings" 13 | "syscall" 14 | 15 | "github.com/pmorjan/kmod" 16 | "golang.org/x/sys/unix" 17 | ) 18 | 19 | //This is a collection of utilities for managing the Linux kernel NBD module 20 | 21 | func useNbdDev(devPath string) error { 22 | err := nbdLoaded() 23 | if err != nil { 24 | return fmt.Errorf("NBD is not loaded therefore %q could not possibly be an NBD: %w", devPath, err) 25 | } 26 | 27 | if err = validateDevPath(devPath); err != nil { 28 | return fmt.Errorf("%q is not a usable NBD: %w", devPath, err) 29 | } 30 | return nil 31 | } 32 | 33 | func makeNbdDev(maxNBDDevices uint) (string, error) { 34 | err := nbdLoaded() 35 | if err != nil { 36 | if err = loadNbd(maxNBDDevices); err != nil { 37 | return "", fmt.Errorf("NBD was not loaded and an attempt to load it failed: %w", err) 38 | } 39 | } 40 | 41 | devPath, err := nbdFreeDev() 42 | if err != nil { 43 | return "", fmt.Errorf("Did not find usable NBD: %w", err) 44 | } 45 | return devPath, nil 46 | } 47 | 48 | func nbdLoaded() error { 49 | const ( 50 | procfsKMods = "/proc/modules" 51 | nbdModName = "nbd" 52 | liveFlag = "Live" 53 | ) 54 | 55 | fin, err := os.Open(procfsKMods) 56 | if err != nil { 57 | return fmt.Errorf("Could not open procfs file %q listing kernel modules: %w", procfsKMods, err) 58 | } 59 | defer fin.Close() 60 | 61 | var loaded, live bool 62 | lines := bufio.NewScanner(fin) 63 | for lines.Scan() { 64 | if line := lines.Bytes(); bytes.HasPrefix(line, []byte(nbdModName)) { 65 | loaded, live = true, bytes.Contains(line, []byte(liveFlag)) 66 | break 67 | } 68 | } 69 | if err = lines.Err(); err != nil { 70 | return fmt.Errorf("Could not read procfs file %q listing kernel modules: %w", procfsKMods, err) 71 | } 72 | 73 | switch { 74 | case err != nil: 75 | return fmt.Errorf("Could not determine if the NDB kernel module is loaded: %w", err) 76 | case !loaded: 77 | return fmt.Errorf("The NDB kernel module is not loaded") 78 | case !live: 79 | return fmt.Errorf("The NDB kernel module is loaded but is not live") 80 | } 81 | return nil 82 | } 83 | 84 | func loadNbd(maxNBDDevices uint) error { 85 | if maxNBDDevices < 1 { 86 | return fmt.Errorf("Loading NBD with fewer than 1 devices provisioned is pointless") 87 | } 88 | 89 | kmodLoader, err := kmod.New() 90 | if err != nil { 91 | return fmt.Errorf("Failed to contruct kernel module loader: %w", err) 92 | } 93 | 94 | const ndbModuleName = "nbd" 95 | ndbModeuleArg := fmt.Sprintf("nbds_max=%d", maxNBDDevices) 96 | 97 | if err = kmodLoader.Load(ndbModuleName, ndbModeuleArg, 0); err != nil { 98 | switch { 99 | case errors.Is(err, kmod.ErrModuleNotFound), errors.Is(err, kmod.ErrModuleInUse): 100 | return fmt.Errorf("Failed to load kernel module %q: %w", ndbModuleName, err) 101 | } 102 | return fmt.Errorf("Failed to load kernel module %q (ensure this binary has capability \"cap_sys_module\" or root. 
%s: %w", ndbModuleName, capHelp(), err) 103 | } 104 | 105 | if err = nbdLoaded(); err != nil { 106 | return fmt.Errorf("NBD did not appear as loaded after load attempt: %w", err) 107 | } 108 | return nil 109 | } 110 | 111 | func capHelp() string { 112 | return fmt.Sprintf("(Ex. run \"sudo setcap cap_sys_module+ep %s\")", os.Args[0]) 113 | } 114 | 115 | func validateDevPath(devPath string) error { 116 | fstat, err := os.Stat(devPath) 117 | switch { 118 | case errors.Is(err, os.ErrNotExist): 119 | return fmt.Errorf("Provided device %q does not exist", devPath) 120 | case err != nil: 121 | return fmt.Errorf("Provided device %q could not be stat'd", devPath) 122 | case fstat.IsDir(): 123 | return fmt.Errorf("Provided device %q is a directory", devPath) 124 | case fstat.Mode().IsRegular(): 125 | return fmt.Errorf("Provided device %q is not special therefore cannot be a device file", devPath) 126 | case fstat.Size() > 0: 127 | return fmt.Errorf("Provided device %q has a size greater than zero so it may be in use", devPath) 128 | } 129 | 130 | devMajor, err := nbdMajor() 131 | if err != nil { 132 | return fmt.Errorf("Could not determine major device number for NDB: %w", err) 133 | } 134 | 135 | if actualMajor, _, err := ndbMajorMinorForDevice(devPath); err != nil { 136 | return fmt.Errorf("Could not validate device major and minor were correct for %q: %w", devPath, err) 137 | } else if actualMajor != devMajor { 138 | return fmt.Errorf("Create device %q had device major number %d when %d was expected", devPath, actualMajor, devMajor) 139 | } 140 | return nil 141 | } 142 | 143 | func nbdMajor() (int, error) { 144 | const ( 145 | procfsDeviceMajors = "/proc/devices" 146 | nbdDevName = "nbd" 147 | nbdDevSuffix = " " + nbdDevName 148 | ) 149 | 150 | fin, err := os.Open(procfsDeviceMajors) 151 | if err != nil { 152 | return -1, fmt.Errorf("Could not open procfs file %q listing kernel device major numbers: %w", procfsDeviceMajors, err) 153 | } 154 | defer fin.Close() 155 | 156 | lines := bufio.NewScanner(fin) 157 | for lines.Scan() { 158 | if line := lines.Bytes(); bytes.HasSuffix(line, []byte(nbdDevSuffix)) { 159 | devMajor, err := strconv.Atoi(string(bytes.TrimSpace(bytes.TrimSuffix(line, []byte(nbdDevSuffix))))) 160 | if err != nil { 161 | return -1, fmt.Errorf("Could not parse %q device major entry: %q in %q: %w", nbdDevName, line, procfsDeviceMajors, err) 162 | } 163 | return devMajor, nil 164 | } 165 | } 166 | if err = lines.Err(); err != nil { 167 | return -1, fmt.Errorf("Could not read procfs file %q listing device major numbers: %w", procfsDeviceMajors, err) 168 | } 169 | return -1, fmt.Errorf("Could not find device major entry in %q for %q", procfsDeviceMajors, nbdDevName) 170 | } 171 | 172 | func ndbMajorMinorForDevice(devPath string) (major, minor int, err error) { 173 | fstat := syscall.Stat_t{} 174 | if err = syscall.Stat(devPath, &fstat); err != nil { 175 | return -1, -1, fmt.Errorf("Could not stat provided potential device file %q: %w", devPath, err) 176 | } 177 | 178 | if fmode := os.FileMode(fstat.Mode); fmode&os.ModeDevice != 0 { 179 | return -1, -1, fmt.Errorf("Provided device file %q is not a block device: %w", devPath, err) 180 | } 181 | 182 | return int(unix.Major(fstat.Rdev)), int(unix.Minor(fstat.Rdev)), nil 183 | } 184 | 185 | func nbdFreeDev() (string, error) { 186 | const sysblockDevsDir = "/sys/block/" 187 | dirs, err := ioutil.ReadDir(sysblockDevsDir) 188 | if err != nil { 189 | return "", fmt.Errorf("Could not list blocks devices in sysfs directory %q: %w", 
-------------------------------------------------------------------------------- /pkg/devices/dedupdisk/dedupdisk.go: -------------------------------------------------------------------------------- 1 | package dedupdisk 2 | 3 | //NOTE: This device is older and generally of lower code quality than the 4 | // simple devices (RAMDisk, FileDisk), which have been updated, or the new ObjectStore 5 | // device. If looking for reference examples refer to those implementations instead. 6 | 7 | import ( 8 | "context" 9 | "errors" 10 | "fmt" 11 | "io" 12 | "sync" 13 | 14 | "github.com/tarndt/usbd/pkg/usbdlib" 15 | ) 16 | 17 | var errShutdown = errors.New("Device is shutdown") 18 | 19 | type dedupDisk struct { 20 | lunMap LUNMap 21 | idStore IDStore 22 | blockStore BlockStore 23 | blockSize, size int64 24 | errNotPresent error 25 | usbdlib.DefaultBlockSize 26 | 27 | optMu sync.RWMutex 28 | ctx context.Context 29 | ctxCancel context.CancelFunc 30 | closeOnce sync.Once 31 | } 32 | 33 | //NewDedupDisk is the constructor for file backed devices that use PebbleDB to deduplicate 34 | // their blocks 35 | func NewDedupDisk(lunMap LUNMap, idStore IDStore, blockStore BlockStore) usbdlib.Device { 36 | ctx, cancel := context.WithCancel(context.Background()) 37 | 38 | this := &dedupDisk{ 39 | lunMap: lunMap, 40 | idStore: idStore, 41 | blockStore: blockStore, 42 | errNotPresent: idStore.GetErrNotPresent(), 43 | ctx: ctx, 44 | ctxCancel: cancel, 45 | } 46 | this.blockSize, this.size = this.BlockSize(), this.lunMap.Size() 47 | return this 48 | } 49 | 50 | //Size of this device in bytes 51 | func (dd *dedupDisk) Size() int64 { 52 | return dd.size 53 | } 54 | 55 | //ReadAt fulfills io.ReaderAt and in turn part of usbdlib.Device 56 | func (dd *dedupDisk) ReadAt(buf []byte, pos int64) (count int, err error) { 57 | dd.optMu.RLock() 58 | defer dd.optMu.RUnlock() 59 | 60 | if dd.ctx.Err() != nil { 61 | return -1, errShutdown 62 | } 63 | 64 | readSize := int64(len(buf)) 65 | if pos < 0 { 66 | return 0, fmt.Errorf("Out of bounds read with negative position") 67 | } else if pos+readSize > dd.size { 68 | return 0, io.EOF 69 | } 70 | 71 | block := uint64(pos / dd.blockSize) 72 | if readSize > dd.blockSize { 73 | //more common multiblock read path 74 | blockc := readSize / dd.blockSize 75 | dedupIds, errors := make([]uint64, blockc), make([]error, blockc) 76 | if err = dd.lunMap.GetIDs(block, dedupIds); err != nil { 77 | return 0, fmt.Errorf("lunMap lookup of %d LUN blocks starting %d failed: %w", blockc, block, err) 78 | } 79 | //Run our many requests 80 | var dedupID uint64 81 | var bufStart int64 82 | var i int 83 | var wg sync.WaitGroup 84 | wg.Add(int(blockc)) 85 | for i, dedupID = range dedupIds { 86 | go func(id uint64, buffer []byte, err *error, wg *sync.WaitGroup) { 87 | if readError := dd.blockStore.GetBlock(id, buffer); readError != nil { 88 | *err = fmt.Errorf("Fetch of dedup block %d failed: %w", id, readError) //use the id parameter; the dedupID loop variable races 89 | } 90 |
90 | wg.Done()
91 | }(dedupID, buf[bufStart:bufStart+dd.blockSize], &errs[i], &wg)
92 | bufStart += dd.blockSize
93 | }
94 |
95 | wg.Wait()
96 | if err = getFirstError(errs); err != nil {
97 | return 0, err
98 | }
99 | } else {
100 | //fast-path for read single block
101 | var dedupID uint64
102 | if dedupID, err = dd.lunMap.GetID(block); err != nil {
103 | return 0, fmt.Errorf("lunMap lookup of LUN block %d failed: %w", block, err)
104 | }
105 | if err = dd.blockStore.GetBlock(dedupID, buf); err != nil {
106 | return 0, fmt.Errorf("Fetch of dedup block %d from block store failed: %w", dedupID, err)
107 | }
108 | }
109 | return int(readSize), nil
110 | }
111 |
112 | func getFirstError(errs []error) (err error) {
113 | for _, err = range errs {
114 | if err != nil {
115 | break
116 | }
117 | }
118 | return
119 | }
120 |
121 | //WriteAt fulfills io.WriterAt and in turn part of usbdlib.Device
122 | func (dd *dedupDisk) WriteAt(buf []byte, pos int64) (count int, err error) {
123 | dd.optMu.RLock()
124 | defer dd.optMu.RUnlock()
125 |
126 | if dd.ctx.Err() != nil {
127 | return -1, errShutdown
128 | }
129 |
130 | writeSize := int64(len(buf))
131 | if pos < 0 {
132 | return 0, fmt.Errorf("Out of bounds write with negative position")
133 | } else if pos+writeSize > dd.size {
134 | return 0, io.ErrUnexpectedEOF
135 | }
136 |
137 | if writeSize > dd.blockSize {
138 | //more common multiblock write path
139 | blockc := int(writeSize / dd.blockSize)
140 | errs := make([]error, blockc)
141 | var wg sync.WaitGroup
142 | wg.Add(blockc)
143 | for i, bufPos := 0, int64(0); i < blockc; i++ {
144 | /*go*/ func(buffer []byte, pos int64, err *error) { //TODO limit max goroutines
145 | if _, writeErr := dd.WriteAt(buffer, pos); writeErr != nil { //check writeErr, not the captured err pointer
146 | *err = writeErr
147 | }
148 | wg.Done()
149 | }(buf[bufPos:bufPos+dd.blockSize], pos, &errs[i])
150 | pos += dd.blockSize
151 | bufPos += dd.blockSize
152 | }
153 |
154 | wg.Wait()
155 | if err = getFirstError(errs); err != nil {
156 | return 0, err
157 | }
158 | } else {
159 | //fast-path for write single block
160 | var dedupID uint64
161 | var hash []byte
162 | block := uint64(pos / dd.blockSize)
163 | if dedupID, hash, err = dd.idStore.GetID(buf); err == dd.errNotPresent {
164 | if dedupID, err = dd.blockStore.PutBlock(buf); err != nil {
165 | return 0, fmt.Errorf("Addition of dedup block %d to block store failed: %w", dedupID, err)
166 | }
167 | if err = dd.idStore.PutID(hash, dedupID); err != nil { //assign err so this check is meaningful
168 | return 0, fmt.Errorf("Addition of dedup block %d to idStore failed: %w", dedupID, err)
169 | }
170 | } else if err != nil {
171 | return 0, fmt.Errorf("ID store lookup of write block %d failed: %w", block, err)
172 | }
173 | if err = dd.lunMap.PutID(block, dedupID); err != nil {
174 | return 0, fmt.Errorf("Addition of dedup block ID %d to lunMap failed: %w", dedupID, err)
175 | }
176 | }
177 | return int(writeSize), nil
178 | }
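//Write-path sketch (a restatement of the single-block branch above using this
// file's own interfaces; it introduces no new API):
//
//	id, hash, err := idStore.GetID(block)    // content-hash lookup
//	if err == errNotPresent {                // novel content:
//		id, err = blockStore.PutBlock(block) //   store the bytes exactly once
//		err = idStore.PutID(hash, id)        //   remember hash -> id
//	}
//	err = lunMap.PutID(lba, id)              // point the LUN block at the shared copy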
179 |
180 | //Trim fulfills part of usbdlib.Device
181 | func (*dedupDisk) Trim(pos int64, count int) error {
182 | return nil //TODO
183 | }
184 |
185 | //Flush fulfills part of usbdlib.Device
186 | func (dd *dedupDisk) Flush() error {
187 | dd.optMu.RLock()
188 | defer dd.optMu.RUnlock()
189 |
190 | if dd.ctx.Err() != nil {
191 | return errShutdown
192 | }
193 |
194 | errCh := make(chan error, 6)
195 | var wg sync.WaitGroup
196 | wg.Add(3)
197 | flush := func(resource FlushClose) {
198 | defer wg.Done()
199 | if err := resource.Flush(); err != nil {
200 | errCh <- err
201 | }
202 | }
203 | go flush(dd.lunMap)
204 | go flush(dd.idStore)
205 | go flush(dd.blockStore)
206 | wg.Wait()
207 | select { //return first error
208 | case err := <-errCh:
209 | return fmt.Errorf("One or more errors occurred: %w", err)
210 | default:
211 | }
212 | return nil
213 | }
214 |
215 | //Close fulfills io.Closer and in turn part of usbdlib.Device
216 | func (dd *dedupDisk) Close() (err error) {
217 | dd.closeOnce.Do(func() {
218 | dd.optMu.Lock()
219 | defer dd.optMu.Unlock()
220 |
221 | dd.ctxCancel()
222 | err = dd.close()
223 | })
224 | return err
225 | }
226 |
227 | func (dd *dedupDisk) close() error {
228 | errCh := make(chan error, 6)
229 | var wg sync.WaitGroup
230 | wg.Add(3)
231 | cleanup := func(resource FlushClose) {
232 | defer wg.Done()
233 | if err := resource.Flush(); err != nil {
234 | errCh <- err
235 | }
236 | if err := resource.Close(); err != nil {
237 | errCh <- err
238 | }
239 | }
240 | go cleanup(dd.lunMap)
241 | go cleanup(dd.idStore)
242 | go cleanup(dd.blockStore)
243 | wg.Wait()
244 | select { //return first error
245 | case err := <-errCh:
246 | return fmt.Errorf("One or more errors occurred: %w", err)
247 | default:
248 | }
249 | return nil
250 | }
251 |
--------------------------------------------------------------------------------
/pkg/devices/dedupdisk/impls/lz4blkstore.go:
--------------------------------------------------------------------------------
1 | package impls
2 |
3 | //IMPORTANT: This code is old and experimental, and testing has shown it to be buggy
4 | //TODO:
5 | // * Think about how to best take advantage of sparse files...
6 | // * Using a non-cgo implementation of LZ4 or maybe S2 instead...
7 | // * Fix bugs?
8 |
9 | /*
10 | #include "lz4.h"
11 | #cgo CFLAGS: -march=native -O2
12 | */
13 | import "C"
14 |
15 | import (
16 | "encoding/binary"
17 | "fmt"
18 | "math"
19 | "sync"
20 | "unsafe"
21 |
22 | "github.com/tarndt/usbd/pkg/devices/dedupdisk"
23 | "github.com/tarndt/usbd/pkg/util/consterr"
24 | )
25 |
26 | /* Compress block format:
27 | [block 0, m bytes]
28 | [block 1, m bytes]
29 | [possible free space ...]
30 | [header end, 2 bytes]
31 | [header 1, 2 bytes]
32 | [header 0, 2 bytes]
33 | [header entry count, 1 byte]
34 | */
35 |
36 | type lz4BlockStore struct {
37 | blockSize uint64
38 | maxLZ4BlockSize uint64
39 | fbs *fileBlockStore
40 | blockPool sync.Pool
41 |
42 | curBlockLock sync.Mutex
43 | curBlockID uint64
44 | curBlock []byte
45 | curBlockPos uint64
46 | curBlockCount uint8
47 | }
48 |
49 | //NewLZ4BlockStore constructs a dedupdisk.BlockStore backed by an LZ4 compressed file. WIP...
50 | // will return an error stating LZ4BlockStore is experimental. It exists in the source
51 | // tree for educational purposes at the moment.
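// Sketch of the ID encoding this store uses (inferred from the GetBlock decode
// logic below; an internal detail, not a public contract): the top byte of a
// dedupID holds the compression-header index and the low 56 bits hold the
// underlying file block ID:
//
//	offsetIndex := dedupID >> 56            // math.MaxUint8 means "stored uncompressed"
//	blockID := dedupID & 0x00FFFFFFFFFFFFFF // ID within the backing fileBlockStore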
52 | func NewLZ4BlockStore(filename string, blockSize int64) (dedupdisk.BlockStore, error) {
53 | return nil, fmt.Errorf("LZ4BlockStore is experimental and has bugs: %w", consterr.ErrNotImplemented)
54 | // STOP people from using this for now :(
55 |
56 | /*fbs, err := NewFileBlockStore(filename, blockSize)
57 | if err != nil {
58 | return nil, fmt.Errorf("Could not create backing FileBlockStore %q: %w", filename, err)
59 | }
60 | lzbs := &lz4BlockStore{
61 | blockSize: uint64(blockSize),
62 | maxLZ4BlockSize: uint64(lz4MaxCompressedSize(int(blockSize))),
63 | fbs: fbs,
64 | curBlock: make([]byte, blockSize),
65 | }
66 | lzbs.blockPool = sync.Pool{New: lzbs.NewBlock}
67 | return lzbs, nil*/
68 | }
69 |
70 | func (lzbs *lz4BlockStore) NewBlock() interface{} {
71 | return make([]byte, lzbs.blockSize)
72 | }
73 |
74 | func (lzbs *lz4BlockStore) GetBlock(dedupID uint64, buf []byte) (err error) {
75 | //Check if this is the zero block
76 | if dedupID == zeroBlockID {
77 | copy(buf, lzbs.fbs.zeroBlock)
78 | return nil
79 | }
80 |
81 | //Calculate the compression header location and underlying block ID
82 | offsetIndex := dedupID >> 56
83 | dedupID = dedupID & 0x00FFFFFFFFFFFFFF
84 | if offsetIndex == math.MaxUint8 { //this block was not compressed
85 | return lzbs.fbs.GetBlock(dedupID, buf)
86 | }
87 |
88 | //Setup buffers
89 | compressBuf := lzbs.blockPool.Get().([]byte)
90 | compressedData := compressBuf[:lzbs.blockSize]
91 |
92 | //Get underlying (compressed) block
93 | if err = lzbs.fbs.GetBlock(dedupID, compressedData); err != nil {
94 | lzbs.blockPool.Put(compressBuf)
95 | return err
96 | }
97 |
98 | //Convert offset index to byte position within block
99 | offsetPos := lzbs.blockSize - (((offsetIndex + 1) << 1) + 1)
100 | start := binary.LittleEndian.Uint16(compressedData[offsetPos:])
101 | end := binary.LittleEndian.Uint16(compressedData[offsetPos-2:])
102 | n := lz4Decompress(buf, compressedData[start:end])
103 | lzbs.blockPool.Put(compressBuf)
104 | // log.Printf("Read compressed block (%d bytes) from blockstore block ID: %d at pos: %d to %d, header index: %d, hdrStart: %d\n", n, dedupID, start, end, offsetIndex, offsetPos)
105 | // log.Printf("Compressed Data: %s\n", hex.EncodeToString(compressedData[start:end]))
106 | // log.Printf("Read Block: %s\n", hex.EncodeToString(buf))
107 | //log.Printf("Entire Compressed Block: %s\n", hex.EncodeToString(compressedData))
108 | if n < 0 {
109 | return fmt.Errorf("Compressed block was malformed (errorcode = %d)", n)
110 | }
111 | return nil
112 | }
113 |
114 | /*
115 | PutBlock:
116 | 1. If block fits in current block
117 | 1. Insert into current block
118 | 2. Write current block to disk
119 | 2. Else
120 | 1. Write current block to disk
121 | 2. Reset current block
122 | 3. If block fits in new empty current block
123 | 1. Insert into current block
124 | 2. Write current block to disk
125 | 4. Else
126 | 1. Write to disk uncompressed
127 | 2. Update current block pos */
128 | func (lzbs *lz4BlockStore) PutBlock(buf []byte) (dedupID uint64, err error) {
129 | //log.Printf("Original Data: %s\n", hex.EncodeToString(buf))
130 | compressBuf := lzbs.blockPool.Get().([]byte)
131 | n := uint64(lz4Compress(compressBuf, buf))
132 | lzbs.curBlockLock.Lock()
133 | hdrStartOffset := lzbs.blockSize - ((uint64(lzbs.curBlockCount+1) << 1) + 1)
134 | hdrEndOffset := hdrStartOffset - 2
135 | if lzbs.curBlockPos+n < hdrEndOffset && lzbs.curBlockCount+1 < math.MaxUint8 { //Write fits in current block
136 | dedupID, err = lzbs.writeToMem(compressBuf, n, hdrStartOffset)
137 | } else { //write does not fit in current block
138 | //Reset current block
139 | lzbs.curBlockPos, lzbs.curBlockCount, lzbs.curBlockID, hdrStartOffset = 0, 0, 0, lzbs.blockSize-3
140 | if lzbs.curBlockPos+n < hdrEndOffset { //Write fits in new empty current block
141 | dedupID, err = lzbs.writeToMem(compressBuf, n, hdrStartOffset)
142 | } else { //Write to disk uncompressed
143 | lzbs.blockPool.Put(compressBuf)
144 | lzbs.curBlockLock.Unlock()
145 | const noCompressionOffsetMask = (uint64(math.MaxUint8)) << 56
146 | dedupID, err = lzbs.fbs.PutBlock(buf)
147 | dedupID = noCompressionOffsetMask | dedupID //OR, not AND: set the top byte to flag "uncompressed" while preserving the block ID
148 | }
149 | }
150 | return dedupID, err
151 | }
152 |
153 | func (lzbs *lz4BlockStore) writeToMem(compressBuf []byte, n, hdrStartOffset uint64) (dedupID uint64, err error) {
154 | //Insert into current block
155 | copy(lzbs.curBlock[lzbs.curBlockPos:], compressBuf[:n])
156 | lzbs.blockPool.Put(compressBuf)
157 | //Write headers
158 | binary.LittleEndian.PutUint16(lzbs.curBlock[hdrStartOffset:], uint16(lzbs.curBlockPos))
159 | lzbs.curBlockPos += n
160 | binary.LittleEndian.PutUint16(lzbs.curBlock[hdrStartOffset-2:], uint16(lzbs.curBlockPos))
161 | lzbs.curBlockCount++
162 | lzbs.curBlock[lzbs.blockSize-1] = lzbs.curBlockCount
163 | //Write current block to disk
164 | if lzbs.curBlockCount > 1 { //Add to the existing current disk block; the first write into a fresh block must allocate it below
165 | dedupID, err = lzbs.fbs.PutBlockAt(lzbs.curBlock, lzbs.curBlockID)
166 | } else { //Get a new disk block
167 | dedupID, err = lzbs.fbs.PutBlock(lzbs.curBlock)
168 | lzbs.curBlockID = dedupID
169 | }
170 | lzbs.curBlockLock.Unlock()
171 | // log.Printf("Wrote compressed block (%d bytes) to blockstore block ID: %d at pos: %d to %d, header index: %d, hdrStart: %d\n", n, dedupID, lzbs.curBlockPos-n, lzbs.curBlockPos, lzbs.curBlockCount-1, hdrStartOffset)
172 | // log.Printf("Compressed Data: %s\n", hex.EncodeToString(compressBuf[:n]))
173 | dedupID = (uint64(lzbs.curBlockCount-1) << 56) | dedupID //pack the header index into the top byte (shift by 56 and OR, matching the GetBlock decode)
174 | return dedupID, err
175 | }
176 |
177 | func (lzbs *lz4BlockStore) Flush() error {
178 | lzbs.curBlockLock.Lock()
179 | defer lzbs.curBlockLock.Unlock()
180 | return lzbs.fbs.Flush()
181 | }
182 |
183 | func (lzbs *lz4BlockStore) Close() error {
184 | lzbs.curBlockLock.Lock()
185 | defer lzbs.curBlockLock.Unlock()
186 | return lzbs.fbs.Close()
187 | }
188 |
189 | func lz4Compress(dst, src []byte) (n int) {
190 | return int(C.LZ4_compress((*C.char)(unsafe.Pointer(&src[0])), (*C.char)(unsafe.Pointer(&dst[0])), C.int(len(src))))
191 | }
192 |
193 | func lz4Decompress(dst, src []byte) (n int) {
194 | return int(C.LZ4_decompress_fast((*C.char)(unsafe.Pointer(&src[0])), (*C.char)(unsafe.Pointer(&dst[0])), C.int(len(dst))))
195 | }
196 |
197 | func lz4MaxCompressedSize(uncompressedSize int) int {
198 | return int(C.LZ4_compressBound(C.int(uncompressedSize)))
199 | }
200 |
--------------------------------------------------------------------------------
/pkg/devices/testutil/helpers.go:
--------------------------------------------------------------------------------
1 | package testutil
2 |
3 | import (
4 | "bufio"
5 | "bytes"
6 | "context"
7 | "crypto/sha256"
8 | "encoding/hex"
9 | "io"
10 | "testing"
11 | "time"
12 |
13 | "github.com/tarndt/usbd/pkg/usbdlib"
14 | "github.com/tarndt/usbd/pkg/util/strms"
15 | )
16 |
17 | //CreateContext creates a context aware of the provided testing.T's deadline
18 | func CreateContext(t *testing.T) context.Context {
19 | const defaultTO = time.Minute
20 | deadline, hasDeadline := t.Deadline()
21 | if !hasDeadline {
22 | deadline = time.Now().Add(defaultTO)
23 | }
24 |
25 | ctx, cancel := context.WithDeadline(context.Background(), deadline)
26 | t.Cleanup(cancel)
27 | return ctx
28 | }
29 |
30 | //TestDevSize verifies the provided device reports the correct size
31 | func TestDevSize(t *testing.T, dev usbdlib.Device, expectedSize uint) {
32 | t.Run("dev-size", func(t *testing.T) {
33 | if actual := dev.Size(); actual != int64(expectedSize) {
34 | t.Fatalf("Expected device size to be %d but it was %d", expectedSize, actual)
35 | }
36 | })
37 | }
38 |
39 | //TestReadEmpty verifies the provided device reads as empty
40 | func TestReadEmpty(t *testing.T, dev usbdlib.Device, boundaryBytes uint) {
41 | count := uint(dev.Size())
42 |
43 | t.Run("read-empty", func(t *testing.T) {
44 | t.Run("unbuffered", func(t *testing.T) {
45 | count := unbufCount(count)
46 | buf, rdr := make([]byte, 1), strms.NewReadAtReader(dev)
47 | for i := uint(0); i < count; i++ {
48 | if _, err := rdr.Read(buf); err != nil {
49 | t.Fatalf("Failed to read byte %d of %d: %s", i+1, count, err)
50 | }
51 | if buf[0] != 0 {
52 | t.Fatalf("Non-zero value %d found", buf[0])
53 | }
54 | }
55 | })
56 |
57 | t.Run("buffered", func(t *testing.T) {
58 | rdr := bufio.NewReader(strms.NewReadAtReader(dev))
59 | for i := uint(0); i < count; i++ {
60 | actual, err := rdr.ReadByte()
61 | if err != nil {
62 | t.Fatalf("Failed to read zero byte: %s", err)
63 | }
64 | if actual != 0 {
65 | t.Fatalf("Wrong value %d found, expecting %d", actual, 0)
66 | }
67 | }
68 | })
69 |
70 | t.Run("misaligned", func(t *testing.T) {
71 | if boundaryBytes < 1 {
72 | t.Skipf("No alignment boundary for test")
73 | }
74 | if count < boundaryBytes*2 {
75 | t.Skipf("Two segments worth of total capacity required for test")
76 | }
77 |
78 | t.Run("two-segments", func(t *testing.T) {
79 | twoSegSize := boundaryBytes * 2
80 | buf := make([]byte, twoSegSize)
81 | n, err := dev.ReadAt(buf, 0)
82 | switch {
83 | case err != nil:
84 | t.Fatalf("Failed to read two segments: %s", err)
85 | case uint(n) != twoSegSize:
86 | t.Fatalf("Read returned %d bytes rather than two segments (%d bytes)", n, twoSegSize)
87 | }
88 | for _, b := range buf { //"b" rather than shadowing the builtin byte type
89 | if b != 0 {
90 | t.Fatalf("Expected all zeros, found %d", b)
91 | }
92 | }
93 | })
94 |
95 | t.Run("span-segments", func(t *testing.T) {
96 | buf := make([]byte, boundaryBytes)
97 | n, err := dev.ReadAt(buf, int64(boundaryBytes/2))
98 | switch {
99 | case err != nil:
100 | t.Fatalf("Failed to read a span crossing two segments: %s", err)
101 | case uint(n) != boundaryBytes:
102 | t.Fatalf("Read returned %d bytes rather than the expected %d bytes", n, boundaryBytes)
103 | }
104 | for _, b := range buf {
105 | if b != 0 {
106 | t.Fatalf("Expected all zeros, found %d", b)
107 | }
108 | }
109 | })
110 | })
111 | })
112 | }
113 |
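//A typical device test composes the helpers in this file in order (an
// illustrative sketch; newMyDevice is a hypothetical constructor and a
// boundaryBytes of 0 simply skips the misaligned sub-tests):
//
//	func TestMyDevice(t *testing.T) {
//		const size = 64 * 1024 * 1024 //64 MB
//		dev := newMyDevice(size)
//		TestDevSize(t, dev, size)
//		TestReadEmpty(t, dev, 0)
//		hash := TestWriteReadPattern(t, dev)
//		TestReadHash(t, dev, hash)
//		TestClose(t, dev)
//	}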
114 | //TestWriteReadPattern writes and reads a pattern to the provided device
115 | func TestWriteReadPattern(t *testing.T, dev usbdlib.Device) (writtenHash []byte) {
116 | count := uint(dev.Size())
117 |
118 | t.Run("write-read-pattern", func(t *testing.T) {
119 | t.Run("unbuffered", func(t *testing.T) {
120 | count := unbufCount(count)
121 | buf, wtr := make([]byte, 1), strms.NewWriteAtWriter(dev)
122 | for i := uint(0); i < count; i++ {
123 | writeVal := byte(i % 256)
124 | if _, err := wtr.Write([]byte{writeVal}); err != nil {
125 | t.Fatalf("Failed to write byte %d of %d: %s", i+1, count, err)
126 | }
127 | if _, err := dev.ReadAt(buf, int64(i)); err != nil {
128 | t.Fatalf("Failed to read byte just written: %s", err)
129 | }
130 | if buf[0] != writeVal {
131 | t.Fatalf("Wrong value %d found at pos %d, expecting %d", buf[0], i, writeVal)
132 | }
133 | }
134 | if err := dev.Flush(); err != nil {
135 | t.Fatalf("Failed to flush device: %s", err)
136 | }
137 | })
138 |
139 | t.Run("buffered", func(t *testing.T) {
140 | const offset = 16
141 |
142 | getVal := func(idx uint) byte {
143 | if (idx/4096)%2 == 0 {
144 | return byte((idx + offset) % 256)
145 | }
146 | return 0
147 | }
148 |
149 | t.Run("write", func(t *testing.T) {
150 | hashWtr := sha256.New()
151 | wtr := bufio.NewWriter(io.MultiWriter(strms.NewWriteAtWriter(dev), hashWtr))
152 | for i := uint(0); i < count; i++ {
153 | if err := wtr.WriteByte(getVal(i)); err != nil {
154 | t.Fatalf("Failed to write byte %d of %d: %s", i+1, count, err)
155 | }
156 | }
157 |
158 | t.Run("flush", func(t *testing.T) {
159 | t.Run("buffer", func(t *testing.T) {
160 | if err := wtr.Flush(); err != nil {
161 | t.Fatalf("Failed to flush buffered writer: %s", err)
162 | }
163 | })
164 | writtenHash = hashWtr.Sum(nil)
165 |
166 | t.Run("device", func(t *testing.T) {
167 | if err := dev.Flush(); err != nil {
168 | t.Fatalf("Failed to flush device: %s", err)
169 | }
170 | })
171 | })
172 | })
173 |
174 | t.Run("read", func(t *testing.T) {
175 | t.Run("read-bytes", func(t *testing.T) {
176 | rdr := bufio.NewReader(strms.NewReadAtReader(dev))
177 | for i := uint(0); true; i++ {
178 | actual, err := rdr.ReadByte()
179 | if err != nil {
180 | if err == io.EOF && i == count {
181 | break
182 | }
183 | t.Fatalf("Failed to read byte index %d of %d just written: %s", i, count, err)
184 | }
185 | if expected := getVal(i); actual != expected {
186 | t.Fatalf("Wrong value %d found, expecting %d", actual, expected)
187 | }
188 | }
189 | })
190 |
191 | TestReadHash(t, dev, writtenHash)
192 | })
193 | })
194 | })
195 |
196 | return writtenHash
197 | }
198 |
199 | //TestReadHash confirms the SHA-256 of the provided device's contents matches the expected hash
200 | func TestReadHash(t *testing.T, dev usbdlib.Device, expectedHash []byte) {
201 | t.Run("read-hash", func(t *testing.T) {
202 | hashWtr := sha256.New()
203 | if n, err := io.Copy(hashWtr, bufio.NewReader(strms.NewReadAtReader(dev))); err != nil {
204 | t.Fatalf("Failed to calculate SHA256 of device: %s", err)
205 | } else if devSize := dev.Size(); n != devSize {
206 | t.Fatalf("While calculating SHA256 of device %d bytes were found instead of %d", n, devSize)
207 | } else if readHash := hashWtr.Sum(nil); !bytes.Equal(expectedHash, readHash) {
208 | t.Fatalf("SHA256 read from device was %s but %s was expected", hex.EncodeToString(readHash), hex.EncodeToString(expectedHash))
209 | }
210 | })
211 | }
212 |
213 | //TestClose confirms the device closes without error and that subsequent operations fail as expected
214 | func TestClose(t *testing.T, dev usbdlib.Device) {
215 | t.Run("close", func(t *testing.T) {
216 | if err := dev.Close(); err != nil {
217 | t.Fatalf("Failed to close device: %s", err)
218 | }
219 |
220 | buf := make([]byte, 1)
221 | if _, err := dev.ReadAt(buf, 0); err == nil {
222 | t.Fatal("Expected error during read on closed device")
223 | }
224 | if _, err := dev.WriteAt(buf, 0); err == nil {
225 | t.Fatal("Expected error during write on closed device")
226 | }
227 | if err := dev.Flush(); err == nil {
228 | t.Fatal("Expected error during flush on closed device")
229 | }
230 | })
231 | }
232 |
233 | func unbufCount(count uint) uint {
234 | const ThreeMB = 3 * 1024 * 1024
235 | unbufCount := count / 3
236 | if unbufCount > ThreeMB {
237 | unbufCount = ThreeMB
238 | }
239 | return unbufCount
240 | }
241 |
--------------------------------------------------------------------------------
/pkg/usbdlib/nbdstrm.go:
--------------------------------------------------------------------------------
1 | package usbdlib
2 |
3 | import (
4 | "context"
5 | "errors"
6 | "fmt"
7 | "net"
8 | "os"
9 | "strings"
10 | "sync"
11 | "syscall"
12 | )
13 |
14 | //DefMaxNBDDevices is the number of NBD devices allocated when the NBD Linux
15 | // kernel module is loaded and the user does not provide a device count.
16 | // Important: The NBD kernel module does not support dynamic device creation after
17 | // load via udev or mknod!
18 | const DefMaxNBDDevices = 32
19 |
20 | //NbdStream manages the kernel-space resources that are associated with a network block device (NBD)
21 | type NbdStream struct {
22 | dev Device
23 | net.Conn
24 | ctx context.Context
25 | ctxCancel context.CancelFunc
26 | errCh chan error
27 | }
28 |
29 | //NewNbdHandler constructs a new NbdStream instance that handles requests for the
30 | // provided Device.
31 | func NewNbdHandler(ctx context.Context, dev Device, options ...interface{}) (*NbdStream, string, error) {
32 | var (
33 | blockDeviceName string
34 | err error
35 | )
36 |
37 | maxNBDDevices := uint(DefMaxNBDDevices)
38 | if len(options) == 1 { //Options hold a single argument telling us the max devices to create on NBD load
39 | var isInt bool
40 | if maxNBDDevices, isInt = options[0].(uint); isInt {
41 | options = nil
42 | }
43 | }
44 |
45 | if len(options) > 0 { //Args are one or more device names to try
46 | devNamesTried := make([]string, 0, len(options))
47 | var devName string
48 | for _, opt := range options {
49 | var isString bool
50 | devName, isString = opt.(string)
51 |
52 | if !isString {
53 | continue
54 | } else if err = useNbdDev(devName); err != nil {
55 | devNamesTried = append(devNamesTried, devName)
56 | continue
57 | }
58 |
59 | blockDeviceName = devName
60 | break
61 | }
62 | if blockDeviceName == "" {
63 | return nil, "", fmt.Errorf("Could not use any of the provided device names: %s. Error on %q: %w", strings.Join(devNamesTried, ","), devName, err)
64 | }
65 | } else {
66 | blockDeviceName, err = makeNbdDev(maxNBDDevices)
67 | if err != nil {
68 | return nil, "", fmt.Errorf("Could not create NBD: %w", err)
69 | }
70 | }
71 |
72 | strm, err := newNbdStream(ctx, dev, blockDeviceName)
73 | return strm, blockDeviceName, err
74 | }
75 |
76 | func newNbdStream(ctx context.Context, dev Device, blockDeviceName string) (*NbdStream, error) {
77 | devFile, userSockFd, kernelSockFd, err := setupSyscalls(blockDeviceName, dev)
78 | if err != nil {
79 | return nil, err
80 | }
81 |
82 | cmdSocket, err := net.FileConn(os.NewFile(uintptr(userSockFd), ""))
83 | if err != nil {
84 | return nil, fmt.Errorf("Could not create command socket: %w", err)
85 | }
86 |
87 | //Error handling helpers
88 | errCh := make(chan error, 1)
89 | sendErr := func(err error) {
90 | select {
91 | case errCh <- err:
92 | default:
93 | }
94 | }
95 |
96 | ctx, cancel := context.WithCancel(ctx)
97 |
98 | var wgDone sync.WaitGroup
99 | wgDone.Add(2)
100 | nbdDoItWorker := func() { //Finish setup...
101 | var err error
102 | defer func() {
103 | wgDone.Done()
104 | cancel()
105 | }()
106 |
107 | //Ask NBD to begin service, this blocks until NBD service terminates
108 | _, _, err = sysCall(syscall.SYS_IOCTL, devFile.Fd(), nbdDoIt, 0)
109 | if err != nil {
110 | sendErr(fmt.Errorf("NBD \"Do it\" failed; Details: %w", err))
111 | }
112 | }
113 |
114 | var wgRunning sync.WaitGroup
115 | wgRunning.Add(1)
116 | scanPartTableThenCleanupWorker := func() {
117 | var err error
118 | defer func() {
119 | wgDone.Done()
120 | wgDone.Wait()
121 |
122 | //Cleanup
123 | if err = devFile.Close(); err != nil {
124 | sendErr(fmt.Errorf("NBD device file close failed; Details: %w", err))
125 | }
126 | if err = dev.Close(); err != nil && !errors.Is(err, context.Canceled) {
127 | sendErr(fmt.Errorf("NBD user space device close failed; Details: %w", err))
128 | }
129 | if err = syscall.Close(kernelSockFd); err != nil {
130 | sendErr(fmt.Errorf("kernel side socket close failed; Details: %w", err))
131 | }
132 | if err = syscall.Close(userSockFd); err != nil && !strings.Contains(err.Error(), "bad file descriptor") {
133 | //This failing with bad desc is normal if ndbClearSock did its job
134 | sendErr(fmt.Errorf("user side socket close failed; Details: %w", err))
135 | }
136 | close(errCh) //unblock Close()
137 | }()
138 |
139 | //We need to open the device again to ensure the OS rescans the partition table
140 | if tmp, err := os.OpenFile(blockDeviceName, os.O_RDONLY, 0); err != nil {
141 | sendErr(fmt.Errorf("NBD %q partition scan open failed; Details: %w", blockDeviceName, err))
142 | wgRunning.Done()
143 | return
144 | } else if err = tmp.Close(); err != nil {
145 | sendErr(fmt.Errorf("NBD %q partition scan close failed; Details: %w", blockDeviceName, err))
146 | wgRunning.Done()
147 | return
148 | }
149 |
150 | wgRunning.Done()
151 | <-ctx.Done()
152 | //Disconnect and reset NBD driver state
153 | if _, _, err = sysCall(syscall.SYS_IOCTL, devFile.Fd(), nbdClearQueue, 0); err != nil {
154 | sendErr(fmt.Errorf("NBD queue clear failed; Details: %w", err))
155 | }
156 | if _, _, err = sysCall(syscall.SYS_IOCTL, devFile.Fd(), ndbDisconnect, 0); err != nil {
157 | sendErr(fmt.Errorf("NBD disconnect failed; Details: %w", err))
158 | }
159 | if _, _, err = sysCall(syscall.SYS_IOCTL, devFile.Fd(), ndbClearSock, 0); err != nil {
160 | sendErr(fmt.Errorf("NBD socket clear failed; Details: %w", err))
161 | }
162 | }
163 |
164 | //Kick things off
165 | go nbdDoItWorker()
166 | go scanPartTableThenCleanupWorker()
167 | wgRunning.Wait()
168 |
169 | //Check for errors so far
170 | select {
171 | case err = <-errCh:
172 | if err != nil {
173 | cancel() //Abort!
174 | return nil, err
175 | }
176 | default:
177 | }
178 |
179 | return &NbdStream{
180 | dev: dev,
181 | Conn: cmdSocket,
182 | ctx: ctx,
183 | ctxCancel: cancel,
184 | errCh: errCh,
185 | }, nil
186 | }
187 |
188 | //Close this NbdStream
189 | func (strm *NbdStream) Close() error {
190 | strm.ctxCancel()
191 | return <-strm.errCh
192 | }
193 |
194 | //ProcessRequests for this NbdStream. Blocks until Close is called, so you may
195 | // want to run this in a secondary goroutine.
196 | func (strm *NbdStream) ProcessRequests() error {
197 | return processRequests(context.Background(), strm, strm.dev, RecommendWorkerCount())
198 | }
199 |
200 | func setupSyscalls(blockDeviceName string, dev Device) (*os.File, int, int, error) {
201 | fds, err := syscall.Socketpair(syscall.AF_UNIX, syscall.SOCK_STREAM, 0)
202 | if err != nil {
203 | return nil, 0, 0, fmt.Errorf("Could not create socket pair: %w", err)
204 | }
205 | userSockFd, kernelSockFd := fds[0], uintptr(fds[1])
206 |
207 | //Open device file we will use to communicate with NBD (via ioctl)
208 | devFile, err := os.OpenFile(blockDeviceName, os.O_RDWR, 0)
209 | if err != nil {
210 | return nil, 0, 0, fmt.Errorf("Could not open NBD device file: %s: %w", blockDeviceName, err)
211 | }
212 |
213 | //Inform NBD of block size of our device
214 | if _, _, err = sysCall(syscall.SYS_IOCTL, devFile.Fd(), nbdSetBlockSize, uintptr(dev.BlockSize())); err != nil {
215 | devFile.Close()
216 | return nil, 0, 0, fmt.Errorf("Could not inform NBD of device block size: %w", err)
217 | }
218 |
219 | //Inform NBD of the size of our device (rounded to our block size)
220 | if _, _, err = sysCall(syscall.SYS_IOCTL, devFile.Fd(), nbdSetSizeBlocks, uintptr(dev.Size()/dev.BlockSize())); err != nil {
221 | devFile.Close()
222 | return nil, 0, 0, fmt.Errorf("Could not inform NBD of the number of blocks on device: %w", err)
223 | }
224 |
225 | //Reset the state of the socket with NBD
226 | if _, _, err = sysCall(syscall.SYS_IOCTL, devFile.Fd(), ndbClearSock, 0); err != nil {
227 | devFile.Close()
228 | return nil, 0, 0, fmt.Errorf("Could not clear NBD socket: %w", err)
229 | }
230 |
231 | //Inform NBD of the socket it should use to talk to us
232 | if _, _, err = sysCall(syscall.SYS_IOCTL, devFile.Fd(), nbdSetSock, kernelSockFd); err != nil {
233 | devFile.Close()
234 | return nil, 0, 0, fmt.Errorf("Could not inform NBD of which socket to use: %w", err)
235 | }
236 |
237 | //Ask NBD to send us trim commands, if supported
238 | if ndbTrimSupported {
239 | if _, _, err = sysCall(syscall.SYS_IOCTL, devFile.Fd(), nbdSetFlags, nbdFlagSendTrim); err != nil {
240 | devFile.Close()
241 | return nil, 0, 0, fmt.Errorf("Could not inform NBD to send TRIM commands: %w", err)
242 | }
243 | }
244 |
245 | return devFile, userSockFd, int(kernelSockFd), nil
246 | }
247 |
248 | func sysCall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err error) {
249 | var errNo syscall.Errno
250 | r1, r2, errNo = syscall.Syscall(trap, a1, a2, a3)
251 | if errNo != 0 {
252 | err = errNo
253 | }
254 | return
255 | }
256 |
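//For reference (added commentary; a sketch, not code used above): the ioctl
// sequence setupSyscalls performs mirrors the classic NBD userspace-server
// handshake from <linux/nbd.h>; the unexported constants are defined elsewhere
// in this package:
//
//	ioctl(fd, NBD_SET_BLKSIZE, blockSize)          // nbdSetBlockSize
//	ioctl(fd, NBD_SET_SIZE_BLOCKS, size/blockSize) // nbdSetSizeBlocks
//	ioctl(fd, NBD_CLEAR_SOCK, 0)                   // ndbClearSock
//	ioctl(fd, NBD_SET_SOCK, kernelSockFd)          // nbdSetSock
//	ioctl(fd, NBD_DO_IT, 0)                        // nbdDoIt: blocks until disconnect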
9 | "strconv" 10 | "strings" 11 | "sync" 12 | "sync/atomic" 13 | "time" 14 | 15 | "github.com/tarndt/sema" 16 | "github.com/tarndt/usbd/pkg/util" 17 | 18 | "github.com/graymeta/stow" 19 | ) 20 | 21 | type segment struct { 22 | ID int 23 | *storeParams 24 | 25 | localFile *os.File 26 | fileMu sync.RWMutex 27 | 28 | remoteItem stow.Item 29 | itemMu sync.RWMutex 30 | 31 | atomicDirty, atomicBacked uint64 32 | atomicLastReadUnixNano, atomicLastWriteUnixNano int64 33 | } 34 | 35 | //loadSegments is a helper that constructs a slice of segments from items stored 36 | // in a remote object store containers that will be cached on the local file system 37 | // * container stow.Container -> where items are presisted remotely 38 | // * cacheDir string -> local directory files caching remote data 39 | // * {total/segment}Bytes int64 -> bytes per segment and sum of segments 40 | // * thickProvision -> no not use Linux sparse files 41 | // * quotaSema -> counting semaphore for tracking number of segments cached locally 42 | func loadSegments(container stow.Container, cacheDir string, totalBytes, segmentBytes int64, thickProvision, persistCache bool, quotaSema sema.CountingSema) ([]segment, error) { 43 | params := &storeParams{ 44 | container: container, segmentBytes: segmentBytes, cacheDir: cacheDir, 45 | thickProvision: thickProvision, persistCache: persistCache, quotaSema: quotaSema} 46 | prefix := osbdPrefix + devicePrefix + container.Name() + blockPrefix 47 | count := int(totalBytes / segmentBytes) 48 | 49 | segments := make([]segment, count) 50 | for i := range segments { 51 | seg := &segments[i] 52 | seg.ID = i 53 | seg.storeParams = params 54 | } 55 | 56 | items, cursor, err := container.Items(prefix, stow.CursorStart, count+1) 57 | switch { 58 | case err != nil: 59 | return nil, fmt.Errorf("Could not enumerate items in %s: %w", describeContainer(container), err) 60 | case len(items) > count: 61 | return nil, fmt.Errorf("Enumeration of %s revealed more than the expected %d items", describeContainer(container), count) 62 | case !stow.IsCursorEnd(cursor): 63 | return nil, fmt.Errorf("After enumerating the expected %d items the %s indicated more items exist", count, describeContainer(container)) 64 | } 65 | 66 | for _, item := range items { 67 | segIDStr := strings.TrimPrefix(item.Name(), prefix) //Remove X 68 | segIDStr = strings.TrimSuffix(segIDStr, filepath.Ext(segIDStr)) //Remove any file extension 69 | segID, err := strconv.Atoi(segIDStr) 70 | switch { 71 | case err != nil: 72 | return nil, fmt.Errorf("Could not parse %s name into block ID while iterating on items in %s: Parse %q failed: %w", describeItem(item), describeContainer(container), segIDStr, err) 73 | case segID < 0 || segID > count: 74 | return nil, fmt.Errorf("Parsed ID: %d from %s in %s was not in expected range (0 <= %d <= %d)", segID, describeItem(item), describeContainer(container), segID, count) 75 | } 76 | 77 | segments[segID].remoteItem = item 78 | } 79 | 80 | return segments, nil 81 | } 82 | 83 | //ReadAt reads len(buf) bytes from the segment starting at byte offset pos (position). 84 | // It returns the number of bytes read and an error, if any. 
85 | func (seg *segment) ReadAt(buf []byte, pos int64) (count int, err error) {
86 | atomic.StoreInt64(&seg.atomicLastReadUnixNano, time.Now().UnixNano())
87 |
88 | data, unlock, err := seg.loadFile(false)
89 | if err != nil {
90 | return 0, fmt.Errorf("Could not load segment data for reading: %w", err)
91 | }
92 |
93 | //Segment is empty; return as many zeros as min(len(buf), segment bytes after pos)
94 | if data == nil {
95 | unlock()
96 | bufLen := len(buf)
97 | if end := pos + int64(bufLen); end > seg.segmentBytes {
98 | bufLen, err = int(seg.segmentBytes-pos), io.EOF
99 | }
100 |
101 | util.ZeroFill(buf[:bufLen])
102 | return bufLen, err
103 | }
104 | defer unlock()
105 |
106 | return data.ReadAt(buf, pos)
107 | }
108 |
109 | //WriteAt writes len(buf) bytes to the segment starting at byte offset pos (position).
110 | // It returns the number of bytes written and an error, if any.
111 | func (seg *segment) WriteAt(buf []byte, pos int64) (count int, err error) {
112 | atomic.StoreInt64(&seg.atomicLastWriteUnixNano, time.Now().UnixNano())
113 |
114 | if util.IsZeros(buf) { //if the write is all zeros and the segment is empty, the write is a no-op
115 | seg.fileMu.RLock()
116 | data := seg.localFile
117 | seg.fileMu.RUnlock()
118 | if data == nil {
119 | return len(buf), nil
120 | }
121 | }
122 |
123 | data, unlock, err := seg.loadFile(true)
124 | if err != nil {
125 | return 0, fmt.Errorf("Could not load segment data for writing: %w", err)
126 | }
127 | defer unlock()
128 |
129 | count, err = data.WriteAt(buf, pos)
130 | if count > 0 {
131 | atomic.StoreUint64(&seg.atomicDirty, 1)
132 | }
133 | return count, err
134 | }
135 |
136 | //loadFile is a helper that returns the local data file, creating it if needed and desired, along with the unlock function for the file access mutex it acquired
137 | func (seg *segment) loadFile(createWrite bool) (file *os.File, unlock func(), err error) {
138 | if !createWrite {
139 | seg.fileMu.RLock()
140 | if seg.localFile != nil {
141 | return seg.localFile, seg.fileMu.RUnlock, nil
142 | }
143 | seg.fileMu.RUnlock()
144 | }
145 |
146 | seg.fileMu.Lock()
147 | if seg.localFile != nil {
148 | return seg.localFile, seg.fileMu.Unlock, nil
149 | }
150 |
151 | seg.itemMu.RLock()
152 | if seg.remoteItem == nil {
153 | seg.itemMu.RUnlock()
154 | if createWrite {
155 | seg.localFile, err = seg.storeParams.createFile(strconv.Itoa(seg.ID))
156 | } else {
157 | return nil, seg.fileMu.Unlock, nil
158 | }
159 | } else {
160 | defer seg.itemMu.RUnlock()
161 | seg.localFile, err = seg.storeParams.downloadFile(seg.remoteItem)
162 | }
163 |
164 | if err != nil {
165 | seg.fileMu.Unlock()
166 | return nil, nil, err
167 | }
168 | atomic.StoreUint64(&seg.atomicBacked, 1)
169 | return seg.localFile, seg.fileMu.Unlock, nil
170 | }
171 |
172 | //Flush will persist a dirty segment to the backing store. If an optional buffer
173 | // is provided, the segment will be copied into memory, unblocking writes to proceed
174 | // concurrently with any upload
175 | func (seg *segment) Flush(optBuf *bytes.Buffer) error {
176 | if !seg.Dirty() {
177 | return nil
178 | }
179 |
180 | seg.fileMu.RLock()
181 | return seg.flush(optBuf, seg.fileMu.RUnlock)
182 | }
183 |
184 | //flush is the internal flush implementation; the unlock method of the file (R)Lock acquired
185 | // can be passed to allow buffered uploads to unblock early
186 | func (seg *segment) flush(optBuf *bytes.Buffer, optEarlyUnlock func()) (err error) {
187 | if seg.localFile == nil {
188 | if optEarlyUnlock != nil {
189 | optEarlyUnlock()
190 | }
191 | return fmt.Errorf("Segment found to be dirty during flush request but is missing a backing data file")
192 | }
193 |
194 | seg.itemMu.Lock()
195 | defer seg.itemMu.Unlock()
196 |
197 | atomic.StoreUint64(&seg.atomicDirty, 0) //mark clean so if someone writes during upload we revert to dirty
198 | seg.remoteItem, err = seg.storeParams.syncFile(seg.localFile, strconv.Itoa(seg.ID), optBuf, optEarlyUnlock)
199 |
200 | return err
201 | }
202 |
203 | //DeleteFile persists the segment to remote storage and removes the local data
204 | func (seg *segment) DeleteFile() (err error) {
205 | seg.fileMu.Lock()
206 | defer seg.fileMu.Unlock()
207 |
208 | if !seg.Backed() {
209 | return nil
210 | }
211 |
212 | if seg.Dirty() {
213 | if err = seg.flush(nil, nil); err != nil {
214 | return fmt.Errorf("Flush of %q during DeleteFile failed: %w", seg.localFile.Name(), err)
215 | }
216 | }
217 |
218 | if err = seg.storeParams.removeFile(seg.localFile); err != nil {
219 | return fmt.Errorf("Could not remove local file %q: %w", seg.localFile.Name(), err)
220 | }
221 | atomic.StoreUint64(&seg.atomicBacked, 0)
222 | seg.localFile = nil
223 | seg.releaseCapacity()
224 |
225 | return nil
226 | }
227 |
228 | //Dirty reports whether the segment has local writes not committed to the remote store
229 | func (seg *segment) Dirty() bool {
230 | return atomic.LoadUint64(&seg.atomicDirty) == 1
231 | }
232 |
233 | //Backed reports whether the segment has locally stored data
234 | func (seg *segment) Backed() bool {
235 | return atomic.LoadUint64(&seg.atomicBacked) == 1
236 | }
237 |
238 | //LastOp returns the last time a read or write operation took place on this segment
239 | func (seg *segment) LastOp() time.Time {
240 | lastWrite, lastRead := seg.LastWrite(), seg.LastRead()
241 | if lastRead.After(lastWrite) { //return the most recent of the two operations
242 | return lastRead
243 | }
244 | return lastWrite
245 | }
246 |
247 | //LastRead returns the last time a read operation took place on this segment
248 | func (seg *segment) LastRead() time.Time {
249 | return atomicLoadTime(&seg.atomicLastReadUnixNano)
250 | }
251 |
252 | //LastWrite returns the last time a write operation took place on this segment
253 | func (seg *segment) LastWrite() time.Time {
254 | return atomicLoadTime(&seg.atomicLastWriteUnixNano)
255 | }
256 |
257 | //atomicLoadTime is a helper to atomically load a nanosecond counter field and
258 | // convert it to a time.Time
259 | func atomicLoadTime(atomicNanos *int64) time.Time {
260 | nsec := atomic.LoadInt64(atomicNanos)
261 | if nsec < 1 {
262 | return time.Time{}
263 | }
264 | return time.Unix(0, nsec)
265 | }
266 |
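//Flush-ordering note (added commentary summarizing the invariant used by flush
// above; it describes existing behavior, nothing new): the dirty flag is
// cleared before the upload begins, so a WriteAt racing with the upload
// re-marks the segment dirty and a later Flush re-uploads it:
//
//	atomic.StoreUint64(&seg.atomicDirty, 0) // optimistically mark clean
//	seg.remoteItem, err = syncFile(...)     // upload; may race with writes
//	// a concurrent WriteAt sets atomicDirty back to 1, so nothing is lost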
"encoding/json" 5 | "errors" 6 | "flag" 7 | "fmt" 8 | "log" 9 | "os" 10 | "path/filepath" 11 | "runtime" 12 | "strings" 13 | "time" 14 | 15 | "github.com/tarndt/usbd/pkg/devices/objstore" 16 | "github.com/tarndt/usbd/pkg/devices/objstore/compress" 17 | "github.com/tarndt/usbd/pkg/devices/objstore/encrypt" 18 | "github.com/tarndt/usbd/pkg/usbdlib" 19 | 20 | "github.com/dustin/go-humanize" 21 | "github.com/graymeta/stow" 22 | "github.com/graymeta/stow/s3" 23 | ) 24 | 25 | const ( 26 | defKeyFile = "key.aes" 27 | oneGiB = 1024 * 1024 * 1024 28 | defStoreSize = oneGiB 29 | defObjectSize = 64 * 1024 * 1024 30 | ) 31 | 32 | //MustGetConfig successful reads configuration from command-line arguments and 33 | // creates a Config or it exits with feedback for the invoking user 34 | func MustGetConfig() *Config { 35 | 36 | var devKind string 37 | var help bool 38 | cfg := new(Config) 39 | 40 | //General options 41 | flag.StringVar(&devKind, "dev-type", "mem", "Type of device to back block device with: 'mem', 'file', 'dedup', 'objstore'.") 42 | flag.UintVar(&cfg.NBDDevCount, "nbd-max-devs", usbdlib.DefMaxNBDDevices, "If the NBD kernel module is loaded by this deamon how many NBD devices should it create") 43 | flag.StringVar(&cfg.StorageDirectory, "store-dir", "./", "Location to create new backing disk files in") 44 | flag.StringVar(&cfg.StorageName, "store-name", "test-lun", "File base name to use for new backing disk files") 45 | flagCapacityVar(&cfg.StorageBytes, "store-size", defStoreSize, "Amount of storage capcity to use for new backing files (ex. 100 MiB, 20 GiB)") 46 | flag.BoolVar(&help, "help", false, "Display help and exit") 47 | 48 | //Device type specific options 49 | 50 | //Dedup 51 | var IDStoreMemoryCache string 52 | flag.StringVar(&IDStoreMemoryCache, "dedup-memcache", "512 MiB", "Amount of memory to the dedup store ID cache (ex. 100 MiB, 20 GiB)") 53 | 54 | //Objectstore 55 | var objStoreConfigJSON, AESMode, AESKey, Compress string 56 | recAESMode := encrypt.ModeFromName(encrypt.ModeAESRecName).AlgoName() 57 | flagCapacityVar(&cfg.ObjStoreConfig.LocalDiskCacheBytes, "objstore-diskcache", 0, "Amount of disk for caching remote objects (0 implies local fullbacking/caching or ex. 100 MiB, 20 GiB)") 58 | flag.BoolVar(&cfg.ObjStoreConfig.PersistCache, "objstore-persistcache", false, "Should the local cache be persistent or deleted on device shutdown") 59 | flag.StringVar(&cfg.ObjStoreConfig.Kind, "objstore-kind", s3.Kind, "Type of remote objectstore: 's3', 'b2', 'local', 'azure', 'swift', 'google', 'oracle' or 'sftp'") 60 | flagCapacityVar(&cfg.ObjStoreConfig.ObjectBytes, "objstore-objsize", defObjectSize, "Size of remote objects (ex. 32 MiB, 1 GiB)") 61 | flag.StringVar(&objStoreConfigJSON, "objstore-cfg", mustGetDefObjStoreParams(), "JSON configuration (default assumes local minio [kind \"s3\"] with default settings)") 62 | flag.StringVar(&AESMode, "objstore-aesmode", recAESMode, 63 | fmt.Sprintf("AES encryption mode to use to encrypt remote objects: %q, %q, %q or %q for no encryption. 
64 | encrypt.ModeAESCFBName, encrypt.ModeAESCTRName, encrypt.ModeAESOFBName, encrypt.ModeIdentityName, recAESMode),
65 | )
66 | flag.StringVar(&AESKey, "objstore-aeskey", "", "If AES is enabled; AES key to use to encrypt remote objects (if absent a key is generated and saved to ./"+defKeyFile+", otherwise use: key:, file:, env:")
67 | flag.StringVar(&Compress, "objstore-compress", compress.ModeS2Name,
68 | fmt.Sprintf("Compression algorithm to use for remote objects: %q, %q or %q for no compression)",
69 | compress.ModeS2Name, compress.ModeGzipName, compress.ModeIdentityName),
70 | )
71 | flag.UintVar(&cfg.ObjStoreConfig.ConcurFlush, "objstore-concurflush", 0, "Maximum number of dirty local objects to concurrently upload to the remote objectstore (0 implies use heuristic)")
72 | flag.DurationVar(&cfg.ObjStoreConfig.FlushInterval, "objstore-flushevery", time.Second*10, "Frequency at which dirty local objects are uploaded to the remote objectstore (0 disables autoflush)")
73 |
74 | //Process args set
75 | flag.Parse()
76 |
77 | if help {
78 | fmt.Printf("Usage: %s [optional: options see below...] [optional: NBD device to use ex. /dev/nbd0, if absent the first free device is used.]\n"+
79 | "Arguments starting with -X are only applicable if dev-type=X is being set.\n"+
80 | "\tExamples:\n"+"\t\t1 GiB device backed by memory: ./usbdsrvd\n"+
81 | "\t\t8 GiB device backed by a file and exported specifically on /dev/nbd5: ./usbdsrvd -dev-type=file -store-dir=/tmp -store-name=testfilevol -store-size=8GiB /dev/nbd5\n"+
82 | "\t\t12 GiB device backed by a file deduplicated using PebbleDB: ./usbdsrvd -dev-type=dedup -store-dir=/tmp -store-name=testdedupvol -store-size=12GiB\n"+
83 | "\t\t20 GiB device backed by a locally running S3/minio objectstore: ./usbdsrvd -dev-type=objstore -store-dir=/tmp -store-name=testobjvol -store-size=20GiB\n\n", os.Args[0])
84 | flag.PrintDefaults()
85 | os.Exit(0)
86 | }
87 |
88 | cfg.NBDDevName = flag.Arg(0)
89 |
90 | if cfg.BackingMode = NewBackingDevice(devKind); cfg.BackingMode == DevUnknown {
91 | log.Fatalf("Bad argument: Unknown backing device type of: %q", devKind)
92 | }
93 |
94 | if cfg.StorageName == "" {
95 | log.Fatalf("No volume name was provided (use -store-name=X)")
96 | }
97 |
98 | if cfg.BackingMode != DevMem {
99 | if cfg.StorageDirectory == "" {
100 | log.Fatalf("No storage directory was provided (use -store-dir=X)")
101 | }
102 | var err error
103 | if cfg.StorageDirectory, err = filepath.Abs(cfg.StorageDirectory); err != nil {
104 | log.Fatalf("Could not resolve storage directory (-store-dir=%q) to an absolute path: %s", cfg.StorageDirectory, err)
105 | } else if fstat, err := os.Stat(cfg.StorageDirectory); err != nil {
106 | if errors.Is(err, os.ErrNotExist) {
107 | log.Fatalf("Provided storage directory (-store-dir=%q) does not exist", cfg.StorageDirectory)
108 | }
109 | log.Fatalf("Provided storage directory (-store-dir=%q) could not be accessed", cfg.StorageDirectory)
110 | } else if !fstat.IsDir() {
111 | log.Fatalf("Provided storage directory (-store-dir=%q) is not a directory", cfg.StorageDirectory)
112 | }
113 | }
114 |
115 | switch cfg.BackingMode {
116 | case DevDedupFile:
117 | if IDStoreMemoryCache == "" {
118 | log.Fatalf("No memory quota for the dedup store ID cache was provided (use -dedup-memcache=X)")
119 | } else if bytes, err := humanize.ParseBytes(IDStoreMemoryCache); err != nil {
120 | log.Fatalf("Could not parse provided capacity for dedup store ID cache: %q: %s", IDStoreMemoryCache, err)
121 | } else {
122 | cfg.DedupConfig.IDStoreMemoryCacheBytes = int64(bytes)
123 | }
124 |
125 | case DevObjStore:
126 | mustGetObjStoreConfig(cfg, objStoreConfigJSON, AESMode, AESKey, Compress)
127 | }
128 | return cfg
129 | }
130 |
131 | func mustGetObjStoreConfig(cfg *Config, objStoreConfigJSON, AESMode, AESKey, Compress string) {
132 | switch strings.ToLower(cfg.ObjStoreConfig.Kind) {
133 | case "s3", "b2", "local", "azure", "swift", "google", "oracle", "sftp":
134 | case "":
135 | log.Fatalf("An objectstore kind must be provided (-objstore-kind=X)")
136 | default:
137 | log.Fatalf("Unknown objectstore kind was provided: %q", cfg.ObjStoreConfig.Kind)
138 | }
139 |
140 | if objStoreConfigJSON == "" {
141 | log.Fatalf("No JSON configuration was provided for remote objectstore (use -objstore-cfg=JSON)")
142 | }
143 | cfg.ObjStoreConfig.Config = make(stow.ConfigMap)
144 | if err := json.Unmarshal([]byte(objStoreConfigJSON), &cfg.ObjStoreConfig.Config); err != nil {
145 | log.Fatalf("Provided JSON configuration for remote objectstore could not be parsed: %s", err)
146 | }
147 | if err := objstore.ValidateConfig(cfg.ObjStoreConfig.Kind, cfg.ObjStoreConfig.Config); err != nil {
148 | log.Fatalf("Provided configuration for remote objectstore was not valid: %s", err)
149 | }
150 |
151 | if AESMode != encrypt.ModeIdentityName {
152 | if cfg.ObjStoreConfig.AESMode = encrypt.ModeFromName(AESMode); cfg.ObjStoreConfig.AESMode == encrypt.ModeUnknown {
153 | log.Fatalf("Unknown AES mode %q was provided", AESMode)
154 | }
155 |
156 | var err error
157 | if AESKey == "" {
158 | keyFile := filepath.Join(cfg.StorageDirectory, defKeyFile)
159 | if cfg.ObjStoreConfig.AESKey, err = os.ReadFile(keyFile); err != nil {
160 | if !errors.Is(err, os.ErrNotExist) {
161 | log.Fatalf("Could not read AES key file %q: %s", keyFile, err)
162 | }
163 | if cfg.ObjStoreConfig.AESKey, err = encrypt.MakeRandomAESKey(); err != nil {
164 | log.Fatalf("Could not create AES key: %s", err)
165 | }
166 | if err = os.WriteFile(keyFile, cfg.ObjStoreConfig.AESKey, 0600); err != nil { //0600: key material should not be readable by other users
167 | log.Fatalf("Could not write AES key to file: %q: %s", keyFile, err)
168 | }
169 | log.Printf("Generated AES-256 key and stored it in %q", keyFile)
170 | }
171 | } else {
172 | switch {
173 | case strings.HasPrefix(AESKey, "file:"):
174 | AESKey = strings.TrimPrefix(AESKey, "file:")
175 | if cfg.ObjStoreConfig.AESKey, err = os.ReadFile(AESKey); err != nil {
176 | log.Fatalf("Could not read AES key file %q: %s", AESKey, err)
177 | }
178 |
179 | case strings.HasPrefix(AESKey, "key:"):
180 | cfg.ObjStoreConfig.AESKey = []byte(strings.TrimPrefix(AESKey, "key:"))
181 |
182 | case strings.HasPrefix(AESKey, "env:"):
183 | AESKey = strings.TrimPrefix(AESKey, "env:")
184 | if envVal, exists := os.LookupEnv(AESKey); !exists {
185 | log.Fatalf("Could not read AES key from non-existent environment variable %q", AESKey)
186 | } else {
187 | cfg.ObjStoreConfig.AESKey = []byte(envVal)
188 | }
189 |
190 | default:
191 | log.Fatalf("AES key source %q is not valid", AESKey)
192 | }
193 | if err = encrypt.ValidAESKey(cfg.ObjStoreConfig.AESKey); err != nil {
194 | log.Fatalf("Could not validate provided AES key: %s", err)
195 | }
196 | }
197 | }
198 |
199 | if cfg.ObjStoreConfig.CompressMode = compress.ModeFromName(Compress); cfg.ObjStoreConfig.CompressMode == compress.ModeUnknown {
200 | log.Fatalf("Unknown compression mode %q was provided", Compress)
201 | }
202 |
203 | if cfg.ObjStoreConfig.ConcurFlush == 0 {
204 | cfg.ObjStoreConfig.ConcurFlush = recConcurFlush(int64(cfg.ObjStoreConfig.ObjectBytes))
205 | }
206 | }
207 |
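//Example invocation (an illustrative sketch: the endpoint and credentials are
// the local-minio defaults produced by mustGetDefObjStoreParams below, and the
// JSON key names are assumed to match the stow s3 config keys used there):
//
//	./usbdsrvd -dev-type=objstore -store-dir=/tmp -store-name=testobjvol \
//	  -store-size=20GiB -objstore-kind=s3 \
//	  -objstore-cfg='{"endpoint":"http://127.0.0.1:9000","access_key_id":"minioadmin","secret_key":"minioadmin"}'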
208 | func mustGetDefObjStoreParams() string {
209 | cfg := stow.ConfigMap{
210 | s3.ConfigEndpoint: "http://127.0.0.1:9000",
211 | s3.ConfigAccessKeyID: "minioadmin",
212 | s3.ConfigSecretKey: "minioadmin",
213 | }
214 |
215 | JSON, err := json.Marshal(cfg)
216 | if err != nil {
217 | log.Fatalf("Could not marshal default object store JSON config: %s", err)
218 | }
219 |
220 | return string(JSON)
221 | }
222 |
223 | func recConcurFlush(objsize int64) uint {
224 | const budgetBytes = 1024 * 1024 * 1024 //1 GiB
225 | rec := runtime.NumCPU()
226 |
227 | if int64(rec)*objsize > budgetBytes {
228 | rec = int(budgetBytes / objsize)
229 | }
230 | if rec < 1 {
231 | rec = 1
232 | }
233 | return uint(rec)
234 | }
235 |
--------------------------------------------------------------------------------
/pkg/devices/objstore/device.go:
--------------------------------------------------------------------------------
1 | package objstore
2 |
3 | import (
4 | "bytes"
5 | "context"
6 | "errors"
7 | "fmt"
8 | "io"
9 | "sync"
10 | "sync/atomic"
11 | "time"
12 |
13 | "github.com/tarndt/sema"
14 | "github.com/tarndt/usbd/pkg/devices/objstore/compress"
15 | "github.com/tarndt/usbd/pkg/devices/objstore/encrypt"
16 | "github.com/tarndt/usbd/pkg/usbdlib"
17 |
18 | "github.com/dustin/go-humanize"
19 | "github.com/graymeta/stow"
20 | )
21 |
22 | type device struct {
23 | usbdlib.DefaultBlockSize
24 |
25 | ctx context.Context
26 | ctxCancel context.CancelFunc
27 | container stow.Container
28 | totalBytes, segmentBytes int64
29 | segments []segment
30 | quotaSegSema sema.CountingSema
31 | pendingOpMu sync.RWMutex
32 |
33 | //options
34 | autoflush time.Duration
35 | concurFlush uint
36 |
37 | compressMode compress.Mode
38 | quotaBytes int64
39 |
40 | noMetadata, thickProvision, persistCache bool
41 |
42 | encryptMode encrypt.Mode
43 | encryptKey []byte
44 | }
45 |
46 | var _ usbdlib.Device = (*device)(nil) //compile-time check that *device implements usbdlib.Device
47 |
48 | //NewDevice is the constructor for ObjectStore backed devices
49 | func NewDevice(ctx context.Context, container stow.Container, cacheDir string, totalBytes, objectBytes uint, options ...Option) (usbdlib.Device, error) {
50 | switch {
51 | case ctx == nil:
52 | return nil, fmt.Errorf("Provided context was nil")
53 | case container == nil:
54 | return nil, fmt.Errorf("Provided container was nil")
55 | case objectBytes > totalBytes:
56 | return nil, fmt.Errorf("Provided object size (%s) must not exceed the provided total size (%s)", humanize.IBytes(uint64(objectBytes)), humanize.IBytes(uint64(totalBytes)))
57 | }
58 |
59 | ctx, cancel := context.WithCancel(ctx)
60 | dev := &device{
61 | ctx: ctx,
62 | ctxCancel: cancel,
63 | container: container,
64 | totalBytes: int64(totalBytes),
65 | segmentBytes: int64(objectBytes),
66 | }
67 |
68 | err := dev.applyOpts(options...)
69 | if err != nil {
70 | return nil, fmt.Errorf("Could not apply configuration options: %w", err)
71 | }
72 |
73 | dev.segments, err = loadSegments(dev.container, cacheDir, dev.totalBytes, dev.segmentBytes, dev.thickProvision, dev.persistCache, dev.quotaSegSema)
74 | if err != nil {
75 | cancel()
76 | return nil, fmt.Errorf("Could not create device using %s: %w", describeContainer(dev.container), err)
77 | }
78 |
79 | return dev, nil
80 | }
81 |
82 | func (dev *device) applyOpts(options ...Option) error {
83 | for _, opt := range options {
84 | opt.apply(dev)
85 | }
86 |
87 | if dev.noMetadata {
88 | if dev.compressMode != compress.ModeIdentity {
89 | return fmt.Errorf("Backing store does not support metadata, but %s store compression is enabled", dev.compressMode)
90 | } else if dev.encryptMode != encrypt.ModeIdentity {
91 | return fmt.Errorf("Backing store does not support metadata, but %s store encryption is enabled", dev.encryptMode)
92 | }
93 | } else {
94 | if dev.encryptMode != encrypt.ModeIdentity {
95 | if len(dev.encryptKey) < 1 {
96 | return fmt.Errorf("%s store encryption is enabled but no key was provided", dev.encryptMode)
97 | }
98 | if err := encrypt.ValidAESKey(dev.encryptKey); err != nil {
99 | return fmt.Errorf("Invalid AES key provided: %w", err)
100 | }
101 | dev.container = encrypt.NewEncryptedContainer(dev.container, dev.encryptMode, dev.encryptKey)
102 | }
103 | dev.container = compress.NewCompressedContainer(dev.container, dev.compressMode)
104 | }
105 |
106 | if dev.quotaBytes > 0 {
107 | if dev.quotaBytes < dev.segmentBytes {
108 | return fmt.Errorf("Provided quota (%d bytes) was smaller than a single segment/object (%d bytes)", dev.quotaBytes, dev.segmentBytes)
109 | }
110 | if dev.quotaBytes < dev.totalBytes {
111 | dev.quotaSegSema = sema.NewChanSemaTimeout(uint(dev.quotaBytes/dev.segmentBytes), 0)
112 | }
113 | }
114 |
115 | if dev.concurFlush < 1 {
116 | dev.concurFlush = 1
117 | }
118 |
119 | if dev.autoflush > 0 {
120 | go dev.flushWorker()
121 | }
122 |
123 | return nil
124 | }
125 |
126 | //Size of this device in bytes
127 | func (dev *device) Size() int64 {
128 | return dev.totalBytes
129 | }
130 |
131 | //ReadAt fulfills io.ReaderAt and in turn part of usbdlib.Device
132 | func (dev *device) ReadAt(buf []byte, pos int64) (count int, err error) {
133 | return dev.ioAt(true, buf, pos)
134 | }
135 |
136 | //WriteAt fulfills io.WriterAt and in turn part of usbdlib.Device
137 | func (dev *device) WriteAt(buf []byte, pos int64) (count int, err error) {
138 | return dev.ioAt(false, buf, pos)
139 | }
140 |
141 | func (dev *device) ioAt(readOp bool, buf []byte, pos int64) (count int, err error) {
142 | dev.pendingOpMu.RLock()
143 | defer dev.pendingOpMu.RUnlock()
144 | if err := dev.ctx.Err(); err != nil {
145 | return 0, fmt.Errorf("Device is shutdown: %w", err)
146 | }
147 |
148 | if pos > dev.totalBytes {
149 | return 0, io.EOF
150 | }
151 |
152 | segID, maxSegID := pos/dev.segmentBytes, int64(len(dev.segments))
153 | pos -= segID * dev.segmentBytes
154 |
155 | total := 0 //bytes transferred so far; reads and writes share this path
156 | remaining := len(buf)
157 |
158 | for remaining > 0 {
159 | if segID >= maxSegID {
160 | return total, io.EOF
161 | }
162 |
163 | segBuf := buf[total:]
164 | if maxIO := int(dev.segmentBytes - pos); remaining > maxIO {
165 | segBuf = segBuf[:maxIO]
166 | }
167 |
168 | const maxCapTries = 10
169 | capTries := 0
170 | for {
171 | if readOp {
172 | count, err = dev.segments[segID].ReadAt(segBuf, pos)
173 | } else {
174 | count, err = dev.segments[segID].WriteAt(segBuf, pos)
175 | }
176 | if err != nil {
177 | if errors.Is(err, errCapacityClaim) && capTries < maxCapTries {
178 | if _, err = dev.removeLeastRecentUsed(int(segID)); err != nil {
179 | return count, fmt.Errorf("Could not free capacity to load segment %d: %w", segID, err)
180 | }
181 | capTries++
182 | continue
183 | }
184 | return count, fmt.Errorf("Could not access segment %d: %w", segID, err)
185 | }
186 | break
187 | }
188 |
189 | remaining -= count
190 | total += count
191 | segID++
192 | pos = 0
193 | }
194 |
195 | return total, nil
196 | }
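//Addressing sketch (a restatement of the arithmetic at the top of ioAt above,
// added for clarity): a device offset maps to a segment ID plus an
// intra-segment position:
//
//	segID := pos / dev.segmentBytes
//	off := pos - segID*dev.segmentBytes
//	// e.g. with 64 MiB segments, pos = 100 MiB -> segment 1, offset 36 MiB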
197 |
198 | //Trim fulfills part of usbdlib.Device
199 | func (dev *device) Trim(pos int64, count int) error {
200 | return nil //See TODO
201 | }
202 |
203 | //Flush fulfills part of usbdlib.Device
204 | func (dev *device) Flush() error {
205 | dev.pendingOpMu.RLock()
206 | defer dev.pendingOpMu.RUnlock()
207 | if err := dev.ctx.Err(); err != nil {
208 | return fmt.Errorf("Device is shutdown: %w", err)
209 | }
210 |
211 | return dev.flush()
212 | }
213 |
214 | func (dev *device) flush() error {
215 | concurFlush := dev.concurFlush
216 | if concurFlush < 1 {
217 | concurFlush = 1
218 | }
219 |
220 | flushList := make([]*segment, 0, len(dev.segments))
221 | for i := range dev.segments {
222 | seg := &dev.segments[i]
223 | if seg.Dirty() {
224 | flushList = append(flushList, seg)
225 | }
226 | }
227 | if len(flushList) < 1 {
228 | return nil
229 | }
230 |
231 | const maxFlushAlloc = 512 * 1024 * 1024 //512 MB
232 | flushAlloc := int64(0)
233 | bufCh := make(chan *bytes.Buffer, concurFlush)
234 | getBuf := func() *bytes.Buffer {
235 | select {
236 | case buf := <-bufCh:
237 | return buf
238 | default:
239 | }
240 |
241 | cur := atomic.AddInt64(&flushAlloc, dev.segmentBytes)
242 | if cur > maxFlushAlloc {
243 | return nil
244 | }
245 | defer func() {
246 | recover() //if alloc fails, just abort which returns nil
247 | }()
248 | return bytes.NewBuffer(make([]byte, 0, int(dev.segmentBytes)))
249 | }
250 | freeBuf := func(buf *bytes.Buffer) {
251 | if buf == nil {
252 | return
253 | }
254 | buf.Reset()
255 | bufCh <- buf
256 | }
257 |
258 | errCh := make(chan error, 1)
259 | flushSema := sema.NewChanSemaCount(concurFlush)
260 | var pending sync.WaitGroup
261 | for _, seg := range flushList {
262 | if !seg.Dirty() {
263 | continue
264 | }
265 | flushSema.P()
266 | pending.Add(1)
267 |
268 | go func(s *segment) {
269 | defer func() {
270 | flushSema.V()
271 | pending.Done()
272 | }()
273 |
274 | buf := getBuf()
275 | defer freeBuf(buf)
276 |
277 | if err := s.Flush(buf); err != nil {
278 | select {
279 | case errCh <- err:
280 | default:
281 | }
282 | }
283 | }(seg)
284 | }
285 | pending.Wait()
286 | close(errCh)
287 |
288 | if err := <-errCh; err != nil {
289 | return fmt.Errorf("One or more segments failed to flush: %w", err)
290 | }
291 | return nil
292 | }
293 |
294 | //Close fulfills io.Closer and in turn part of usbdlib.Device
295 | func (dev *device) Close() error {
296 | dev.pendingOpMu.Lock()
297 | defer dev.pendingOpMu.Unlock()
298 | if err := dev.ctx.Err(); err != nil {
299 | return nil
300 | }
301 | dev.ctxCancel()
302 |
303 | err := dev.flush()
304 | if err != nil {
305 | return fmt.Errorf("Failed to write local cache back to remote store during shutdown (local cache will be preserved): %w", err)
306 | }
307 |
308 | if !dev.persistCache {
309 | for i := range dev.segments {
310 | if rmErr := dev.segments[i].DeleteFile(); rmErr != nil && err == nil {
311 | err = rmErr
312 | }
313 | }
314 | }
315 | if err != nil {

//Close fulfills io.Closer and in turn part of usbdlib.Device
func (dev *device) Close() error {
	dev.pendingOpMu.Lock()
	defer dev.pendingOpMu.Unlock()
	if err := dev.ctx.Err(); err != nil {
		return nil
	}
	dev.ctxCancel()

	err := dev.flush()
	if err != nil {
		return fmt.Errorf("Failed to write local cache back to remote store during shutdown (local cache will be preserved): %w", err)
	}

	if !dev.persistCache {
		for i := range dev.segments {
			if rmErr := dev.segments[i].DeleteFile(); rmErr != nil && err == nil {
				err = rmErr
			}
		}
	}
	if err != nil {
		return fmt.Errorf("Could not delete one or more local cache files during shutdown: %w", err)
	}

	return nil
}

//flushWorker periodically flushes dirty segments until the device shuts down
func (dev *device) flushWorker() {
	if dev.autoflush == 0 {
		return
	}

	ticker := time.NewTicker(dev.autoflush)
	for {
		select {
		case <-dev.ctx.Done():
			ticker.Stop()
			return
		case <-ticker.C:
			dev.Flush() //best effort; a failed autoflush is retried on the next tick
		}
	}
}

//removeLeastRecentUsed evicts the local cache file of the least recently used
//backed segment (preferring clean segments over dirty ones), excluding forIdx
func (dev *device) removeLeastRecentUsed(forIdx int) (int, error) {
	const maxTries = 16

	for tries := 0; true; tries++ {
		var (
			cleanCandidate, dirtyCandidate *segment
			cleanLastOp, dirtyLastOp       time.Time
			cleanIdx, dirtyIdx             int
		)

		for i := range dev.segments {
			if i == forIdx {
				continue
			}

			seg := &dev.segments[i]
			if !seg.Backed() {
				continue
			}

			if seg.Dirty() {
				if lastOp := seg.LastOp(); lastOp.Before(dirtyLastOp) || dirtyLastOp.IsZero() {
					dirtyCandidate, dirtyLastOp, dirtyIdx = seg, lastOp, i
				}
			} else {
				if lastOp := seg.LastOp(); lastOp.Before(cleanLastOp) || cleanLastOp.IsZero() {
					cleanCandidate, cleanLastOp, cleanIdx = seg, lastOp, i
				}
			}
		}

		candidate, lastOp, idx := cleanCandidate, cleanLastOp, cleanIdx
		if candidate == nil {
			candidate, lastOp, idx = dirtyCandidate, dirtyLastOp, dirtyIdx
		}

		if candidate == nil {
			return 0, fmt.Errorf("Could not remove any segments as none are backed")
		} else if candidate.Backed() && (candidate.LastOp().Equal(lastOp) || tries+1 >= maxTries) {
			return idx, candidate.DeleteFile()
		}
	}
	panic("BUG: removeLeastRecentUsed loop never exits")
}
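
/*
Editorial sketch — not part of the original source: the victim-selection rule
removeLeastRecentUsed applies, isolated as a pure function over hypothetical
(lastOp, dirty) pairs. Clean segments are preferred because evicting them
loses no unflushed writes; only if every candidate is dirty is the least
recently used dirty segment chosen.

	package main

	import (
		"fmt"
		"time"
	)

	type segInfo struct {
		lastOp time.Time
		dirty  bool
	}

	//pickVictim returns the index of the least recently used clean segment,
	//falling back to the least recently used dirty one; -1 if none qualify
	func pickVictim(segs []segInfo, skip int) int {
		cleanIdx, dirtyIdx := -1, -1
		for i, s := range segs {
			if i == skip {
				continue
			}
			if s.dirty {
				if dirtyIdx == -1 || s.lastOp.Before(segs[dirtyIdx].lastOp) {
					dirtyIdx = i
				}
			} else if cleanIdx == -1 || s.lastOp.Before(segs[cleanIdx].lastOp) {
				cleanIdx = i
			}
		}
		if cleanIdx != -1 {
			return cleanIdx
		}
		return dirtyIdx
	}

	func main() {
		now := time.Now()
		segs := []segInfo{
			{lastOp: now.Add(-3 * time.Minute), dirty: true}, //oldest, but dirty
			{lastOp: now.Add(-2 * time.Minute)},              //oldest clean -> chosen
			{lastOp: now.Add(-1 * time.Minute)},
		}
		fmt.Println(pickVictim(segs, -1)) //prints 1
	}
*/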
--------------------------------------------------------------------------------
/pkg/devices/objstore/storeparams.go:
--------------------------------------------------------------------------------
package objstore

import (
	"bufio"
	"bytes"
	"errors"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"sync"

	"github.com/tarndt/sema"
	"github.com/tarndt/usbd/pkg/util/consterr"
	"github.com/tarndt/usbd/pkg/util/strms"

	"github.com/dustin/go-humanize"
	"github.com/graymeta/stow"
)

const errCapacityClaim = consterr.ConstErr("Maximum number of local segments already loaded")

const bufSize = 1024 * 1024 * 2 //2 MB
var copyBufPool = sync.Pool{
	New: func() interface{} {
		return make([]byte, bufSize)
	},
}

type storeParams struct {
	container    stow.Container
	segmentBytes int64
	cacheDir     string

	thickProvision, persistCache bool

	quotaSema sema.CountingSema
}

func (sp *storeParams) createFile(segID string) (file *os.File, err error) {
	if err = sp.claimCapacity(); err != nil {
		return nil, fmt.Errorf("Not enough capacity to create file: %w", err)
	}

	itemName := osbdPrefix + devicePrefix + sp.container.Name() + blockPrefix + segID
	fpath := filepath.Join(sp.cacheDir, itemName)
	file, err = os.Create(fpath)
	if err != nil {
		sp.releaseCapacity() //no file was created, so give back the claim
		return nil, fmt.Errorf("Could not create new object local file %q: %w", fpath, err)
	}
	defer func() {
		if err != nil {
			sp.releaseCapacity() //mirror downloadFile: release the claim on failure
			file.Close()
			os.Remove(fpath)
		}
	}()

	if sp.thickProvision {
		buf := copyBufPool.Get().([]byte)
		limitedRdr := io.LimitedReader{R: strms.DevZero, N: sp.segmentBytes}
		_, err = io.CopyBuffer(file, &limitedRdr, buf)
		copyBufPool.Put(buf)
		if err != nil {
			return nil, fmt.Errorf("Could not write zeros to new object local file %q: %w", fpath, err)
		}
	} else {
		if _, err = file.WriteAt([]byte{0}, sp.segmentBytes-1); err != nil {
			return nil, fmt.Errorf("Could not allocate new sparse object local file %q: %w", fpath, err)
		}
	}

	return file, err
}

func (sp *storeParams) downloadFile(item stow.Item) (file *os.File, err error) {
	if err = sp.claimCapacity(); err != nil {
		return nil, fmt.Errorf("Not enough capacity to download file: %w", err)
	}
	defer func() {
		if err != nil {
			sp.releaseCapacity()
			if file != nil {
				file.Close()
				os.Remove(file.Name())
			}
		}
	}()

	remoteSize, err := item.Size()
	if err != nil {
		return nil, fmt.Errorf("Could not obtain %s size: %w", describeItem(item), err)
	} else if remoteSize == 0 {
		sp.releaseCapacity() //no file will be materialized, so give back the claim
		return nil, nil      //we don't have anything to download
	} else if remoteSize != sp.segmentBytes {
		return nil, fmt.Errorf("Store reported %s was wrong size: Expected %s and found: %s", describeItem(item), humanize.IBytes(uint64(sp.segmentBytes)), humanize.IBytes(uint64(remoteSize)))
	}

	fpath := filepath.Join(sp.cacheDir, item.Name())

	file, err = sp.cachedFile(item, fpath, remoteSize)
	if err != nil {
		return nil, fmt.Errorf("Local cache is unusable for %s: %w", describeItem(item), err)
	} else if file != nil {
		return file, nil
	}

	if file, err = os.Create(fpath); err != nil {
		return nil, fmt.Errorf("Could not create download object local file %q: %w", fpath, err)
	}

	data, err := item.Open()
	if err != nil {
		return nil, fmt.Errorf("Could not open %s for downloading: %w", describeItem(item), err)
	}
	defer data.Close()

	//N is remoteSize+1 so that reading even one byte past the expected size is detectable below
	limitedRdr := &io.LimitedReader{R: data, N: remoteSize + 1}
	var downloadedBytes int64
	if sp.thickProvision {
		buf := copyBufPool.Get().([]byte)
		downloadedBytes, err = io.CopyBuffer(file, limitedRdr, buf)
		copyBufPool.Put(buf)
	} else {
		downloadedBytes, err = sparseCopy(file, limitedRdr)
	}

	switch {
	case err != nil:
		return nil, fmt.Errorf("Could not download %s: %w", describeItem(item), err)
	case limitedRdr.N < 1:
		return nil, fmt.Errorf("%s contained more than the expected %d bytes (%s) of data", describeItem(item), remoteSize, humanize.IBytes(uint64(remoteSize)))
	case downloadedBytes != remoteSize:
		return nil, fmt.Errorf("Store download %s was wrong size: Expected %d bytes (%s) and found: %d bytes (%s)", describeItem(item), remoteSize, humanize.IBytes(uint64(remoteSize)), downloadedBytes, humanize.IBytes(uint64(downloadedBytes)))
	}
	return file, nil
}
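
/*
Editorial sketch — not part of the original source: the two provisioning
strategies createFile and downloadFile choose between. Writing a single byte
at the final offset yields a sparse file whose blocks allocate on demand,
while copying from a zero reader allocates the full size up front; zeroReader
here mimics strms.DevZero, and the file names and size are hypothetical.

	package main

	import (
		"io"
		"log"
		"os"
	)

	//zeroReader is an endless stream of zero bytes
	type zeroReader struct{}

	func (zeroReader) Read(p []byte) (int, error) {
		for i := range p {
			p[i] = 0
		}
		return len(p), nil
	}

	func main() {
		const size = 1 << 20 //1 MiB, for illustration

		//Thin (sparse): one byte at the last offset sets the logical size;
		//unwritten ranges consume no disk blocks on most filesystems
		thin, err := os.Create("/tmp/thin.img")
		if err != nil {
			log.Fatal(err)
		}
		if _, err = thin.WriteAt([]byte{0}, size-1); err != nil {
			log.Fatal(err)
		}
		thin.Close()

		//Thick: physically write size bytes of zeros up front
		thick, err := os.Create("/tmp/thick.img")
		if err != nil {
			log.Fatal(err)
		}
		if _, err = io.CopyN(thick, zeroReader{}, size); err != nil {
			log.Fatal(err)
		}
		thick.Close()
	}
*/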

//cachedFile checks whether a locally persisted cache file (and its eTag
//metadata) can be re-used for item; it returns (nil, nil) when a fresh
//download is required
func (sp *storeParams) cachedFile(item stow.Item, fpath string, remoteSize int64) (*os.File, error) {
	dataFileInfo, err := os.Stat(fpath)
	dataExists := err == nil
	eTagPath := eTagFileName(fpath)
	_, err = os.Stat(eTagPath)
	eTagExists := err == nil

	rmDataFile := func() error {
		if err := os.Remove(fpath); err != nil {
			return fmt.Errorf("Could not remove local cache file %q that did not have eTag metadata in preparation for re-download: %w", fpath, err)
		}
		return nil
	}

	rmETagFile := func() error {
		if err := os.Remove(eTagPath); err != nil {
			return fmt.Errorf("Could not remove local cache eTag metadata file %q that did not have matching local data file in preparation for re-download: %w", eTagPath, err)
		}
		return nil
	}

	switch {
	case dataExists && !eTagExists:
		if err = rmDataFile(); err != nil {
			return nil, err
		}

	case !dataExists && eTagExists:
		if err = rmETagFile(); err != nil {
			return nil, err
		}

	case !sp.persistCache && (dataExists || eTagExists):
		return nil, fmt.Errorf("Local cache and/or eTag metadata files exist but cache persistence is not enabled; Enable persistence or remove local files")

	case dataExists && eTagExists:
		fileETagBytes, err := os.ReadFile(eTagPath)
		if err != nil {
			return nil, fmt.Errorf("Could not read eTag metadata file %q while checking if locally persisted cache file could be used: %w", eTagPath, err)
		}
		fileETag := string(fileETagBytes)

		itemETag, err := item.ETag()
		if err != nil {
			return nil, fmt.Errorf("Could not get remote item %s eTag metadata: %w", describeItem(item), err)
		}

		//Is the local data file valid for re-use?
		if fileETag != "" && fileETag == itemETag && dataFileInfo.Mode().IsRegular() && dataFileInfo.Size() == remoteSize {
			file, err := os.OpenFile(fpath, os.O_RDWR, 0666)
			if err != nil {
				return nil, fmt.Errorf("Could not open locally cached data file %q for re-use: %w", fpath, err)
			}
			return file, nil
		}

		//It was not, so get rid of them
		rmDataErr, rmETagErr := rmDataFile(), rmETagFile()
		switch {
		case rmDataErr != nil:
			return nil, rmDataErr
		case rmETagErr != nil:
			return nil, rmETagErr
		}
	}
	return nil, nil
}

func (sp *storeParams) removeFile(file *os.File) error {
	fpath := file.Name()

	err := file.Close()
	if err != nil {
		return fmt.Errorf("Could not close local cache file %q before deleting it: %w", fpath, err)
	}

	if err = os.Remove(fpath); err != nil {
		return fmt.Errorf("Could not remove local cache file %q: %w", fpath, err)
	}

	if sp.persistCache {
		if err = os.Remove(eTagFileName(fpath)); err != nil && !errors.Is(err, os.ErrNotExist) {
			return fmt.Errorf("Could not remove local cache eTag metadata file %q: %w", eTagFileName(fpath), err)
		}
	}
	return nil
}
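
/*
Editorial sketch — not part of the original source: the reuse test cachedFile
performs, reduced to its essentials. A cached segment is trusted only when a
sidecar file holds the same eTag the store currently reports and the data file
is a regular file of exactly the expected size; the paths and eTag values are
hypothetical.

	package main

	import (
		"fmt"
		"os"
	)

	//canReuse reports whether the cached file at dataPath may stand in for a
	//remote object with the given eTag and size
	func canReuse(dataPath, eTagPath, remoteETag string, remoteSize int64) bool {
		eTag, err := os.ReadFile(eTagPath)
		if err != nil || len(eTag) == 0 || string(eTag) != remoteETag {
			return false //missing or stale eTag metadata: must re-download
		}
		info, err := os.Stat(dataPath)
		return err == nil && info.Mode().IsRegular() && info.Size() == remoteSize
	}

	func main() {
		os.WriteFile("/tmp/blk_0", make([]byte, 4096), 0666)
		os.WriteFile("/tmp/blk_0.etag", []byte("abc123"), 0666)
		fmt.Println(canReuse("/tmp/blk_0", "/tmp/blk_0.etag", "abc123", 4096)) //true
		fmt.Println(canReuse("/tmp/blk_0", "/tmp/blk_0.etag", "stale", 4096))  //false
	}
*/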

//syncFile fsyncs the local file and uploads its contents to the remote object
//store; when optBuf is large enough the file is buffered first so that
//optEarlyUnlock can be called before the (slow) upload begins
func (sp *storeParams) syncFile(file *os.File, segID string, optBuf *bytes.Buffer, optEarlyUnlock func()) (stow.Item, error) {
	if err := file.Sync(); err != nil {
		if optEarlyUnlock != nil {
			optEarlyUnlock()
		}
		return nil, fmt.Errorf("Failed to fsync local file: %w", err)
	}

	var srcRdr io.Reader
	if optEarlyUnlock != nil && optBuf != nil && int64(optBuf.Cap()) >= sp.segmentBytes {
		optBuf.Reset()
		if n, err := optBuf.ReadFrom(strms.NewReadAtReader(file)); err != nil {
			optEarlyUnlock()
			return nil, fmt.Errorf("Could not buffer file for transmission to remote object store: %w", err)
		} else if n != sp.segmentBytes {
			optEarlyUnlock()
			return nil, fmt.Errorf("Fewer bytes (%d) than expected (%d) were read while buffering file for transmission to remote object store", n, sp.segmentBytes)
		}
		optEarlyUnlock()
		srcRdr = optBuf
	} else {
		if optEarlyUnlock != nil {
			defer optEarlyUnlock()
		}
		srcRdr = bufio.NewReader(strms.NewReadAtReader(file))
	}

	itemName := osbdPrefix + devicePrefix + sp.container.Name() + blockPrefix + segID
	item, err := sp.container.Put(itemName, srcRdr, sp.segmentBytes, nil)
	if err != nil {
		return nil, fmt.Errorf("Could not upload to remote item %q in %s: %w", itemName, describeContainer(sp.container), err)
	}

	if sp.persistCache {
		if err = persistEtag(file.Name(), item); err != nil {
			return item, err
		}
	}
	return item, nil
}

func persistEtag(assocFilename string, item stow.Item) error {
	eTag, err := item.ETag()
	if err != nil {
		return fmt.Errorf("Could not get eTag from remote object: %w", err)
	} else if eTag == "" {
		return nil
	}

	if err = os.WriteFile(eTagFileName(assocFilename), []byte(eTag), 0666); err != nil {
		return fmt.Errorf("Could not write eTag metadata file for cache persistence: %w", err)
	}
	return nil
}

func eTagFileName(assocFilename string) string {
	const eTagFileSuffix = ".etag"
	return assocFilename + eTagFileSuffix
}

func (sp *storeParams) claimCapacity() error {
	if sp.quotaSema != nil && !sp.quotaSema.P() {
		return errCapacityClaim
	}
	return nil
}

func (sp *storeParams) releaseCapacity() {
	const errCapacityReleaseMsg = "Could not release capacity, likely BUG"

	if sp.quotaSema != nil && !sp.quotaSema.V() {
		panic(errCapacityReleaseMsg)
	}
}

//sparseCopy copies src to file, seeking over (rather than writing) runs of
//minSeqSparseZeros or more zero bytes so the destination stays sparse
func sparseCopy(file *os.File, src io.Reader) (n int64, err error) {
	const minSeqSparseZeros = 4096
	in := bufio.NewReader(src)
	out := bufio.NewWriter(file)
	pendZeros := int64(0)

	var cur byte
	for {
		if cur, err = in.ReadByte(); err != nil {
			if err == io.EOF {
				if err = out.Flush(); err != nil {
					break
				}
				if pendZeros > 0 { //was ">= 0", which appended a stray zero byte when no zeros were pending
					if pendZerosMinusOne := pendZeros - 1; pendZerosMinusOne > 0 {
						if _, err = file.Seek(pendZerosMinusOne, io.SeekCurrent); err != nil {
							break
						}
					}
					_, err = file.Write([]byte{0}) //write the final zero so the file length covers the run
					n += pendZeros
				}
			}
			break
		}

		if cur == 0 {
			pendZeros++
		} else {
			if pendZeros > 0 {
				if pendZeros >= minSeqSparseZeros {
					if err = out.Flush(); err != nil {
						break
					}
					if _, err = file.Seek(pendZeros, io.SeekCurrent); err != nil {
						break
					}
					n += pendZeros
				} else {
					for i := int64(0); i < pendZeros; i++ {
						if err = out.WriteByte(0); err != nil {
							break
						}
						n++
					}
					if err != nil {
						break
					}
				}
				pendZeros = 0
			}
			if err = out.WriteByte(cur); err != nil {
				break
			}
			n++
		}
	}
	if err != nil {
		return n, err
	}
	return n, out.Flush()
}
--------------------------------------------------------------------------------
/pkg/devices/dedupdisk/impls/lz4.h:
--------------------------------------------------------------------------------
1 | /*
2 |    LZ4 - Fast LZ compression algorithm
3 |    Header File
4 |    Copyright (C) 2011-2013, Yann Collet.
5 | BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) 6 | 7 | Redistribution and use in source and binary forms, with or without 8 | modification, are permitted provided that the following conditions are 9 | met: 10 | 11 | * Redistributions of source code must retain the above copyright 12 | notice, this list of conditions and the following disclaimer. 13 | * Redistributions in binary form must reproduce the above 14 | copyright notice, this list of conditions and the following disclaimer 15 | in the documentation and/or other materials provided with the 16 | distribution. 17 | 18 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 22 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 | 30 | You can contact the author at : 31 | - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html 32 | - LZ4 source repository : http://code.google.com/p/lz4/ 33 | */ 34 | #pragma once 35 | 36 | #if defined (__cplusplus) 37 | extern "C" { 38 | #endif 39 | 40 | 41 | //************************************** 42 | // Compiler Options 43 | //************************************** 44 | #if defined(_MSC_VER) && !defined(__cplusplus) // Visual Studio 45 | # define inline __inline // Visual C is not C99, but supports some kind of inline 46 | #endif 47 | 48 | 49 | //**************************** 50 | // Simple Functions 51 | //**************************** 52 | 53 | int LZ4_compress (const char* source, char* dest, int inputSize); 54 | int LZ4_decompress_safe (const char* source, char* dest, int inputSize, int maxOutputSize); 55 | 56 | /* 57 | LZ4_compress() : 58 | Compresses 'inputSize' bytes from 'source' into 'dest'. 59 | Destination buffer must be already allocated, 60 | and must be sized to handle worst cases situations (input data not compressible) 61 | Worst case size evaluation is provided by function LZ4_compressBound() 62 | inputSize : Max supported value is LZ4_MAX_INPUT_VALUE 63 | return : the number of bytes written in buffer dest 64 | or 0 if the compression fails 65 | 66 | LZ4_decompress_safe() : 67 | maxOutputSize : is the size of the destination buffer (which must be already allocated) 68 | return : the number of bytes decoded in the destination buffer (necessarily <= maxOutputSize) 69 | If the source stream is detected malformed, the function will stop decoding and return a negative result. 70 | This function is protected against buffer overflow exploits (never writes outside of output buffer, and never reads outside of input buffer). Therefore, it is protected against malicious data packets 71 | */ 72 | 73 | 74 | //**************************** 75 | // Advanced Functions 76 | //**************************** 77 | #define LZ4_MAX_INPUT_SIZE 0x7E000000 // 2 113 929 216 bytes 78 | #define LZ4_COMPRESSBOUND(isize) ((unsigned int)(isize) > (unsigned int)LZ4_MAX_INPUT_SIZE ? 
0 : (isize) + ((isize)/255) + 16) 79 | static inline int LZ4_compressBound(int isize) { return LZ4_COMPRESSBOUND(isize); } 80 | 81 | /* 82 | LZ4_compressBound() : 83 | Provides the maximum size that LZ4 may output in a "worst case" scenario (input data not compressible) 84 | primarily useful for memory allocation of output buffer. 85 | inline function is recommended for the general case, 86 | macro is also provided when result needs to be evaluated at compilation (such as stack memory allocation). 87 | 88 | isize : is the input size. Max supported value is LZ4_MAX_INPUT_SIZE 89 | return : maximum output size in a "worst case" scenario 90 | or 0, if input size is too large ( > LZ4_MAX_INPUT_SIZE) 91 | */ 92 | 93 | 94 | int LZ4_compress_limitedOutput (const char* source, char* dest, int inputSize, int maxOutputSize); 95 | 96 | /* 97 | LZ4_compress_limitedOutput() : 98 | Compress 'inputSize' bytes from 'source' into an output buffer 'dest' of maximum size 'maxOutputSize'. 99 | If it cannot achieve it, compression will stop, and result of the function will be zero. 100 | This function never writes outside of provided output buffer. 101 | 102 | inputSize : Max supported value is LZ4_MAX_INPUT_VALUE 103 | maxOutputSize : is the size of the destination buffer (which must be already allocated) 104 | return : the number of bytes written in buffer 'dest' 105 | or 0 if the compression fails 106 | */ 107 | 108 | 109 | int LZ4_decompress_fast (const char* source, char* dest, int outputSize); 110 | 111 | /* 112 | LZ4_decompress_fast() : 113 | outputSize : is the original (uncompressed) size 114 | return : the number of bytes read from the source buffer (in other words, the compressed size) 115 | If the source stream is malformed, the function will stop decoding and return a negative result. 116 | note : This function is a bit faster than LZ4_decompress_safe() 117 | This function never writes outside of output buffers, but may read beyond input buffer in case of malicious data packet. 118 | Use this function preferably into a trusted environment (data to decode comes from a trusted source). 119 | Destination buffer must be already allocated. Its size must be a minimum of 'outputSize' bytes. 120 | */ 121 | 122 | int LZ4_decompress_safe_partial (const char* source, char* dest, int inputSize, int targetOutputSize, int maxOutputSize); 123 | 124 | /* 125 | LZ4_decompress_safe_partial() : 126 | This function decompress a compressed block of size 'inputSize' at position 'source' 127 | into output buffer 'dest' of size 'maxOutputSize'. 128 | The function tries to stop decompressing operation as soon as 'targetOutputSize' has been reached, 129 | reducing decompression time. 130 | return : the number of bytes decoded in the destination buffer (necessarily <= maxOutputSize) 131 | Note : this number can be < 'targetOutputSize' should the compressed block to decode be smaller. 132 | Always control how many bytes were decoded. 133 | If the source stream is detected malformed, the function will stop decoding and return a negative result. 134 | This function never writes outside of output buffer, and never reads outside of input buffer. 
It is therefore protected against malicious data packets 135 | */ 136 | 137 | 138 | //***************************** 139 | // Using an external allocation 140 | //***************************** 141 | int LZ4_sizeofState(); 142 | int LZ4_compress_withState (void* state, const char* source, char* dest, int inputSize); 143 | int LZ4_compress_limitedOutput_withState (void* state, const char* source, char* dest, int inputSize, int maxOutputSize); 144 | 145 | /* 146 | These functions are provided should you prefer to allocate memory for compression tables with your own allocation methods. 147 | To know how much memory must be allocated for the compression tables, use : 148 | int LZ4_sizeofState(); 149 | 150 | Note that tables must be aligned on 4-bytes boundaries, otherwise compression will fail (return code 0). 151 | 152 | The allocated memory can be provided to the compressions functions using 'void* state' parameter. 153 | LZ4_compress_withState() and LZ4_compress_limitedOutput_withState() are equivalent to previously described functions. 154 | They just use the externally allocated memory area instead of allocating their own (on stack, or on heap). 155 | */ 156 | 157 | 158 | //**************************** 159 | // Streaming Functions 160 | //**************************** 161 | 162 | void* LZ4_create (const char* inputBuffer); 163 | int LZ4_compress_continue (void* LZ4_Data, const char* source, char* dest, int inputSize); 164 | int LZ4_compress_limitedOutput_continue (void* LZ4_Data, const char* source, char* dest, int inputSize, int maxOutputSize); 165 | char* LZ4_slideInputBuffer (void* LZ4_Data); 166 | int LZ4_free (void* LZ4_Data); 167 | 168 | /* 169 | These functions allow the compression of dependent blocks, where each block benefits from prior 64 KB within preceding blocks. 170 | In order to achieve this, it is necessary to start creating the LZ4 Data Structure, thanks to the function : 171 | 172 | void* LZ4_create (const char* inputBuffer); 173 | The result of the function is the (void*) pointer on the LZ4 Data Structure. 174 | This pointer will be needed in all other functions. 175 | If the pointer returned is NULL, then the allocation has failed, and compression must be aborted. 176 | The only parameter 'const char* inputBuffer' must, obviously, point at the beginning of input buffer. 177 | The input buffer must be already allocated, and size at least 192KB. 178 | 'inputBuffer' will also be the 'const char* source' of the first block. 179 | 180 | All blocks are expected to lay next to each other within the input buffer, starting from 'inputBuffer'. 181 | To compress each block, use either LZ4_compress_continue() or LZ4_compress_limitedOutput_continue(). 182 | Their behavior are identical to LZ4_compress() or LZ4_compress_limitedOutput(), 183 | but require the LZ4 Data Structure as their first argument, and check that each block starts right after the previous one. 184 | If next block does not begin immediately after the previous one, the compression will fail (return 0). 185 | 186 | When it's no longer possible to lay the next block after the previous one (not enough space left into input buffer), a call to : 187 | char* LZ4_slideInputBuffer(void* LZ4_Data); 188 | must be performed. It will typically copy the latest 64KB of input at the beginning of input buffer. 189 | Note that, for this function to work properly, minimum size of an input buffer must be 192KB. 190 | ==> The memory position where the next input data block must start is provided as the result of the function. 
191 | 192 | Compression can then resume, using LZ4_compress_continue() or LZ4_compress_limitedOutput_continue(), as usual. 193 | 194 | When compression is completed, a call to LZ4_free() will release the memory used by the LZ4 Data Structure. 195 | */ 196 | 197 | int LZ4_sizeofStreamState(); 198 | int LZ4_resetStreamState(void* state, const char* inputBuffer); 199 | 200 | /* 201 | These functions achieve the same result as : 202 | void* LZ4_create (const char* inputBuffer); 203 | 204 | They are provided here to allow the user program to allocate memory using its own routines. 205 | 206 | To know how much space must be allocated, use LZ4_sizeofStreamState(); 207 | Note also that space must be 4-bytes aligned. 208 | 209 | Once space is allocated, you must initialize it using : LZ4_resetStreamState(void* state, const char* inputBuffer); 210 | void* state is a pointer to the space allocated. 211 | It must be aligned on 4-bytes boundaries, and be large enough. 212 | The parameter 'const char* inputBuffer' must, obviously, point at the beginning of input buffer. 213 | The input buffer must be already allocated, and size at least 192KB. 214 | 'inputBuffer' will also be the 'const char* source' of the first block. 215 | 216 | The same space can be re-used multiple times, just by initializing it each time with LZ4_resetStreamState(). 217 | return value of LZ4_resetStreamState() must be 0 is OK. 218 | Any other value means there was an error (typically, pointer is not aligned on 4-bytes boundaries). 219 | */ 220 | 221 | 222 | int LZ4_decompress_safe_withPrefix64k (const char* source, char* dest, int inputSize, int maxOutputSize); 223 | int LZ4_decompress_fast_withPrefix64k (const char* source, char* dest, int outputSize); 224 | 225 | /* 226 | *_withPrefix64k() : 227 | These decoding functions work the same as their "normal name" versions, 228 | but can use up to 64KB of data in front of 'char* dest'. 229 | These functions are necessary to decode inter-dependant blocks. 230 | */ 231 | 232 | 233 | //**************************** 234 | // Obsolete Functions 235 | //**************************** 236 | 237 | static inline int LZ4_uncompress (const char* source, char* dest, int outputSize) { return LZ4_decompress_fast(source, dest, outputSize); } 238 | static inline int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize) { return LZ4_decompress_safe(source, dest, isize, maxOutputSize); } 239 | 240 | /* 241 | These functions are deprecated and should no longer be used. 242 | They are provided here for compatibility with existing user programs. 243 | */ 244 | 245 | 246 | 247 | #if defined (__cplusplus) 248 | } 249 | #endif 250 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Mozilla Public License Version 2.0 2 | ================================== 3 | 4 | 1. Definitions 5 | -------------- 6 | 7 | 1.1. "Contributor" 8 | means each individual or legal entity that creates, contributes to 9 | the creation of, or owns Covered Software. 10 | 11 | 1.2. "Contributor Version" 12 | means the combination of the Contributions of others (if any) used 13 | by a Contributor and that particular Contributor's Contribution. 14 | 15 | 1.3. "Contribution" 16 | means Covered Software of a particular Contributor. 17 | 18 | 1.4. 
"Covered Software" 19 | means Source Code Form to which the initial Contributor has attached 20 | the notice in Exhibit A, the Executable Form of such Source Code 21 | Form, and Modifications of such Source Code Form, in each case 22 | including portions thereof. 23 | 24 | 1.5. "Incompatible With Secondary Licenses" 25 | means 26 | 27 | (a) that the initial Contributor has attached the notice described 28 | in Exhibit B to the Covered Software; or 29 | 30 | (b) that the Covered Software was made available under the terms of 31 | version 1.1 or earlier of the License, but not also under the 32 | terms of a Secondary License. 33 | 34 | 1.6. "Executable Form" 35 | means any form of the work other than Source Code Form. 36 | 37 | 1.7. "Larger Work" 38 | means a work that combines Covered Software with other material, in 39 | a separate file or files, that is not Covered Software. 40 | 41 | 1.8. "License" 42 | means this document. 43 | 44 | 1.9. "Licensable" 45 | means having the right to grant, to the maximum extent possible, 46 | whether at the time of the initial grant or subsequently, any and 47 | all of the rights conveyed by this License. 48 | 49 | 1.10. "Modifications" 50 | means any of the following: 51 | 52 | (a) any file in Source Code Form that results from an addition to, 53 | deletion from, or modification of the contents of Covered 54 | Software; or 55 | 56 | (b) any new file in Source Code Form that contains any Covered 57 | Software. 58 | 59 | 1.11. "Patent Claims" of a Contributor 60 | means any patent claim(s), including without limitation, method, 61 | process, and apparatus claims, in any patent Licensable by such 62 | Contributor that would be infringed, but for the grant of the 63 | License, by the making, using, selling, offering for sale, having 64 | made, import, or transfer of either its Contributions or its 65 | Contributor Version. 66 | 67 | 1.12. "Secondary License" 68 | means either the GNU General Public License, Version 2.0, the GNU 69 | Lesser General Public License, Version 2.1, the GNU Affero General 70 | Public License, Version 3.0, or any later versions of those 71 | licenses. 72 | 73 | 1.13. "Source Code Form" 74 | means the form of the work preferred for making modifications. 75 | 76 | 1.14. "You" (or "Your") 77 | means an individual or a legal entity exercising rights under this 78 | License. For legal entities, "You" includes any entity that 79 | controls, is controlled by, or is under common control with You. For 80 | purposes of this definition, "control" means (a) the power, direct 81 | or indirect, to cause the direction or management of such entity, 82 | whether by contract or otherwise, or (b) ownership of more than 83 | fifty percent (50%) of the outstanding shares or beneficial 84 | ownership of such entity. 85 | 86 | 2. License Grants and Conditions 87 | -------------------------------- 88 | 89 | 2.1. Grants 90 | 91 | Each Contributor hereby grants You a world-wide, royalty-free, 92 | non-exclusive license: 93 | 94 | (a) under intellectual property rights (other than patent or trademark) 95 | Licensable by such Contributor to use, reproduce, make available, 96 | modify, display, perform, distribute, and otherwise exploit its 97 | Contributions, either on an unmodified basis, with Modifications, or 98 | as part of a Larger Work; and 99 | 100 | (b) under Patent Claims of such Contributor to make, use, sell, offer 101 | for sale, have made, import, and otherwise transfer either its 102 | Contributions or its Contributor Version. 103 | 104 | 2.2. 
Effective Date 105 | 106 | The licenses granted in Section 2.1 with respect to any Contribution 107 | become effective for each Contribution on the date the Contributor first 108 | distributes such Contribution. 109 | 110 | 2.3. Limitations on Grant Scope 111 | 112 | The licenses granted in this Section 2 are the only rights granted under 113 | this License. No additional rights or licenses will be implied from the 114 | distribution or licensing of Covered Software under this License. 115 | Notwithstanding Section 2.1(b) above, no patent license is granted by a 116 | Contributor: 117 | 118 | (a) for any code that a Contributor has removed from Covered Software; 119 | or 120 | 121 | (b) for infringements caused by: (i) Your and any other third party's 122 | modifications of Covered Software, or (ii) the combination of its 123 | Contributions with other software (except as part of its Contributor 124 | Version); or 125 | 126 | (c) under Patent Claims infringed by Covered Software in the absence of 127 | its Contributions. 128 | 129 | This License does not grant any rights in the trademarks, service marks, 130 | or logos of any Contributor (except as may be necessary to comply with 131 | the notice requirements in Section 3.4). 132 | 133 | 2.4. Subsequent Licenses 134 | 135 | No Contributor makes additional grants as a result of Your choice to 136 | distribute the Covered Software under a subsequent version of this 137 | License (see Section 10.2) or under the terms of a Secondary License (if 138 | permitted under the terms of Section 3.3). 139 | 140 | 2.5. Representation 141 | 142 | Each Contributor represents that the Contributor believes its 143 | Contributions are its original creation(s) or it has sufficient rights 144 | to grant the rights to its Contributions conveyed by this License. 145 | 146 | 2.6. Fair Use 147 | 148 | This License is not intended to limit any rights You have under 149 | applicable copyright doctrines of fair use, fair dealing, or other 150 | equivalents. 151 | 152 | 2.7. Conditions 153 | 154 | Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted 155 | in Section 2.1. 156 | 157 | 3. Responsibilities 158 | ------------------- 159 | 160 | 3.1. Distribution of Source Form 161 | 162 | All distribution of Covered Software in Source Code Form, including any 163 | Modifications that You create or to which You contribute, must be under 164 | the terms of this License. You must inform recipients that the Source 165 | Code Form of the Covered Software is governed by the terms of this 166 | License, and how they can obtain a copy of this License. You may not 167 | attempt to alter or restrict the recipients' rights in the Source Code 168 | Form. 169 | 170 | 3.2. Distribution of Executable Form 171 | 172 | If You distribute Covered Software in Executable Form then: 173 | 174 | (a) such Covered Software must also be made available in Source Code 175 | Form, as described in Section 3.1, and You must inform recipients of 176 | the Executable Form how they can obtain a copy of such Source Code 177 | Form by reasonable means in a timely manner, at a charge no more 178 | than the cost of distribution to the recipient; and 179 | 180 | (b) You may distribute such Executable Form under the terms of this 181 | License, or sublicense it under different terms, provided that the 182 | license for the Executable Form does not attempt to limit or alter 183 | the recipients' rights in the Source Code Form under this License. 184 | 185 | 3.3. 
Distribution of a Larger Work 186 | 187 | You may create and distribute a Larger Work under terms of Your choice, 188 | provided that You also comply with the requirements of this License for 189 | the Covered Software. If the Larger Work is a combination of Covered 190 | Software with a work governed by one or more Secondary Licenses, and the 191 | Covered Software is not Incompatible With Secondary Licenses, this 192 | License permits You to additionally distribute such Covered Software 193 | under the terms of such Secondary License(s), so that the recipient of 194 | the Larger Work may, at their option, further distribute the Covered 195 | Software under the terms of either this License or such Secondary 196 | License(s). 197 | 198 | 3.4. Notices 199 | 200 | You may not remove or alter the substance of any license notices 201 | (including copyright notices, patent notices, disclaimers of warranty, 202 | or limitations of liability) contained within the Source Code Form of 203 | the Covered Software, except that You may alter any license notices to 204 | the extent required to remedy known factual inaccuracies. 205 | 206 | 3.5. Application of Additional Terms 207 | 208 | You may choose to offer, and to charge a fee for, warranty, support, 209 | indemnity or liability obligations to one or more recipients of Covered 210 | Software. However, You may do so only on Your own behalf, and not on 211 | behalf of any Contributor. You must make it absolutely clear that any 212 | such warranty, support, indemnity, or liability obligation is offered by 213 | You alone, and You hereby agree to indemnify every Contributor for any 214 | liability incurred by such Contributor as a result of warranty, support, 215 | indemnity or liability terms You offer. You may include additional 216 | disclaimers of warranty and limitations of liability specific to any 217 | jurisdiction. 218 | 219 | 4. Inability to Comply Due to Statute or Regulation 220 | --------------------------------------------------- 221 | 222 | If it is impossible for You to comply with any of the terms of this 223 | License with respect to some or all of the Covered Software due to 224 | statute, judicial order, or regulation then You must: (a) comply with 225 | the terms of this License to the maximum extent possible; and (b) 226 | describe the limitations and the code they affect. Such description must 227 | be placed in a text file included with all distributions of the Covered 228 | Software under this License. Except to the extent prohibited by statute 229 | or regulation, such description must be sufficiently detailed for a 230 | recipient of ordinary skill to be able to understand it. 231 | 232 | 5. Termination 233 | -------------- 234 | 235 | 5.1. The rights granted under this License will terminate automatically 236 | if You fail to comply with any of its terms. However, if You become 237 | compliant, then the rights granted under this License from a particular 238 | Contributor are reinstated (a) provisionally, unless and until such 239 | Contributor explicitly and finally terminates Your grants, and (b) on an 240 | ongoing basis, if such Contributor fails to notify You of the 241 | non-compliance by some reasonable means prior to 60 days after You have 242 | come back into compliance. 
Moreover, Your grants from a particular 243 | Contributor are reinstated on an ongoing basis if such Contributor 244 | notifies You of the non-compliance by some reasonable means, this is the 245 | first time You have received notice of non-compliance with this License 246 | from such Contributor, and You become compliant prior to 30 days after 247 | Your receipt of the notice. 248 | 249 | 5.2. If You initiate litigation against any entity by asserting a patent 250 | infringement claim (excluding declaratory judgment actions, 251 | counter-claims, and cross-claims) alleging that a Contributor Version 252 | directly or indirectly infringes any patent, then the rights granted to 253 | You by any and all Contributors for the Covered Software under Section 254 | 2.1 of this License shall terminate. 255 | 256 | 5.3. In the event of termination under Sections 5.1 or 5.2 above, all 257 | end user license agreements (excluding distributors and resellers) which 258 | have been validly granted by You or Your distributors under this License 259 | prior to termination shall survive termination. 260 | 261 | ************************************************************************ 262 | * * 263 | * 6. Disclaimer of Warranty * 264 | * ------------------------- * 265 | * * 266 | * Covered Software is provided under this License on an "as is" * 267 | * basis, without warranty of any kind, either expressed, implied, or * 268 | * statutory, including, without limitation, warranties that the * 269 | * Covered Software is free of defects, merchantable, fit for a * 270 | * particular purpose or non-infringing. The entire risk as to the * 271 | * quality and performance of the Covered Software is with You. * 272 | * Should any Covered Software prove defective in any respect, You * 273 | * (not any Contributor) assume the cost of any necessary servicing, * 274 | * repair, or correction. This disclaimer of warranty constitutes an * 275 | * essential part of this License. No use of any Covered Software is * 276 | * authorized under this License except under this disclaimer. * 277 | * * 278 | ************************************************************************ 279 | 280 | ************************************************************************ 281 | * * 282 | * 7. Limitation of Liability * 283 | * -------------------------- * 284 | * * 285 | * Under no circumstances and under no legal theory, whether tort * 286 | * (including negligence), contract, or otherwise, shall any * 287 | * Contributor, or anyone who distributes Covered Software as * 288 | * permitted above, be liable to You for any direct, indirect, * 289 | * special, incidental, or consequential damages of any character * 290 | * including, without limitation, damages for lost profits, loss of * 291 | * goodwill, work stoppage, computer failure or malfunction, or any * 292 | * and all other commercial damages or losses, even if such party * 293 | * shall have been informed of the possibility of such damages. This * 294 | * limitation of liability shall not apply to liability for death or * 295 | * personal injury resulting from such party's negligence to the * 296 | * extent applicable law prohibits such limitation. Some * 297 | * jurisdictions do not allow the exclusion or limitation of * 298 | * incidental or consequential damages, so this exclusion and * 299 | * limitation may not apply to You. * 300 | * * 301 | ************************************************************************ 302 | 303 | 8. 
Litigation 304 | ------------- 305 | 306 | Any litigation relating to this License may be brought only in the 307 | courts of a jurisdiction where the defendant maintains its principal 308 | place of business and such litigation shall be governed by laws of that 309 | jurisdiction, without reference to its conflict-of-law provisions. 310 | Nothing in this Section shall prevent a party's ability to bring 311 | cross-claims or counter-claims. 312 | 313 | 9. Miscellaneous 314 | ---------------- 315 | 316 | This License represents the complete agreement concerning the subject 317 | matter hereof. If any provision of this License is held to be 318 | unenforceable, such provision shall be reformed only to the extent 319 | necessary to make it enforceable. Any law or regulation which provides 320 | that the language of a contract shall be construed against the drafter 321 | shall not be used to construe this License against a Contributor. 322 | 323 | 10. Versions of the License 324 | --------------------------- 325 | 326 | 10.1. New Versions 327 | 328 | Mozilla Foundation is the license steward. Except as provided in Section 329 | 10.3, no one other than the license steward has the right to modify or 330 | publish new versions of this License. Each version will be given a 331 | distinguishing version number. 332 | 333 | 10.2. Effect of New Versions 334 | 335 | You may distribute the Covered Software under the terms of the version 336 | of the License under which You originally received the Covered Software, 337 | or under the terms of any subsequent version published by the license 338 | steward. 339 | 340 | 10.3. Modified Versions 341 | 342 | If you create software not governed by this License, and you want to 343 | create a new license for such software, you may create and use a 344 | modified version of this License if you rename the license and remove 345 | any references to the name of the license steward (except to note that 346 | such modified license differs from this License). 347 | 348 | 10.4. Distributing Source Code Form that is Incompatible With Secondary 349 | Licenses 350 | 351 | If You choose to distribute Source Code Form that is Incompatible With 352 | Secondary Licenses under the terms of this version of the License, the 353 | notice described in Exhibit B of this License must be attached. 354 | 355 | Exhibit A - Source Code Form License Notice 356 | ------------------------------------------- 357 | 358 | This Source Code Form is subject to the terms of the Mozilla Public 359 | License, v. 2.0. If a copy of the MPL was not distributed with this 360 | file, You can obtain one at http://mozilla.org/MPL/2.0/. 361 | 362 | If it is not possible or desirable to put the notice in a particular 363 | file, then You may include the notice in a location (such as a LICENSE 364 | file in a relevant directory) where a recipient would be likely to look 365 | for such a notice. 366 | 367 | You may add additional accurate notices of copyright ownership. 368 | 369 | Exhibit B - "Incompatible With Secondary Licenses" Notice 370 | --------------------------------------------------------- 371 | 372 | This Source Code Form is "Incompatible With Secondary Licenses", as 373 | defined by the Mozilla Public License, v. 2.0. 374 | --------------------------------------------------------------------------------