├── .gitignore ├── configs ├── sqlboiler │ └── metadata.yaml └── sql-migrate │ └── metadata.yaml ├── pkg ├── config │ ├── events.go │ ├── constants.go │ ├── error.go │ └── config.go ├── fs │ ├── filesystem_unix.go │ ├── filesystem_non_unix.go │ ├── stat_unix.go │ ├── stat_non_unix.go │ └── fileinfo.go ├── hardware │ ├── eject.go │ └── tell.go ├── logging │ └── structured.go ├── cache │ ├── cache.go │ ├── filesystem.go │ └── write.go ├── tape │ ├── read.go │ ├── write.go │ └── manager.go ├── mtio │ ├── mtio_stub.go │ └── mtio_linux.go ├── inventory │ ├── list.go │ ├── find.go │ └── stat.go ├── operations │ ├── operations.go │ ├── initialize.go │ ├── restore.go │ ├── delete.go │ ├── move.go │ └── archive.go ├── keys │ ├── recipient.go │ └── identity.go ├── compression │ ├── decompress.go │ └── compress.go ├── encryption │ ├── encrypt.go │ └── decrypt.go ├── utility │ └── keygen.go ├── recovery │ ├── fetch.go │ └── query.go └── signature │ ├── sign.go │ └── verify.go ├── cmd └── stfs │ ├── main.go │ └── cmd │ ├── drive_root.go │ ├── recovery_root.go │ ├── serve_root.go │ ├── inventory_root.go │ ├── operation_root.go │ ├── drive_eject.go │ ├── drive_tell.go │ ├── inventory_find.go │ ├── inventory_stat.go │ ├── inventory_list.go │ ├── keygen.go │ ├── root.go │ ├── recovery_query.go │ ├── operation_delete.go │ ├── operation_move.go │ ├── operation_initialize.go │ ├── recovery_index.go │ ├── recovery_fetch.go │ ├── operation_restore.go │ ├── operation_update.go │ ├── serve_http.go │ └── operation_archive.go ├── internal ├── ioext │ ├── write.go │ ├── flusher.go │ └── counter.go ├── pathext │ └── path.go ├── db │ └── sqlite │ │ ├── models │ │ └── metadata │ │ │ ├── boil_view_names.go │ │ │ ├── boil_table_names.go │ │ │ ├── boil_queries.go │ │ │ ├── boil_types.go │ │ │ └── sqlite_upsert.go │ │ └── migrations │ │ └── metadata │ │ └── migrations.go ├── keyext │ └── read.go ├── suffix │ ├── config.go │ ├── add.go │ └── remove.go ├── check │ ├── signature.go │ ├── encryption.go │ ├── keys.go │ ├── filesystem.go │ └── compression.go ├── handlers │ └── panic.go ├── records │ └── stfs.go ├── persisters │ ├── sqlite.go │ └── sqite_cgo.go ├── tarext │ └── write.go ├── ftp │ └── server.go ├── logging │ ├── csv.go │ └── json.go └── converters │ └── header.go ├── Hydrunfile ├── examples ├── logger.go ├── simple │ └── main.go └── full │ └── main.go ├── Makefile ├── go.mod ├── db └── sqlite │ └── migrations │ └── metadata │ └── 1637447083.sql └── .github └── workflows └── hydrun.yaml /.gitignore: -------------------------------------------------------------------------------- 1 | out 2 | -------------------------------------------------------------------------------- /configs/sqlboiler/metadata.yaml: -------------------------------------------------------------------------------- 1 | no-tests: true 2 | sqlite3: 3 | dbname: /tmp/stfs-metadata.sqlite 4 | -------------------------------------------------------------------------------- /pkg/config/events.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | type HeaderEvent struct { 4 | Type string 5 | Indexed bool 6 | Header *Header 7 | } 8 | -------------------------------------------------------------------------------- /configs/sql-migrate/metadata.yaml: -------------------------------------------------------------------------------- 1 | production: 2 | dialect: sqlite3 3 | datasource: /tmp/stfs-metadata.sqlite 4 | dir: db/sqlite/migrations/metadata/ 5 | 
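Note: the sqlboiler and sql-migrate configs above both point at the same temporary database, /tmp/stfs-metadata.sqlite, which the Makefile's depend target migrates with sql-migrate before running go generate. A minimal sketch of opening that metadata database programmatically with the SQLite persister shown later in internal/persisters/sqlite.go; this only compiles from within the stfs module itself (the persisters package is internal), and pointing FileMigrationSource at the repository's db/sqlite/migrations/metadata directory is an assumption for illustration:

package main

import (
	"github.com/pojntfx/stfs/internal/persisters"
	migrate "github.com/rubenv/sql-migrate"
)

func main() {
	db := &persisters.SQLite{
		// Same path that configs/sqlboiler/metadata.yaml and configs/sql-migrate/metadata.yaml use
		DBPath: "/tmp/stfs-metadata.sqlite",
		// Apply the schema from db/sqlite/migrations/metadata before use (illustrative source)
		Migrations: migrate.FileMigrationSource{
			Dir: "db/sqlite/migrations/metadata",
		},
	}

	if err := db.Open(); err != nil {
		panic(err)
	}
	defer db.DB.Close()
}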
-------------------------------------------------------------------------------- /pkg/fs/filesystem_unix.go: -------------------------------------------------------------------------------- 1 | //go:build !(windows || wasm) 2 | 3 | package fs 4 | 5 | import "syscall" 6 | 7 | const ( 8 | O_ACCMODE = syscall.O_ACCMODE 9 | ) 10 | -------------------------------------------------------------------------------- /cmd/stfs/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import "github.com/pojntfx/stfs/cmd/stfs/cmd" 4 | 5 | func main() { 6 | if err := cmd.Execute(); err != nil { 7 | panic(err) 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /pkg/hardware/eject.go: -------------------------------------------------------------------------------- 1 | package hardware 2 | 3 | import "github.com/pojntfx/stfs/pkg/config" 4 | 5 | func Eject(mt config.MagneticTapeIO, fd uintptr) error { 6 | return mt.EjectTape(fd) 7 | } 8 | -------------------------------------------------------------------------------- /pkg/fs/filesystem_non_unix.go: -------------------------------------------------------------------------------- 1 | //go:build windows || wasm 2 | 3 | package fs 4 | 5 | const ( 6 | O_ACCMODE = 0x3 // It is safe to hard-code this bit as the bits are not being set from the OS 7 | ) 8 | -------------------------------------------------------------------------------- /pkg/hardware/tell.go: -------------------------------------------------------------------------------- 1 | package hardware 2 | 3 | import "github.com/pojntfx/stfs/pkg/config" 4 | 5 | func Tell(mt config.MagneticTapeIO, fd uintptr) (int64, error) { 6 | return mt.GetCurrentRecordFromTape(fd) 7 | } 8 | -------------------------------------------------------------------------------- /pkg/logging/structured.go: -------------------------------------------------------------------------------- 1 | package logging 2 | 3 | import ( 4 | golog "github.com/fclairamb/go-log" 5 | ) 6 | 7 | type StructuredLogger interface { 8 | golog.Logger 9 | 10 | Trace(event string, keyvals ...interface{}) 11 | } 12 | -------------------------------------------------------------------------------- /pkg/cache/cache.go: -------------------------------------------------------------------------------- 1 | package cache 2 | 3 | import "io" 4 | 5 | type WriteCache interface { 6 | io.Closer 7 | io.Reader 8 | io.Seeker 9 | io.Writer 10 | 11 | Truncate(size int64) error 12 | Size() (int64, error) 13 | Sync() error 14 | } 15 | -------------------------------------------------------------------------------- /internal/ioext/write.go: -------------------------------------------------------------------------------- 1 | package ioext 2 | 3 | import "io" 4 | 5 | type NopCloser struct { 6 | io.Writer 7 | } 8 | 9 | func (NopCloser) Close() error { return nil } 10 | 11 | func AddCloseNopToWriter(w io.Writer) NopCloser { 12 | return NopCloser{w} 13 | } 14 | -------------------------------------------------------------------------------- /internal/pathext/path.go: -------------------------------------------------------------------------------- 1 | package pathext 2 | 3 | import "strings" 4 | 5 | func IsRoot(path string, trim bool) bool { 6 | if trim && len(strings.TrimSpace(path)) == 0 { 7 | return true 8 | } 9 | 10 | return path == "" || path == "." 
|| path == "/" || path == "./" 11 | } 12 | -------------------------------------------------------------------------------- /internal/db/sqlite/models/metadata/boil_view_names.go: -------------------------------------------------------------------------------- 1 | // Code generated by SQLBoiler 4.16.2 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT. 2 | // This file is meant to be re-generated in place and/or deleted at any time. 3 | 4 | package models 5 | 6 | var ViewNames = struct { 7 | }{} 8 | -------------------------------------------------------------------------------- /internal/keyext/read.go: -------------------------------------------------------------------------------- 1 | package keyext 2 | 3 | import ( 4 | "io/ioutil" 5 | 6 | "github.com/pojntfx/stfs/pkg/config" 7 | ) 8 | 9 | func ReadKey(encryptionFormat string, pathToKey string) ([]byte, error) { 10 | if encryptionFormat == config.NoneKey { 11 | return []byte{}, nil 12 | } 13 | 14 | return ioutil.ReadFile(pathToKey) 15 | } 16 | -------------------------------------------------------------------------------- /cmd/stfs/cmd/drive_root.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "github.com/spf13/cobra" 5 | "github.com/spf13/viper" 6 | ) 7 | 8 | var driveCmd = &cobra.Command{ 9 | Use: "drive", 10 | Aliases: []string{"dri", "d"}, 11 | Short: "Manage tape drives", 12 | } 13 | 14 | func init() { 15 | viper.AutomaticEnv() 16 | 17 | rootCmd.AddCommand(driveCmd) 18 | } 19 | -------------------------------------------------------------------------------- /internal/suffix/config.go: -------------------------------------------------------------------------------- 1 | package suffix 2 | 3 | const ( 4 | CompressionFormatGZipSuffix = ".gz" 5 | CompressionFormatLZ4Suffix = ".lz4" 6 | CompressionFormatZStandardSuffix = ".zst" 7 | CompressionFormatBrotliSuffix = ".br" 8 | CompressionFormatBzip2Suffix = ".bz2" 9 | 10 | EncryptionFormatAgeSuffix = ".age" 11 | EncryptionFormatPGPSuffix = ".pgp" 12 | ) 13 | -------------------------------------------------------------------------------- /cmd/stfs/cmd/recovery_root.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "github.com/spf13/cobra" 5 | "github.com/spf13/viper" 6 | ) 7 | 8 | var recoveryCmd = &cobra.Command{ 9 | Use: "recovery", 10 | Aliases: []string{"rec", "r"}, 11 | Short: "Recover tapes or tar files", 12 | } 13 | 14 | func init() { 15 | viper.AutomaticEnv() 16 | 17 | rootCmd.AddCommand(recoveryCmd) 18 | } 19 | -------------------------------------------------------------------------------- /internal/ioext/flusher.go: -------------------------------------------------------------------------------- 1 | package ioext 2 | 3 | import "io" 4 | 5 | type FlusherWriter interface { 6 | io.WriteCloser 7 | 8 | Flush() error 9 | } 10 | 11 | type NopFlusherWriter struct { 12 | io.WriteCloser 13 | } 14 | 15 | func (NopFlusherWriter) Flush() error { return nil } 16 | 17 | func AddFlushNop(w io.WriteCloser) NopFlusherWriter { 18 | return NopFlusherWriter{w} 19 | } 20 | -------------------------------------------------------------------------------- /cmd/stfs/cmd/serve_root.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "github.com/spf13/cobra" 5 | "github.com/spf13/viper" 6 | ) 7 | 8 | var serveCmd = &cobra.Command{ 9 | Use: "serve", 10 | Aliases: []string{"ser", "s", 
"srv"}, 11 | Short: "Serve tape or tar file and the index", 12 | } 13 | 14 | func init() { 15 | viper.AutomaticEnv() 16 | 17 | rootCmd.AddCommand(serveCmd) 18 | } 19 | -------------------------------------------------------------------------------- /cmd/stfs/cmd/inventory_root.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "github.com/spf13/cobra" 5 | "github.com/spf13/viper" 6 | ) 7 | 8 | var inventoryCmd = &cobra.Command{ 9 | Use: "inventory", 10 | Aliases: []string{"inv", "i"}, 11 | Short: "Get contents and metadata of tape or tar file from the index", 12 | } 13 | 14 | func init() { 15 | viper.AutomaticEnv() 16 | 17 | rootCmd.AddCommand(inventoryCmd) 18 | } 19 | -------------------------------------------------------------------------------- /cmd/stfs/cmd/operation_root.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "github.com/spf13/cobra" 5 | "github.com/spf13/viper" 6 | ) 7 | 8 | var operationCmd = &cobra.Command{ 9 | Use: "operation", 10 | Aliases: []string{"ope", "o", "op"}, 11 | Short: "Perform operations on tape or tar file and the index", 12 | } 13 | 14 | func init() { 15 | viper.AutomaticEnv() 16 | 17 | rootCmd.AddCommand(operationCmd) 18 | } 19 | -------------------------------------------------------------------------------- /internal/db/sqlite/models/metadata/boil_table_names.go: -------------------------------------------------------------------------------- 1 | // Code generated by SQLBoiler 4.16.2 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT. 2 | // This file is meant to be re-generated in place and/or deleted at any time. 3 | 4 | package models 5 | 6 | var TableNames = struct { 7 | GorpMigrations string 8 | Headers string 9 | }{ 10 | GorpMigrations: "gorp_migrations", 11 | Headers: "headers", 12 | } 13 | -------------------------------------------------------------------------------- /pkg/fs/stat_unix.go: -------------------------------------------------------------------------------- 1 | //go:build linux 2 | 3 | package fs 4 | 5 | import "syscall" 6 | 7 | type Stat syscall.Stat_t 8 | 9 | func NewStat( 10 | uid uint32, 11 | gid uint32, 12 | mtim int64, 13 | atim int64, 14 | ctim int64, 15 | ) *Stat { 16 | return &Stat{ 17 | Uid: uid, 18 | Gid: gid, 19 | Mtim: syscall.NsecToTimespec(mtim), 20 | Atim: syscall.NsecToTimespec(atim), 21 | Ctim: syscall.NsecToTimespec(ctim), 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /internal/check/signature.go: -------------------------------------------------------------------------------- 1 | package check 2 | 3 | import "github.com/pojntfx/stfs/pkg/config" 4 | 5 | func CheckSignatureFormat(signatureFormat string) error { 6 | signatureFormatIsKnown := false 7 | 8 | for _, candidate := range config.KnownSignatureFormats { 9 | if signatureFormat == candidate { 10 | signatureFormatIsKnown = true 11 | } 12 | } 13 | 14 | if !signatureFormatIsKnown { 15 | return config.ErrSignatureFormatUnknown 16 | } 17 | 18 | return nil 19 | } 20 | -------------------------------------------------------------------------------- /internal/check/encryption.go: -------------------------------------------------------------------------------- 1 | package check 2 | 3 | import "github.com/pojntfx/stfs/pkg/config" 4 | 5 | func CheckEncryptionFormat(encryptionFormat string) error { 6 | encryptionFormatIsKnown := false 7 | 8 | for _, candidate := range 
config.KnownEncryptionFormats { 9 | if encryptionFormat == candidate { 10 | encryptionFormatIsKnown = true 11 | } 12 | } 13 | 14 | if !encryptionFormatIsKnown { 15 | return config.ErrEncryptionFormatUnknown 16 | } 17 | 18 | return nil 19 | } 20 | -------------------------------------------------------------------------------- /internal/check/keys.go: -------------------------------------------------------------------------------- 1 | package check 2 | 3 | import ( 4 | "errors" 5 | "os" 6 | 7 | "github.com/pojntfx/stfs/pkg/config" 8 | ) 9 | 10 | var ( 11 | ErrKeyNotAccessible = errors.New("key not found or accessible") 12 | ) 13 | 14 | func CheckKeyAccessible(encryptionFormat string, pathToKey string) error { 15 | if encryptionFormat == config.NoneKey { 16 | return nil 17 | } 18 | 19 | if _, err := os.Stat(pathToKey); err != nil { 20 | return ErrKeyNotAccessible 21 | } 22 | 23 | return nil 24 | } 25 | -------------------------------------------------------------------------------- /internal/handlers/panic.go: -------------------------------------------------------------------------------- 1 | package handlers 2 | 3 | import ( 4 | "fmt" 5 | "net/http" 6 | 7 | "github.com/pojntfx/stfs/internal/logging" 8 | ) 9 | 10 | func PanicHandler(h http.Handler, log *logging.JSONLogger) http.Handler { 11 | return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 12 | defer func() { 13 | if err := recover(); err != nil { 14 | http.Error(w, fmt.Sprintf("%v", err), http.StatusInternalServerError) 15 | 16 | log.Error("Error during HTTP request", map[string]interface{}{ 17 | "err": err, 18 | }) 19 | } 20 | }() 21 | 22 | h.ServeHTTP(w, r) 23 | }) 24 | } 25 | -------------------------------------------------------------------------------- /pkg/tape/read.go: -------------------------------------------------------------------------------- 1 | package tape 2 | 3 | import "os" 4 | 5 | func OpenTapeReadOnly(drive string) (f *os.File, isRegular bool, err error) { 6 | fileDescription, err := os.Stat(drive) 7 | if err != nil { 8 | return nil, false, err 9 | } 10 | 11 | isRegular = fileDescription.Mode().IsRegular() 12 | if isRegular { 13 | f, err = os.Open(drive) 14 | if err != nil { 15 | return f, isRegular, err 16 | } 17 | 18 | return f, isRegular, nil 19 | } 20 | 21 | f, err = os.OpenFile(drive, os.O_RDONLY, os.ModeCharDevice) 22 | if err != nil { 23 | return f, isRegular, err 24 | } 25 | 26 | return f, isRegular, nil 27 | } 28 | -------------------------------------------------------------------------------- /internal/records/stfs.go: -------------------------------------------------------------------------------- 1 | package records 2 | 3 | const ( 4 | STFSPrefix = "STFS." 
5 | 6 | STFSRecordVersion = STFSPrefix + "Version" 7 | STFSRecordVersion1 = "1" 8 | 9 | STFSRecordAction = STFSPrefix + "Action" 10 | STFSRecordActionCreate = "CREATE" 11 | STFSRecordActionDelete = "DELETE" 12 | STFSRecordActionUpdate = "UPDATE" 13 | 14 | STFSRecordReplacesContent = STFSPrefix + "ReplacesContent" 15 | STFSRecordReplacesContentTrue = "true" 16 | STFSRecordReplacesContentFalse = "false" 17 | 18 | STFSRecordReplacesName = STFSPrefix + "ReplacesName" 19 | 20 | STFSRecordUncompressedSize = STFSPrefix + "UncompressedSize" 21 | 22 | STFSRecordSignature = STFSPrefix + "Signature" 23 | 24 | STFSRecordEmbeddedHeader = STFSPrefix + "EmbeddedHeader" 25 | ) 26 | -------------------------------------------------------------------------------- /pkg/mtio/mtio_stub.go: -------------------------------------------------------------------------------- 1 | //go:build !linux 2 | 3 | package mtio 4 | 5 | import ( 6 | "github.com/pojntfx/stfs/pkg/config" 7 | ) 8 | 9 | type MagneticTapeIO struct{} 10 | 11 | func (t MagneticTapeIO) GetCurrentRecordFromTape(fd uintptr) (int64, error) { 12 | return -1, config.ErrTapeDrivesUnsupported 13 | } 14 | 15 | func (t MagneticTapeIO) GoToEndOfTape(fd uintptr) error { 16 | return config.ErrTapeDrivesUnsupported 17 | } 18 | 19 | func (t MagneticTapeIO) GoToNextFileOnTape(fd uintptr) error { 20 | return config.ErrTapeDrivesUnsupported 21 | } 22 | 23 | func (t MagneticTapeIO) EjectTape(fd uintptr) error { 24 | return config.ErrTapeDrivesUnsupported 25 | } 26 | 27 | func (t MagneticTapeIO) SeekToRecordOnTape(fd uintptr, record int32) error { 28 | return config.ErrTapeDrivesUnsupported 29 | } 30 | -------------------------------------------------------------------------------- /internal/check/filesystem.go: -------------------------------------------------------------------------------- 1 | package check 2 | 3 | import "github.com/pojntfx/stfs/pkg/config" 4 | 5 | func CheckFileSystemCacheType(cacheType string) error { 6 | cacheTypeIsKnown := false 7 | 8 | for _, candidate := range config.KnownFileSystemCacheTypes { 9 | if cacheType == candidate { 10 | cacheTypeIsKnown = true 11 | } 12 | } 13 | 14 | if !cacheTypeIsKnown { 15 | return config.ErrFileSystemCacheTypeUnknown 16 | } 17 | 18 | return nil 19 | } 20 | 21 | func CheckWriteCacheType(cacheType string) error { 22 | cacheTypeIsKnown := false 23 | 24 | for _, candidate := range config.KnownWriteCacheTypes { 25 | if cacheType == candidate { 26 | cacheTypeIsKnown = true 27 | } 28 | } 29 | 30 | if !cacheTypeIsKnown { 31 | return config.ErrWriteCacheTypeUnknown 32 | } 33 | 34 | return nil 35 | } 36 | -------------------------------------------------------------------------------- /cmd/stfs/cmd/drive_eject.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "github.com/pojntfx/stfs/pkg/hardware" 5 | "github.com/pojntfx/stfs/pkg/mtio" 6 | "github.com/pojntfx/stfs/pkg/tape" 7 | "github.com/spf13/cobra" 8 | "github.com/spf13/viper" 9 | ) 10 | 11 | var driveEjectCmd = &cobra.Command{ 12 | Use: "eject", 13 | Short: "Eject tape from drive", 14 | RunE: func(cmd *cobra.Command, args []string) error { 15 | if err := viper.BindPFlags(cmd.PersistentFlags()); err != nil { 16 | return err 17 | } 18 | 19 | reader, _, err := tape.OpenTapeReadOnly( 20 | viper.GetString(driveFlag), 21 | ) 22 | if err != nil { 23 | return nil 24 | } 25 | defer reader.Close() 26 | 27 | return hardware.Eject( 28 | mtio.MagneticTapeIO{}, 29 | reader.Fd(), 30 | ) 31 | }, 32 | } 33 | 34 | 
func init() { 35 | viper.AutomaticEnv() 36 | 37 | driveCmd.AddCommand(driveEjectCmd) 38 | } 39 | -------------------------------------------------------------------------------- /internal/check/compression.go: -------------------------------------------------------------------------------- 1 | package check 2 | 3 | import ( 4 | "github.com/pojntfx/stfs/pkg/config" 5 | ) 6 | 7 | func CheckCompressionFormat(compressionFormat string) error { 8 | compressionFormatIsKnown := false 9 | 10 | for _, candidate := range config.KnownCompressionFormats { 11 | if compressionFormat == candidate { 12 | compressionFormatIsKnown = true 13 | } 14 | } 15 | 16 | if !compressionFormatIsKnown { 17 | return config.ErrCompressionFormatUnknown 18 | } 19 | 20 | return nil 21 | } 22 | 23 | func CheckCompressionLevel(compressionLevel string) error { 24 | compressionLevelIsKnown := false 25 | 26 | for _, candidate := range config.KnownCompressionLevels { 27 | if compressionLevel == candidate { 28 | compressionLevelIsKnown = true 29 | } 30 | } 31 | 32 | if !compressionLevelIsKnown { 33 | return config.ErrCompressionLevelUnknown 34 | } 35 | 36 | return nil 37 | } 38 | -------------------------------------------------------------------------------- /pkg/inventory/list.go: -------------------------------------------------------------------------------- 1 | package inventory 2 | 3 | import ( 4 | "archive/tar" 5 | "context" 6 | 7 | "github.com/pojntfx/stfs/internal/converters" 8 | "github.com/pojntfx/stfs/pkg/config" 9 | ) 10 | 11 | func List( 12 | metadata config.MetadataConfig, 13 | 14 | name string, 15 | limit int, 16 | 17 | onHeader func(hdr *config.Header), 18 | ) ([]*tar.Header, error) { 19 | dbHdrs, err := metadata.Metadata.GetHeaderDirectChildren(context.Background(), name, limit) 20 | if err != nil { 21 | return []*tar.Header{}, err 22 | } 23 | 24 | headers := []*tar.Header{} 25 | for _, dbhdr := range dbHdrs { 26 | hdr, err := converters.DBHeaderToTarHeader(converters.ConfigHeaderToDBHeader(dbhdr)) 27 | if err != nil { 28 | return []*tar.Header{}, err 29 | } 30 | 31 | if onHeader != nil { 32 | onHeader(dbhdr) 33 | } 34 | 35 | headers = append(headers, hdr) 36 | } 37 | 38 | return headers, nil 39 | } 40 | -------------------------------------------------------------------------------- /pkg/inventory/find.go: -------------------------------------------------------------------------------- 1 | package inventory 2 | 3 | import ( 4 | "archive/tar" 5 | "context" 6 | "regexp" 7 | 8 | "github.com/pojntfx/stfs/internal/converters" 9 | "github.com/pojntfx/stfs/pkg/config" 10 | ) 11 | 12 | func Find( 13 | metadata config.MetadataConfig, 14 | 15 | expression string, 16 | 17 | onHeader func(hdr *config.Header), 18 | ) ([]*tar.Header, error) { 19 | dbHdrs, err := metadata.Metadata.GetHeaders(context.Background()) 20 | if err != nil { 21 | return []*tar.Header{}, err 22 | } 23 | 24 | headers := []*tar.Header{} 25 | for _, dbhdr := range dbHdrs { 26 | if regexp.MustCompile(expression).Match([]byte(dbhdr.Name)) { 27 | hdr, err := converters.DBHeaderToTarHeader(converters.ConfigHeaderToDBHeader(dbhdr)) 28 | if err != nil { 29 | return []*tar.Header{}, err 30 | } 31 | 32 | if onHeader != nil { 33 | onHeader(dbhdr) 34 | } 35 | 36 | headers = append(headers, hdr) 37 | } 38 | } 39 | 40 | return headers, nil 41 | } 42 | -------------------------------------------------------------------------------- /pkg/fs/stat_non_unix.go: -------------------------------------------------------------------------------- 1 | //go:build !linux 2 | 3 | 
package fs 4 | 5 | // From the Go stdlib for linux/amd64 6 | type Timespec struct { 7 | Sec int64 8 | Nsec int64 9 | } 10 | 11 | func (ts *Timespec) Unix() (sec int64, nsec int64) { 12 | return int64(ts.Sec), int64(ts.Nsec) 13 | } 14 | 15 | func (ts *Timespec) Nano() int64 { 16 | return int64(ts.Sec)*1e9 + int64(ts.Nsec) 17 | } 18 | 19 | func NsecToTimespec(nsec int64) Timespec { 20 | sec := nsec / 1e9 21 | nsec = nsec % 1e9 22 | if nsec < 0 { 23 | nsec += 1e9 24 | sec-- 25 | } 26 | 27 | return Timespec{Sec: sec, Nsec: nsec} 28 | } 29 | 30 | type Stat struct { 31 | Uid uint32 32 | Gid uint32 33 | Mtim Timespec 34 | Atim Timespec 35 | Ctim Timespec 36 | } 37 | 38 | func NewStat( 39 | uid uint32, 40 | gid uint32, 41 | mtim int64, 42 | atim int64, 43 | ctim int64, 44 | ) *Stat { 45 | return &Stat{ 46 | Uid: uid, 47 | Gid: gid, 48 | Mtim: NsecToTimespec(mtim), 49 | Atim: NsecToTimespec(atim), 50 | Ctim: NsecToTimespec(ctim), 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /cmd/stfs/cmd/drive_tell.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/pojntfx/stfs/pkg/hardware" 7 | "github.com/pojntfx/stfs/pkg/mtio" 8 | "github.com/pojntfx/stfs/pkg/tape" 9 | "github.com/spf13/cobra" 10 | "github.com/spf13/viper" 11 | ) 12 | 13 | var driveTellCmd = &cobra.Command{ 14 | Use: "tell", 15 | Short: "Get the current record on the tape", 16 | RunE: func(cmd *cobra.Command, args []string) error { 17 | if err := viper.BindPFlags(cmd.PersistentFlags()); err != nil { 18 | return err 19 | } 20 | 21 | reader, _, err := tape.OpenTapeReadOnly( 22 | viper.GetString(driveFlag), 23 | ) 24 | if err != nil { 25 | return nil 26 | } 27 | defer reader.Close() 28 | 29 | currentRecord, err := hardware.Tell( 30 | mtio.MagneticTapeIO{}, 31 | reader.Fd(), 32 | ) 33 | if err != nil { 34 | return err 35 | } 36 | 37 | fmt.Println(currentRecord) 38 | 39 | return nil 40 | }, 41 | } 42 | 43 | func init() { 44 | viper.AutomaticEnv() 45 | 46 | driveCmd.AddCommand(driveTellCmd) 47 | } 48 | -------------------------------------------------------------------------------- /Hydrunfile: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | # Test 6 | if [ "$1" = "test" ]; then 7 | # Configure Git 8 | git config --global --add safe.directory '*' 9 | 10 | # Generate dependencies 11 | make depend 12 | 13 | # Run tests 14 | make test 15 | 16 | exit 0 17 | fi 18 | 19 | # Go 20 | if [ "$1" = "go" ]; then 21 | # Install native dependencies 22 | apt update 23 | apt install -y curl make 24 | 25 | # Install bagop 26 | curl -L -o /tmp/bagop "https://github.com/pojntfx/bagop/releases/latest/download/bagop.linux-$(uname -m)" 27 | install /tmp/bagop /usr/local/bin 28 | 29 | # Configure Git 30 | git config --global --add safe.directory '*' 31 | 32 | # Generate dependencies 33 | make depend 34 | 35 | # Build 36 | CGO_ENABLED=0 bagop -j "$(nproc)" -b "$2" -x '(android/*|ios/*|aix/*|plan9/*|illumos/*|dragonfly/*|netbsd/*|openbsd/*|solaris/*|freebsd/(386|arm|riscv64)|js/wasm|linux/(mips|ppc64|riscv64|loong64)|windows/(arm|386)|wasip1/wasm)' -p 'make build/stfs DST=$DST' -d out 37 | 38 | exit 0 39 | fi 40 | -------------------------------------------------------------------------------- /internal/persisters/sqlite.go: -------------------------------------------------------------------------------- 1 | //go:build (darwin && amd64) || (darwin && 
arm64) || (freebsd && amd64) || (linux && arm) || (linux && arm64) || (linux && 386) || (linux && amd64) || (linux && s390x) || (windows && amd64) 2 | 3 | package persisters 4 | 5 | import ( 6 | "database/sql" 7 | "os" 8 | "path/filepath" 9 | 10 | migrate "github.com/rubenv/sql-migrate" 11 | _ "modernc.org/sqlite" 12 | ) 13 | 14 | type SQLite struct { 15 | DBPath string 16 | Migrations migrate.MigrationSource 17 | 18 | DB *sql.DB 19 | } 20 | 21 | func (s *SQLite) Open() error { 22 | // Create leading directories for database 23 | if err := os.MkdirAll(filepath.Dir(s.DBPath), os.ModePerm); err != nil { 24 | return err 25 | } 26 | 27 | // Open the DB 28 | db, err := sql.Open("sqlite", s.DBPath) 29 | if err != nil { 30 | return err 31 | } 32 | 33 | // Configure the db 34 | db.SetMaxOpenConns(1) // Prevent "database locked" errors 35 | s.DB = db 36 | 37 | // Run migrations if set 38 | if s.Migrations != nil { 39 | if _, err := migrate.Exec(s.DB, "sqlite3", s.Migrations, migrate.Up); err != nil { 40 | return err 41 | } 42 | } 43 | 44 | return nil 45 | } 46 | -------------------------------------------------------------------------------- /internal/persisters/sqite_cgo.go: -------------------------------------------------------------------------------- 1 | //go:build !((darwin && amd64) || (darwin && arm64) || (freebsd && amd64) || (linux && arm) || (linux && arm64) || (linux && 386) || (linux && amd64) || (linux && s390x) || (windows && amd64)) 2 | 3 | package persisters 4 | 5 | import ( 6 | "database/sql" 7 | "os" 8 | "path/filepath" 9 | 10 | _ "github.com/mattn/go-sqlite3" 11 | migrate "github.com/rubenv/sql-migrate" 12 | ) 13 | 14 | type SQLite struct { 15 | DBPath string 16 | Migrations migrate.MigrationSource 17 | 18 | DB *sql.DB 19 | } 20 | 21 | func (s *SQLite) Open() error { 22 | // Create leading directories for database 23 | if err := os.MkdirAll(filepath.Dir(s.DBPath), os.ModePerm); err != nil { 24 | return err 25 | } 26 | 27 | // Open the DB 28 | db, err := sql.Open("sqlite3", s.DBPath) 29 | if err != nil { 30 | return err 31 | } 32 | 33 | // Configure the db 34 | db.SetMaxOpenConns(1) // Prevent "database locked" errors 35 | s.DB = db 36 | 37 | // Run migrations if set 38 | if s.Migrations != nil { 39 | if _, err := migrate.Exec(s.DB, "sqlite3", s.Migrations, migrate.Up); err != nil { 40 | return err 41 | } 42 | } 43 | 44 | return nil 45 | } 46 | -------------------------------------------------------------------------------- /internal/db/sqlite/models/metadata/boil_queries.go: -------------------------------------------------------------------------------- 1 | // Code generated by SQLBoiler 4.16.2 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT. 2 | // This file is meant to be re-generated in place and/or deleted at any time. 
3 | 4 | package models 5 | 6 | import ( 7 | "regexp" 8 | 9 | "github.com/volatiletech/sqlboiler/v4/drivers" 10 | "github.com/volatiletech/sqlboiler/v4/queries" 11 | "github.com/volatiletech/sqlboiler/v4/queries/qm" 12 | ) 13 | 14 | var dialect = drivers.Dialect{ 15 | LQ: 0x22, 16 | RQ: 0x22, 17 | 18 | UseIndexPlaceholders: false, 19 | UseLastInsertID: false, 20 | UseSchema: false, 21 | UseDefaultKeyword: true, 22 | UseAutoColumns: false, 23 | UseTopClause: false, 24 | UseOutputClause: false, 25 | UseCaseWhenExistsClause: false, 26 | } 27 | 28 | // This is a dummy variable to prevent unused regexp import error 29 | var _ = ®exp.Regexp{} 30 | 31 | // NewQuery initializes a new Query using the passed in QueryMods 32 | func NewQuery(mods ...qm.QueryMod) *queries.Query { 33 | q := &queries.Query{} 34 | queries.SetDialect(q, &dialect) 35 | qm.Apply(q, mods...) 36 | 37 | return q 38 | } 39 | -------------------------------------------------------------------------------- /examples/logger.go: -------------------------------------------------------------------------------- 1 | package examples 2 | 3 | import ( 4 | "encoding/json" 5 | "log" 6 | 7 | golog "github.com/fclairamb/go-log" 8 | ) 9 | 10 | type Logger struct { 11 | Verbose bool 12 | } 13 | 14 | func (l Logger) log(level, event string, keyvals ...interface{}) { 15 | k, _ := json.Marshal(keyvals) 16 | 17 | log.Println(level, event, string(k)) 18 | } 19 | 20 | func (l Logger) Trace(event string, keyvals ...interface{}) { 21 | if l.Verbose { 22 | l.log("TRACE", event, keyvals) 23 | } 24 | } 25 | 26 | func (l Logger) Debug(event string, keyvals ...interface{}) { 27 | if l.Verbose { 28 | l.log("DEBUG", event, keyvals) 29 | } 30 | } 31 | 32 | func (l Logger) Info(event string, keyvals ...interface{}) { 33 | l.log("INFO", event, keyvals) 34 | } 35 | 36 | func (l Logger) Warn(event string, keyvals ...interface{}) { 37 | l.log("WARN", event, keyvals) 38 | } 39 | 40 | func (l Logger) Error(event string, keyvals ...interface{}) { 41 | l.log("ERROR", event, keyvals) 42 | } 43 | 44 | func (l Logger) Panic(event string, keyvals ...interface{}) { 45 | l.log("PANIC", event, keyvals) 46 | } 47 | 48 | func (l Logger) With(keyvals ...interface{}) golog.Logger { 49 | return l 50 | } 51 | -------------------------------------------------------------------------------- /pkg/operations/operations.go: -------------------------------------------------------------------------------- 1 | package operations 2 | 3 | import ( 4 | "sync" 5 | 6 | "github.com/pojntfx/stfs/pkg/config" 7 | ) 8 | 9 | type Operations struct { 10 | backend config.BackendConfig 11 | metadata config.MetadataConfig 12 | 13 | pipes config.PipeConfig 14 | crypto config.CryptoConfig 15 | 16 | onHeader func(event *config.HeaderEvent) 17 | 18 | diskOperationLock sync.Mutex 19 | } 20 | 21 | func NewOperations( 22 | backend config.BackendConfig, 23 | metadata config.MetadataConfig, 24 | 25 | pipes config.PipeConfig, 26 | crypto config.CryptoConfig, 27 | 28 | onHeader func(event *config.HeaderEvent), 29 | ) *Operations { 30 | return &Operations{ 31 | backend: backend, 32 | metadata: metadata, 33 | 34 | pipes: pipes, 35 | crypto: crypto, 36 | 37 | onHeader: onHeader, 38 | } 39 | } 40 | 41 | func (o *Operations) GetBackend() config.BackendConfig { 42 | return o.backend 43 | } 44 | 45 | func (o *Operations) GetMetadata() config.MetadataConfig { 46 | return o.metadata 47 | } 48 | 49 | func (o *Operations) GetPipes() config.PipeConfig { 50 | return o.pipes 51 | } 52 | 53 | func (o *Operations) GetCrypto() 
config.CryptoConfig { 54 | return o.crypto 55 | } 56 | -------------------------------------------------------------------------------- /internal/suffix/add.go: -------------------------------------------------------------------------------- 1 | package suffix 2 | 3 | import "github.com/pojntfx/stfs/pkg/config" 4 | 5 | func AddSuffix(name string, compressionFormat string, encryptionFormat string) (string, error) { 6 | switch compressionFormat { 7 | case config.CompressionFormatGZipKey: 8 | fallthrough 9 | case config.CompressionFormatParallelGZipKey: 10 | name += CompressionFormatGZipSuffix 11 | case config.CompressionFormatLZ4Key: 12 | name += CompressionFormatLZ4Suffix 13 | case config.CompressionFormatZStandardKey: 14 | name += CompressionFormatZStandardSuffix 15 | case config.CompressionFormatBrotliKey: 16 | name += CompressionFormatBrotliSuffix 17 | case config.CompressionFormatBzip2Key: 18 | fallthrough 19 | case config.CompressionFormatBzip2ParallelKey: 20 | name += CompressionFormatBzip2Suffix 21 | case config.NoneKey: 22 | default: 23 | return "", config.ErrCompressionFormatUnsupported 24 | } 25 | 26 | switch encryptionFormat { 27 | case config.EncryptionFormatAgeKey: 28 | name += EncryptionFormatAgeSuffix 29 | case config.EncryptionFormatPGPKey: 30 | name += EncryptionFormatPGPSuffix 31 | case config.NoneKey: 32 | default: 33 | return "", config.ErrEncryptionFormatUnsupported 34 | } 35 | 36 | return name, nil 37 | } 38 | -------------------------------------------------------------------------------- /pkg/keys/recipient.go: -------------------------------------------------------------------------------- 1 | package keys 2 | 3 | import ( 4 | "bytes" 5 | 6 | "aead.dev/minisign" 7 | "filippo.io/age" 8 | "github.com/ProtonMail/go-crypto/openpgp" 9 | "github.com/pojntfx/stfs/pkg/config" 10 | ) 11 | 12 | func ParseRecipient( 13 | encryptionFormat string, 14 | pubkey []byte, 15 | ) (interface{}, error) { 16 | switch encryptionFormat { 17 | case config.EncryptionFormatAgeKey: 18 | return age.ParseX25519Recipient(string(pubkey)) 19 | case config.EncryptionFormatPGPKey: 20 | return openpgp.ReadKeyRing(bytes.NewBuffer(pubkey)) 21 | case config.NoneKey: 22 | return pubkey, nil 23 | default: 24 | return nil, config.ErrEncryptionFormatUnsupported 25 | } 26 | } 27 | 28 | func ParseSignerRecipient( 29 | signatureFormat string, 30 | pubkey []byte, 31 | ) (interface{}, error) { 32 | switch signatureFormat { 33 | case config.SignatureFormatMinisignKey: 34 | var recipient minisign.PublicKey 35 | if err := recipient.UnmarshalText(pubkey); err != nil { 36 | return nil, err 37 | } 38 | 39 | return recipient, nil 40 | case config.SignatureFormatPGPKey: 41 | return ParseRecipient(signatureFormat, pubkey) 42 | case config.NoneKey: 43 | return pubkey, nil 44 | default: 45 | return nil, config.ErrSignatureFormatUnsupported 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /internal/tarext/write.go: -------------------------------------------------------------------------------- 1 | package tarext 2 | 3 | import ( 4 | "archive/tar" 5 | "bufio" 6 | "io" 7 | 8 | "github.com/pojntfx/stfs/internal/ioext" 9 | "github.com/pojntfx/stfs/pkg/config" 10 | ) 11 | 12 | func NewTapeWriter(f io.Writer, isRegular bool, recordSize int) (tw *tar.Writer, cleanup func(dirty *bool) error, err error) { 13 | var bw *bufio.Writer 14 | var counter *ioext.CounterWriter 15 | if isRegular { 16 | tw = tar.NewWriter(f) 17 | } else { 18 | bw = bufio.NewWriterSize(f, 
config.MagneticTapeBlockSize*recordSize) 19 | counter = &ioext.CounterWriter{Writer: bw, BytesRead: 0} 20 | tw = tar.NewWriter(counter) 21 | } 22 | 23 | return tw, func(dirty *bool) error { 24 | // Only write the trailer if we wrote to the archive 25 | if *dirty { 26 | if err := tw.Close(); err != nil { 27 | return err 28 | } 29 | 30 | if !isRegular { 31 | if config.MagneticTapeBlockSize*recordSize-counter.BytesRead > 0 { 32 | // Fill the rest of the record with zeros 33 | if _, err := bw.Write(make([]byte, config.MagneticTapeBlockSize*recordSize-counter.BytesRead)); err != nil { 34 | return err 35 | } 36 | } 37 | 38 | if err := bw.Flush(); err != nil { 39 | return err 40 | } 41 | } 42 | } 43 | 44 | return nil 45 | }, nil 46 | } 47 | -------------------------------------------------------------------------------- /pkg/cache/filesystem.go: -------------------------------------------------------------------------------- 1 | package cache 2 | 3 | import ( 4 | "os" 5 | "time" 6 | 7 | "github.com/pojntfx/stfs/internal/pathext" 8 | "github.com/pojntfx/stfs/pkg/config" 9 | "github.com/spf13/afero" 10 | ) 11 | 12 | func NewCacheFilesystem( 13 | base afero.Fs, 14 | root string, 15 | cacheType string, 16 | ttl time.Duration, 17 | cacheDir string, 18 | ) (afero.Fs, error) { 19 | switch cacheType { 20 | case config.FileSystemCacheTypeMemory: 21 | if pathext.IsRoot(root, false) { 22 | return afero.NewCacheOnReadFs(base, afero.NewMemMapFs(), ttl), nil 23 | } 24 | 25 | return afero.NewCacheOnReadFs(afero.NewBasePathFs(base, root), afero.NewMemMapFs(), ttl), nil 26 | case config.FileSystemCacheTypeDir: 27 | if err := os.RemoveAll(cacheDir); err != nil { 28 | return nil, err 29 | } 30 | 31 | if err := os.MkdirAll(cacheDir, os.ModePerm); err != nil { 32 | return nil, err 33 | } 34 | 35 | if pathext.IsRoot(root, false) { 36 | return afero.NewCacheOnReadFs(base, afero.NewBasePathFs(afero.NewOsFs(), cacheDir), ttl), nil 37 | } 38 | 39 | return afero.NewCacheOnReadFs(afero.NewBasePathFs(base, root), afero.NewBasePathFs(afero.NewOsFs(), cacheDir), ttl), nil 40 | case config.NoneKey: 41 | if pathext.IsRoot(root, false) { 42 | return base, nil 43 | } 44 | 45 | return afero.NewBasePathFs(base, root), nil 46 | default: 47 | return nil, config.ErrFileSystemCacheTypeUnsupported 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /cmd/stfs/cmd/inventory_find.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "github.com/pojntfx/stfs/internal/logging" 5 | "github.com/pojntfx/stfs/pkg/config" 6 | "github.com/pojntfx/stfs/pkg/inventory" 7 | "github.com/pojntfx/stfs/pkg/persisters" 8 | "github.com/spf13/cobra" 9 | "github.com/spf13/viper" 10 | ) 11 | 12 | const ( 13 | expressionFlag = "expression" 14 | ) 15 | 16 | var inventoryFindCmd = &cobra.Command{ 17 | Use: "find", 18 | Aliases: []string{"fin", "f"}, 19 | Short: "Find a file or directory on tape or tar file by matching against a regex", 20 | RunE: func(cmd *cobra.Command, args []string) error { 21 | if err := viper.BindPFlags(cmd.PersistentFlags()); err != nil { 22 | return err 23 | } 24 | 25 | metadataPersister := persisters.NewMetadataPersister(viper.GetString(metadataFlag)) 26 | if err := metadataPersister.Open(); err != nil { 27 | return err 28 | } 29 | 30 | if _, err := inventory.Find( 31 | config.MetadataConfig{ 32 | Metadata: metadataPersister, 33 | }, 34 | 35 | viper.GetString(expressionFlag), 36 | 37 | logging.NewCSVLogger().PrintHeader, 38 | 
); err != nil { 39 | return err 40 | } 41 | 42 | return nil 43 | }, 44 | } 45 | 46 | func init() { 47 | inventoryFindCmd.PersistentFlags().StringP(expressionFlag, "x", "", "Regex to match the file/directory name against") 48 | 49 | viper.AutomaticEnv() 50 | 51 | inventoryCmd.AddCommand(inventoryFindCmd) 52 | } 53 | -------------------------------------------------------------------------------- /internal/ioext/counter.go: -------------------------------------------------------------------------------- 1 | package ioext 2 | 3 | import "io" 4 | 5 | type CounterReader struct { 6 | Reader io.Reader 7 | 8 | BytesRead int 9 | } 10 | 11 | func (r *CounterReader) Read(p []byte) (n int, err error) { 12 | n, err = r.Reader.Read(p) 13 | 14 | r.BytesRead += n 15 | 16 | return n, err 17 | } 18 | 19 | type CounterReadCloser struct { 20 | Reader io.ReadCloser 21 | 22 | BytesRead int 23 | } 24 | 25 | func (r *CounterReadCloser) Read(p []byte) (n int, err error) { 26 | n, err = r.Reader.Read(p) 27 | 28 | r.BytesRead += n 29 | 30 | return n, err 31 | } 32 | 33 | func (r *CounterReadCloser) Close() error { 34 | return r.Reader.Close() 35 | } 36 | 37 | type CounterReadSeekCloser struct { 38 | Reader io.ReadSeekCloser 39 | 40 | BytesRead int 41 | } 42 | 43 | func (r *CounterReadSeekCloser) Read(p []byte) (n int, err error) { 44 | n, err = r.Reader.Read(p) 45 | 46 | r.BytesRead += n 47 | 48 | return n, err 49 | } 50 | 51 | func (r *CounterReadSeekCloser) Close() error { 52 | return r.Reader.Close() 53 | } 54 | 55 | func (r *CounterReadSeekCloser) Seek(offset int64, whence int) (int64, error) { 56 | return r.Reader.Seek(offset, whence) 57 | } 58 | 59 | type CounterWriter struct { 60 | Writer io.Writer 61 | 62 | BytesRead int 63 | } 64 | 65 | func (w *CounterWriter) Write(p []byte) (n int, err error) { 66 | n, err = w.Writer.Write(p) 67 | 68 | w.BytesRead += n 69 | 70 | return n, err 71 | } 72 | -------------------------------------------------------------------------------- /internal/suffix/remove.go: -------------------------------------------------------------------------------- 1 | package suffix 2 | 3 | import ( 4 | "strings" 5 | 6 | "github.com/pojntfx/stfs/pkg/config" 7 | ) 8 | 9 | func RemoveSuffix(name string, compressionFormat string, encryptionFormat string) (string, error) { 10 | switch encryptionFormat { 11 | case config.EncryptionFormatAgeKey: 12 | name = strings.TrimSuffix(name, EncryptionFormatAgeSuffix) 13 | case config.EncryptionFormatPGPKey: 14 | name = strings.TrimSuffix(name, EncryptionFormatPGPSuffix) 15 | case config.NoneKey: 16 | default: 17 | return "", config.ErrEncryptionFormatUnsupported 18 | } 19 | 20 | switch compressionFormat { 21 | case config.CompressionFormatGZipKey: 22 | fallthrough 23 | case config.CompressionFormatParallelGZipKey: 24 | name = strings.TrimSuffix(name, CompressionFormatGZipSuffix) 25 | case config.CompressionFormatLZ4Key: 26 | name = strings.TrimSuffix(name, CompressionFormatLZ4Suffix) 27 | case config.CompressionFormatZStandardKey: 28 | name = strings.TrimSuffix(name, CompressionFormatZStandardSuffix) 29 | case config.CompressionFormatBrotliKey: 30 | name = strings.TrimSuffix(name, CompressionFormatBrotliSuffix) 31 | case config.CompressionFormatBzip2Key: 32 | fallthrough 33 | case config.CompressionFormatBzip2ParallelKey: 34 | name = strings.TrimSuffix(name, CompressionFormatBzip2Suffix) 35 | case config.NoneKey: 36 | default: 37 | return "", config.ErrCompressionFormatUnsupported 38 | } 39 | 40 | return name, nil 41 | } 42 | 
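Note: AddSuffix and RemoveSuffix above mirror each other: the archive path appends the compression suffix first and the encryption suffix second, and the restore path strips them in the reverse order. A minimal round-trip sketch with an example path; it assumes it is compiled from within the stfs module itself, since the suffix package is internal:

package main

import (
	"fmt"

	"github.com/pojntfx/stfs/internal/suffix"
	"github.com/pojntfx/stfs/pkg/config"
)

func main() {
	// Appends ".gz" for gzip compression, then ".age" for age encryption
	name, err := suffix.AddSuffix("backup/photos.tar", config.CompressionFormatGZipKey, config.EncryptionFormatAgeKey)
	if err != nil {
		panic(err)
	}
	fmt.Println(name) // backup/photos.tar.gz.age

	// Strips ".age" first, then ".gz", restoring the original name
	original, err := suffix.RemoveSuffix(name, config.CompressionFormatGZipKey, config.EncryptionFormatAgeKey)
	if err != nil {
		panic(err)
	}
	fmt.Println(original) // backup/photos.tar
}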
-------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # Public variables 2 | DESTDIR ?= 3 | PREFIX ?= /usr/local 4 | OUTPUT_DIR ?= out 5 | DST ?= 6 | 7 | # Private variables 8 | obj = stfs 9 | all: $(addprefix build/,$(obj)) 10 | 11 | # Build 12 | build: $(addprefix build/,$(obj)) 13 | $(addprefix build/,$(obj)): 14 | ifdef DST 15 | go build -o $(DST) ./cmd/$(subst build/,,$@) 16 | else 17 | go build -o $(OUTPUT_DIR)/$(subst build/,,$@) ./cmd/$(subst build/,,$@) 18 | endif 19 | 20 | # Install 21 | install: $(addprefix install/,$(obj)) 22 | $(addprefix install/,$(obj)): 23 | install -D -m 0755 $(OUTPUT_DIR)/$(subst install/,,$@) $(DESTDIR)$(PREFIX)/bin/$(subst install/,,$@) 24 | 25 | # Uninstall 26 | uninstall: $(addprefix uninstall/,$(obj)) 27 | $(addprefix uninstall/,$(obj)): 28 | rm $(DESTDIR)$(PREFIX)/bin/$(subst uninstall/,,$@) 29 | 30 | # Run 31 | $(addprefix run/,$(obj)): 32 | $(subst run/,,$@) $(ARGS) 33 | 34 | # Test 35 | test: 36 | go test -timeout 3600s -parallel $(shell nproc) ./... 37 | 38 | # Benchmark 39 | benchmark: 40 | go test -timeout 3600s -bench=./... ./... 41 | 42 | # Clean 43 | clean: 44 | rm -rf out internal/db 45 | 46 | # Dependencies 47 | depend: 48 | go install github.com/rubenv/sql-migrate/sql-migrate@latest 49 | go install github.com/volatiletech/sqlboiler/v4@latest 50 | go install github.com/volatiletech/sqlboiler-sqlite3@latest 51 | go install github.com/jteeuwen/go-bindata/go-bindata@latest 52 | sql-migrate up -env="production" -config configs/sql-migrate/metadata.yaml 53 | 54 | go generate ./... -------------------------------------------------------------------------------- /cmd/stfs/cmd/inventory_stat.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "github.com/pojntfx/stfs/internal/logging" 5 | "github.com/pojntfx/stfs/pkg/config" 6 | "github.com/pojntfx/stfs/pkg/inventory" 7 | "github.com/pojntfx/stfs/pkg/persisters" 8 | "github.com/spf13/cobra" 9 | "github.com/spf13/viper" 10 | ) 11 | 12 | const ( 13 | linkFlag = "link" 14 | ) 15 | 16 | var inventoryStatCmd = &cobra.Command{ 17 | Use: "stat", 18 | Aliases: []string{"sta", "s"}, 19 | Short: "Get information on a file or directory on tape or tar file", 20 | RunE: func(cmd *cobra.Command, args []string) error { 21 | if err := viper.BindPFlags(cmd.PersistentFlags()); err != nil { 22 | return err 23 | } 24 | 25 | metadataPersister := persisters.NewMetadataPersister(viper.GetString(metadataFlag)) 26 | if err := metadataPersister.Open(); err != nil { 27 | return err 28 | } 29 | 30 | if _, err := inventory.Stat( 31 | config.MetadataConfig{ 32 | Metadata: metadataPersister, 33 | }, 34 | 35 | viper.GetString(nameFlag), 36 | viper.GetBool(linkFlag), 37 | 38 | logging.NewCSVLogger().PrintHeader, 39 | ); err != nil { 40 | return err 41 | } 42 | 43 | return nil 44 | }, 45 | } 46 | 47 | func init() { 48 | inventoryStatCmd.PersistentFlags().StringP(nameFlag, "n", "", "File or directory to get info for") 49 | inventoryStatCmd.PersistentFlags().BoolP(linkFlag, "l", false, "Resolve as symlink") 50 | 51 | viper.AutomaticEnv() 52 | 53 | inventoryCmd.AddCommand(inventoryStatCmd) 54 | } 55 | -------------------------------------------------------------------------------- /cmd/stfs/cmd/inventory_list.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | 
"github.com/pojntfx/stfs/internal/logging" 5 | "github.com/pojntfx/stfs/pkg/config" 6 | "github.com/pojntfx/stfs/pkg/inventory" 7 | "github.com/pojntfx/stfs/pkg/persisters" 8 | "github.com/spf13/cobra" 9 | "github.com/spf13/viper" 10 | ) 11 | 12 | const ( 13 | limitFlag = "limit" 14 | ) 15 | 16 | var inventoryListCmd = &cobra.Command{ 17 | Use: "list", 18 | Aliases: []string{"lis", "l", "t", "ls"}, 19 | Short: "List the contents of a directory on tape or tar file", 20 | RunE: func(cmd *cobra.Command, args []string) error { 21 | if err := viper.BindPFlags(cmd.PersistentFlags()); err != nil { 22 | return err 23 | } 24 | 25 | metadataPersister := persisters.NewMetadataPersister(viper.GetString(metadataFlag)) 26 | if err := metadataPersister.Open(); err != nil { 27 | return err 28 | } 29 | 30 | if _, err := inventory.List( 31 | config.MetadataConfig{ 32 | Metadata: metadataPersister, 33 | }, 34 | 35 | viper.GetString(nameFlag), 36 | viper.GetInt(limitFlag), 37 | 38 | logging.NewCSVLogger().PrintHeader, 39 | ); err != nil { 40 | return err 41 | } 42 | 43 | return nil 44 | }, 45 | } 46 | 47 | func init() { 48 | inventoryListCmd.PersistentFlags().StringP(nameFlag, "n", "", "Directory to list the contents of") 49 | inventoryListCmd.PersistentFlags().IntP(limitFlag, "l", -1, "Maximum amount of files to list (-1 lists all)") 50 | 51 | viper.AutomaticEnv() 52 | 53 | inventoryCmd.AddCommand(inventoryListCmd) 54 | } 55 | -------------------------------------------------------------------------------- /internal/ftp/server.go: -------------------------------------------------------------------------------- 1 | package ftp 2 | 3 | import ( 4 | "crypto/tls" 5 | "errors" 6 | "sync" 7 | 8 | ftpserver "github.com/fclairamb/ftpserverlib" 9 | "github.com/spf13/afero" 10 | ) 11 | 12 | var ( 13 | ErrNoTLS = errors.New("no TLS supported") 14 | ) 15 | 16 | type FTPServer struct { 17 | Settings *ftpserver.Settings 18 | FileSystem afero.Fs 19 | 20 | clientsLock sync.Mutex 21 | clients []ftpserver.ClientContext 22 | } 23 | 24 | func (driver *FTPServer) GetSettings() (*ftpserver.Settings, error) { 25 | return driver.Settings, nil 26 | } 27 | 28 | func (driver *FTPServer) GetTLSConfig() (*tls.Config, error) { 29 | return nil, ErrNoTLS 30 | } 31 | 32 | func (driver *FTPServer) ClientConnected(cc ftpserver.ClientContext) (string, error) { 33 | driver.clientsLock.Lock() 34 | defer driver.clientsLock.Unlock() 35 | 36 | driver.clients = append(driver.clients, cc) 37 | 38 | return "", nil 39 | } 40 | 41 | func (driver *FTPServer) ClientDisconnected(cc ftpserver.ClientContext) { 42 | driver.clientsLock.Lock() 43 | defer driver.clientsLock.Unlock() 44 | 45 | for idx, client := range driver.clients { 46 | if client.ID() == cc.ID() { 47 | lastIdx := len(driver.clients) - 1 48 | driver.clients[idx] = driver.clients[lastIdx] 49 | driver.clients[lastIdx] = nil 50 | driver.clients = driver.clients[:lastIdx] 51 | 52 | return 53 | } 54 | } 55 | } 56 | 57 | func (driver *FTPServer) AuthUser(_ ftpserver.ClientContext, user, pass string) (ftpserver.ClientDriver, error) { 58 | return driver.FileSystem, nil 59 | } 60 | -------------------------------------------------------------------------------- /internal/db/sqlite/models/metadata/boil_types.go: -------------------------------------------------------------------------------- 1 | // Code generated by SQLBoiler 4.16.2 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT. 2 | // This file is meant to be re-generated in place and/or deleted at any time. 
3 | 4 | package models 5 | 6 | import ( 7 | "strconv" 8 | 9 | "github.com/friendsofgo/errors" 10 | "github.com/volatiletech/sqlboiler/v4/boil" 11 | "github.com/volatiletech/strmangle" 12 | ) 13 | 14 | // M type is for providing columns and column values to UpdateAll. 15 | type M map[string]interface{} 16 | 17 | // ErrSyncFail occurs during insert when the record could not be retrieved in 18 | // order to populate default value information. This usually happens when LastInsertId 19 | // fails or there was a primary key configuration that was not resolvable. 20 | var ErrSyncFail = errors.New("models: failed to synchronize data after insert") 21 | 22 | type insertCache struct { 23 | query string 24 | retQuery string 25 | valueMapping []uint64 26 | retMapping []uint64 27 | } 28 | 29 | type updateCache struct { 30 | query string 31 | valueMapping []uint64 32 | } 33 | 34 | func makeCacheKey(cols boil.Columns, nzDefaults []string) string { 35 | buf := strmangle.GetBuffer() 36 | 37 | buf.WriteString(strconv.Itoa(cols.Kind)) 38 | for _, w := range cols.Cols { 39 | buf.WriteString(w) 40 | } 41 | 42 | if len(nzDefaults) != 0 { 43 | buf.WriteByte('.') 44 | } 45 | for _, nz := range nzDefaults { 46 | buf.WriteString(nz) 47 | } 48 | 49 | str := buf.String() 50 | strmangle.PutBuffer(buf) 51 | return str 52 | } 53 | -------------------------------------------------------------------------------- /pkg/compression/decompress.go: -------------------------------------------------------------------------------- 1 | package compression 2 | 3 | import ( 4 | "compress/gzip" 5 | "context" 6 | "io" 7 | 8 | "github.com/andybalholm/brotli" 9 | "github.com/cosnicolaou/pbzip2" 10 | "github.com/dsnet/compress/bzip2" 11 | "github.com/klauspost/compress/zstd" 12 | "github.com/klauspost/pgzip" 13 | "github.com/pierrec/lz4/v4" 14 | "github.com/pojntfx/stfs/pkg/config" 15 | ) 16 | 17 | func Decompress( 18 | src io.Reader, 19 | compressionFormat string, 20 | ) (io.ReadCloser, error) { 21 | switch compressionFormat { 22 | case config.CompressionFormatGZipKey: 23 | fallthrough 24 | case config.CompressionFormatParallelGZipKey: 25 | if compressionFormat == config.CompressionFormatGZipKey { 26 | return gzip.NewReader(src) 27 | } 28 | 29 | return pgzip.NewReader(src) 30 | case config.CompressionFormatLZ4Key: 31 | lz := lz4.NewReader(src) 32 | if err := lz.Apply(lz4.ConcurrencyOption(-1)); err != nil { 33 | return nil, err 34 | } 35 | 36 | return io.NopCloser(lz), nil 37 | case config.CompressionFormatZStandardKey: 38 | zz, err := zstd.NewReader(src) 39 | if err != nil { 40 | return nil, err 41 | } 42 | 43 | return io.NopCloser(zz), nil 44 | case config.CompressionFormatBrotliKey: 45 | br := brotli.NewReader(src) 46 | 47 | return io.NopCloser(br), nil 48 | case config.CompressionFormatBzip2Key: 49 | return bzip2.NewReader(src, nil) 50 | case config.CompressionFormatBzip2ParallelKey: 51 | bz := pbzip2.NewReader(context.Background(), src) 52 | 53 | return io.NopCloser(bz), nil 54 | case config.NoneKey: 55 | return io.NopCloser(src), nil 56 | default: 57 | return nil, config.ErrCompressionFormatUnsupported 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /pkg/cache/write.go: -------------------------------------------------------------------------------- 1 | package cache 2 | 3 | import ( 4 | "io/ioutil" 5 | "os" 6 | "path/filepath" 7 | 8 | "github.com/mattetti/filebuffer" 9 | "github.com/pojntfx/stfs/pkg/config" 10 | "github.com/spf13/afero" 11 | ) 12 | 13 | type fileWithSize struct 
{ 14 | afero.File 15 | } 16 | 17 | func (f fileWithSize) Size() (int64, error) { 18 | info, err := f.Stat() 19 | if err != nil { 20 | return -1, err 21 | } 22 | 23 | return info.Size(), nil 24 | } 25 | 26 | type filebufferWithSize struct { 27 | *filebuffer.Buffer 28 | } 29 | 30 | func (f filebufferWithSize) Size() (int64, error) { 31 | return int64(f.Buff.Len()), nil 32 | } 33 | 34 | func (f filebufferWithSize) Sync() error { 35 | // No need to sync a in-memory buffer 36 | return nil 37 | } 38 | 39 | func (f filebufferWithSize) Truncate(size int64) error { 40 | f.Buff.Truncate(int(size)) 41 | 42 | return nil 43 | } 44 | 45 | func NewCacheWrite( 46 | root string, 47 | cacheType string, 48 | ) (cache WriteCache, cleanup func() error, err error) { 49 | switch cacheType { 50 | case config.WriteCacheTypeMemory: 51 | buff := &filebufferWithSize{filebuffer.New([]byte{})} 52 | 53 | return buff, func() error { 54 | buff = nil 55 | 56 | return nil 57 | }, nil 58 | case config.WriteCacheTypeFile: 59 | tmpdir := filepath.Join(root, "io") 60 | 61 | if err := os.MkdirAll(tmpdir, os.ModePerm); err != nil { 62 | return nil, nil, err 63 | } 64 | 65 | f, err := ioutil.TempFile(tmpdir, "*") 66 | if err != nil { 67 | return nil, nil, err 68 | } 69 | 70 | return fileWithSize{f}, func() error { 71 | return os.Remove(f.Name()) 72 | }, nil 73 | default: 74 | return nil, nil, config.ErrWriteCacheTypeUnsupported 75 | } 76 | } 77 | -------------------------------------------------------------------------------- /pkg/inventory/stat.go: -------------------------------------------------------------------------------- 1 | package inventory 2 | 3 | import ( 4 | "archive/tar" 5 | "context" 6 | "database/sql" 7 | "path/filepath" 8 | "strings" 9 | 10 | "github.com/pojntfx/stfs/internal/converters" 11 | "github.com/pojntfx/stfs/pkg/config" 12 | ) 13 | 14 | func Stat( 15 | metadata config.MetadataConfig, 16 | 17 | name string, 18 | symlink bool, 19 | 20 | onHeader func(hdr *config.Header), 21 | ) (*tar.Header, error) { 22 | name = filepath.ToSlash(name) 23 | linkname := filepath.ToSlash(name) 24 | 25 | if symlink { 26 | // Resolve symlink 27 | link, err := metadata.Metadata.GetHeaderByLinkname(context.Background(), name) 28 | if err != nil { 29 | if err == sql.ErrNoRows { 30 | link, err = metadata.Metadata.GetHeaderByLinkname(context.Background(), strings.TrimSuffix(name, "/")+"/") 31 | if err != nil { 32 | return nil, err 33 | } 34 | } else { 35 | return nil, err 36 | } 37 | } 38 | 39 | name = link.Name 40 | linkname = link.Linkname 41 | } 42 | 43 | dbhdr, err := metadata.Metadata.GetHeader(context.Background(), name) 44 | if err != nil { 45 | if err == sql.ErrNoRows { 46 | dbhdr, err = metadata.Metadata.GetHeader(context.Background(), strings.TrimSuffix(name, "/")+"/") 47 | if err != nil { 48 | return nil, err 49 | } 50 | } else { 51 | return nil, err 52 | } 53 | } 54 | 55 | // Prevent returning broken symlinks as headers 56 | if !symlink && dbhdr.Linkname != "" { 57 | return nil, sql.ErrNoRows 58 | } 59 | 60 | if symlink { 61 | dbhdr.Name = linkname 62 | dbhdr.Linkname = name 63 | } 64 | 65 | hdr, err := converters.DBHeaderToTarHeader(converters.ConfigHeaderToDBHeader(dbhdr)) 66 | if err != nil { 67 | return nil, err 68 | } 69 | 70 | if onHeader != nil { 71 | onHeader(dbhdr) 72 | } 73 | 74 | return hdr, nil 75 | } 76 | -------------------------------------------------------------------------------- /pkg/operations/initialize.go: -------------------------------------------------------------------------------- 1 | package 
operations 2 | 3 | import ( 4 | "archive/tar" 5 | "io" 6 | "os" 7 | "os/user" 8 | "path/filepath" 9 | "strconv" 10 | "time" 11 | 12 | "github.com/pojntfx/stfs/pkg/config" 13 | ) 14 | 15 | func (o *Operations) Initialize( 16 | name string, 17 | perm os.FileMode, 18 | compressionLevel string, 19 | ) error { 20 | o.diskOperationLock.Lock() 21 | defer o.diskOperationLock.Unlock() 22 | 23 | usr, err := user.Current() 24 | if err != nil { 25 | return err 26 | } 27 | 28 | gid, err := strconv.Atoi(usr.Gid) 29 | if err != nil { 30 | // Some OSes like i.e. Windows don't support numeric GIDs, so use 0 instead 31 | gid = 0 32 | } 33 | 34 | uid, err := strconv.Atoi(usr.Uid) 35 | if err != nil { 36 | // Some OSes like i.e. Windows don't support numeric UIDs, so use 0 instead 37 | uid = 0 38 | } 39 | 40 | groups, err := usr.GroupIds() 41 | if err != nil { 42 | return err 43 | } 44 | 45 | gname := "" 46 | if len(groups) >= 1 { 47 | gname = groups[0] 48 | } 49 | 50 | typeflag := tar.TypeDir 51 | 52 | hdr := &tar.Header{ 53 | Typeflag: byte(typeflag), 54 | 55 | Name: name, 56 | 57 | Mode: int64(perm), 58 | Uid: uid, 59 | Gid: gid, 60 | Uname: usr.Username, 61 | Gname: gname, 62 | 63 | ModTime: time.Now(), 64 | } 65 | 66 | done := false 67 | if _, err := o.archive( 68 | func() (config.FileConfig, error) { 69 | // Exit after the first write 70 | if done { 71 | return config.FileConfig{}, io.EOF 72 | } 73 | done = true 74 | 75 | return config.FileConfig{ 76 | GetFile: nil, // Not required as we never replace 77 | Info: hdr.FileInfo(), 78 | Path: filepath.ToSlash(name), 79 | Link: filepath.ToSlash(hdr.Linkname), 80 | }, nil 81 | }, 82 | compressionLevel, 83 | true, 84 | true, 85 | ); err != nil { 86 | return err 87 | } 88 | 89 | return nil 90 | } 91 | -------------------------------------------------------------------------------- /pkg/tape/write.go: -------------------------------------------------------------------------------- 1 | package tape 2 | 3 | import ( 4 | "os" 5 | 6 | "github.com/pojntfx/stfs/pkg/config" 7 | ) 8 | 9 | func OpenTapeWriteOnly( 10 | drive string, 11 | mt config.MagneticTapeIO, 12 | recordSize int, 13 | overwrite bool, 14 | ) (f *os.File, isRegular bool, err error) { 15 | stat, err := os.Stat(drive) 16 | if err == nil { 17 | isRegular = stat.Mode().IsRegular() 18 | } else { 19 | if os.IsNotExist(err) { 20 | isRegular = true 21 | } else { 22 | return nil, false, err 23 | } 24 | } 25 | 26 | if overwrite { 27 | if isRegular { 28 | f, err := os.OpenFile(drive, os.O_WRONLY|os.O_CREATE, 0600) 29 | if err != nil { 30 | return nil, false, err 31 | } 32 | 33 | // Clear the file's content 34 | if err := f.Truncate(0); err != nil { 35 | return nil, false, err 36 | } 37 | 38 | if err := f.Close(); err != nil { 39 | return nil, false, err 40 | } 41 | } else { 42 | f, err := os.OpenFile(drive, os.O_WRONLY, os.ModeCharDevice) 43 | if err != nil { 44 | return nil, false, err 45 | } 46 | 47 | // Seek to the start of the tape 48 | if err := mt.SeekToRecordOnTape(f.Fd(), 0); err != nil { 49 | return nil, false, err 50 | } 51 | 52 | if err := f.Close(); err != nil { 53 | return nil, false, err 54 | } 55 | } 56 | } 57 | 58 | if isRegular { 59 | f, err = os.OpenFile(drive, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0600) 60 | if err != nil { 61 | return nil, false, err 62 | } 63 | 64 | // No need to go to end manually due to `os.O_APPEND` 65 | } else { 66 | f, err = os.OpenFile(drive, os.O_APPEND|os.O_WRONLY, os.ModeCharDevice) 67 | if err != nil { 68 | return nil, false, err 69 | } 70 | 71 | if !overwrite { 72 | 
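// When appending to an actual tape device (overwrite not requested), the drive first has to be positioned at the end of the recorded media so that new records land after the existing data; the regular-file branch above already gets this behaviour from os.O_APPEND.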
// Go to end of tape 73 | if err := mt.GoToEndOfTape(f.Fd()); err != nil { 74 | return nil, false, err 75 | } 76 | } 77 | } 78 | 79 | return f, isRegular, nil 80 | } 81 | -------------------------------------------------------------------------------- /internal/db/sqlite/models/metadata/sqlite_upsert.go: -------------------------------------------------------------------------------- 1 | // Code generated by SQLBoiler 4.16.2 (https://github.com/volatiletech/sqlboiler). DO NOT EDIT. 2 | // This file is meant to be re-generated in place and/or deleted at any time. 3 | 4 | package models 5 | 6 | import ( 7 | "fmt" 8 | "strings" 9 | 10 | "github.com/volatiletech/sqlboiler/v4/drivers" 11 | "github.com/volatiletech/strmangle" 12 | ) 13 | 14 | // buildUpsertQuerySQLite builds a SQL statement string using the upsertData provided. 15 | func buildUpsertQuerySQLite(dia drivers.Dialect, tableName string, updateOnConflict bool, ret, update, conflict, whitelist []string) string { 16 | conflict = strmangle.IdentQuoteSlice(dia.LQ, dia.RQ, conflict) 17 | whitelist = strmangle.IdentQuoteSlice(dia.LQ, dia.RQ, whitelist) 18 | ret = strmangle.IdentQuoteSlice(dia.LQ, dia.RQ, ret) 19 | 20 | buf := strmangle.GetBuffer() 21 | defer strmangle.PutBuffer(buf) 22 | 23 | columns := "DEFAULT VALUES" 24 | if len(whitelist) != 0 { 25 | columns = fmt.Sprintf("(%s) VALUES (%s)", 26 | strings.Join(whitelist, ", "), 27 | strmangle.Placeholders(dia.UseIndexPlaceholders, len(whitelist), 1, 1)) 28 | } 29 | 30 | fmt.Fprintf( 31 | buf, 32 | "INSERT INTO %s %s ON CONFLICT ", 33 | tableName, 34 | columns, 35 | ) 36 | 37 | if !updateOnConflict || len(update) == 0 { 38 | buf.WriteString("DO NOTHING") 39 | } else { 40 | buf.WriteByte('(') 41 | buf.WriteString(strings.Join(conflict, ", ")) 42 | buf.WriteString(") DO UPDATE SET ") 43 | 44 | for i, v := range update { 45 | if i != 0 { 46 | buf.WriteByte(',') 47 | } 48 | quoted := strmangle.IdentQuote(dia.LQ, dia.RQ, v) 49 | buf.WriteString(quoted) 50 | buf.WriteString(" = EXCLUDED.") 51 | buf.WriteString(quoted) 52 | } 53 | } 54 | 55 | if len(ret) != 0 { 56 | buf.WriteString(" RETURNING ") 57 | buf.WriteString(strings.Join(ret, ", ")) 58 | } 59 | 60 | return buf.String() 61 | } 62 | -------------------------------------------------------------------------------- /cmd/stfs/cmd/keygen.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "io/ioutil" 5 | "os" 6 | "path/filepath" 7 | 8 | "github.com/pojntfx/stfs/pkg/config" 9 | "github.com/pojntfx/stfs/pkg/utility" 10 | "github.com/spf13/cobra" 11 | "github.com/spf13/viper" 12 | ) 13 | 14 | var keygenCmd = &cobra.Command{ 15 | Use: "keygen", 16 | Aliases: []string{"key", "k"}, 17 | Short: "Generate a encryption or signature key", 18 | RunE: func(cmd *cobra.Command, args []string) error { 19 | if err := viper.BindPFlags(cmd.PersistentFlags()); err != nil { 20 | return err 21 | } 22 | 23 | privkey, pubkey, err := utility.Keygen( 24 | config.PipeConfig{ 25 | Compression: viper.GetString(compressionFlag), 26 | Encryption: viper.GetString(encryptionFlag), 27 | Signature: viper.GetString(signatureFlag), 28 | }, 29 | config.PasswordConfig{ 30 | Password: viper.GetString(passwordFlag), 31 | }, 32 | ) 33 | if err != nil { 34 | return err 35 | } 36 | 37 | // Write pubkey (read/writable by everyone) 38 | if err := os.MkdirAll(filepath.Dir(viper.GetString(recipientFlag)), os.ModePerm); err != nil { 39 | return err 40 | } 41 | 42 | if err := 
ioutil.WriteFile(viper.GetString(recipientFlag), pubkey, os.ModePerm); err != nil { 43 | return err 44 | } 45 | 46 | // Write privkey (read/writable only by the owner) 47 | if err := os.MkdirAll(filepath.Dir(viper.GetString(identityFlag)), 0700); err != nil { 48 | return err 49 | } 50 | 51 | return ioutil.WriteFile(viper.GetString(identityFlag), privkey, 0600) 52 | }, 53 | } 54 | 55 | func init() { 56 | keygenCmd.PersistentFlags().StringP(recipientFlag, "r", "", "Path to write the public key to") 57 | keygenCmd.PersistentFlags().StringP(identityFlag, "i", "", "Path to write the private key to") 58 | keygenCmd.PersistentFlags().StringP(passwordFlag, "p", "", "Password to protect the private key with") 59 | 60 | viper.AutomaticEnv() 61 | 62 | rootCmd.AddCommand(keygenCmd) 63 | } 64 | -------------------------------------------------------------------------------- /pkg/config/constants.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | const ( 4 | NoneKey = "" 5 | 6 | CompressionFormatGZipKey = "gzip" 7 | CompressionFormatParallelGZipKey = "parallelgzip" 8 | CompressionFormatLZ4Key = "lz4" 9 | CompressionFormatZStandardKey = "zstandard" 10 | CompressionFormatBrotliKey = "brotli" 11 | CompressionFormatBzip2Key = "bzip2" 12 | CompressionFormatBzip2ParallelKey = "parallelbzip2" 13 | 14 | EncryptionFormatAgeKey = "age" 15 | EncryptionFormatPGPKey = "pgp" 16 | 17 | SignatureFormatMinisignKey = "minisign" 18 | SignatureFormatPGPKey = "pgp" 19 | 20 | CompressionLevelFastestKey = "fastest" 21 | CompressionLevelBalancedKey = "balanced" 22 | CompressionLevelSmallestKey = "smallest" 23 | 24 | HeaderEventTypeArchive = "archive" 25 | HeaderEventTypeDelete = "delete" 26 | HeaderEventTypeMove = "move" 27 | HeaderEventTypeRestore = "restore" 28 | HeaderEventTypeUpdate = "update" 29 | 30 | FileSystemNameSTFS = "STFS" 31 | 32 | FileSystemCacheTypeMemory = "memory" 33 | FileSystemCacheTypeDir = "dir" 34 | 35 | WriteCacheTypeMemory = "memory" 36 | WriteCacheTypeFile = "file" 37 | 38 | MagneticTapeBlockSize = 512 39 | ) 40 | 41 | var ( 42 | KnownCompressionLevels = []string{CompressionLevelFastestKey, CompressionLevelBalancedKey, CompressionLevelSmallestKey} 43 | 44 | KnownCompressionFormats = []string{NoneKey, CompressionFormatGZipKey, CompressionFormatParallelGZipKey, CompressionFormatLZ4Key, CompressionFormatZStandardKey, CompressionFormatBrotliKey, CompressionFormatBzip2Key, CompressionFormatBzip2ParallelKey} 45 | 46 | KnownEncryptionFormats = []string{NoneKey, EncryptionFormatAgeKey, EncryptionFormatPGPKey} 47 | 48 | KnownSignatureFormats = []string{NoneKey, SignatureFormatMinisignKey, SignatureFormatPGPKey} 49 | 50 | KnownFileSystemCacheTypes = []string{NoneKey, FileSystemCacheTypeMemory, FileSystemCacheTypeDir} 51 | 52 | KnownWriteCacheTypes = []string{WriteCacheTypeMemory, WriteCacheTypeFile} 53 | ) 54 | -------------------------------------------------------------------------------- /pkg/keys/identity.go: -------------------------------------------------------------------------------- 1 | package keys 2 | 3 | import ( 4 | "bytes" 5 | "io" 6 | 7 | "aead.dev/minisign" 8 | "filippo.io/age" 9 | "github.com/ProtonMail/go-crypto/openpgp" 10 | "github.com/pojntfx/stfs/pkg/config" 11 | ) 12 | 13 | func ParseIdentity( 14 | encryptionFormat string, 15 | privkey []byte, 16 | password string, 17 | ) (interface{}, error) { 18 | switch encryptionFormat { 19 | case config.EncryptionFormatAgeKey: 20 | if password != "" { 21 | passwordIdentity, err := 
age.NewScryptIdentity(password) 22 | if err != nil { 23 | return nil, err 24 | } 25 | 26 | r, err := age.Decrypt(bytes.NewBuffer(privkey), passwordIdentity) 27 | if err != nil { 28 | return nil, err 29 | } 30 | 31 | out := &bytes.Buffer{} 32 | if _, err := io.Copy(out, r); err != nil { 33 | return nil, err 34 | } 35 | 36 | privkey = out.Bytes() 37 | } 38 | 39 | return age.ParseX25519Identity(string(privkey)) 40 | case config.EncryptionFormatPGPKey: 41 | identities, err := openpgp.ReadKeyRing(bytes.NewBuffer(privkey)) 42 | if err != nil { 43 | return nil, err 44 | } 45 | 46 | if password != "" { 47 | for _, identity := range identities { 48 | if identity.PrivateKey == nil { 49 | return nil, config.ErrIdentityUnparsable 50 | } 51 | 52 | if err := identity.PrivateKey.Decrypt([]byte(password)); err != nil { 53 | return nil, err 54 | } 55 | 56 | for _, subkey := range identity.Subkeys { 57 | if err := subkey.PrivateKey.Decrypt([]byte(password)); err != nil { 58 | return nil, err 59 | } 60 | } 61 | } 62 | } 63 | 64 | return identities, nil 65 | case config.NoneKey: 66 | return privkey, nil 67 | default: 68 | return nil, config.ErrEncryptionFormatUnsupported 69 | } 70 | } 71 | 72 | func ParseSignerIdentity( 73 | signatureFormat string, 74 | privkey []byte, 75 | password string, 76 | ) (interface{}, error) { 77 | switch signatureFormat { 78 | case config.SignatureFormatMinisignKey: 79 | return minisign.DecryptKey(password, privkey) 80 | case config.SignatureFormatPGPKey: 81 | return ParseIdentity(signatureFormat, privkey, password) 82 | case config.NoneKey: 83 | return privkey, nil 84 | default: 85 | return nil, config.ErrSignatureFormatUnsupported 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /internal/logging/csv.go: -------------------------------------------------------------------------------- 1 | package logging 2 | 3 | import ( 4 | "encoding/csv" 5 | "fmt" 6 | "os" 7 | "time" 8 | 9 | "github.com/pojntfx/stfs/pkg/config" 10 | ) 11 | 12 | var ( 13 | tarHeaderCSV = []string{ 14 | "record", "lastknownrecord", "block", "lastknownblock", "typeflag", "name", "linkname", "size", "mode", "uid", "gid", "uname", "gname", "modtime", "accesstime", "changetime", "devmajor", "devminor", "paxrecords", "format", 15 | } 16 | tarHeaderEventCSV = append([]string{"type", "indexed"}, tarHeaderCSV...) 17 | ) 18 | 19 | func headerToCSV(hdr *config.Header) []string { 20 | return []string{ 21 | fmt.Sprintf("%v", hdr.Record), fmt.Sprintf("%v", hdr.Lastknownrecord), fmt.Sprintf("%v", hdr.Block), fmt.Sprintf("%v", hdr.Lastknownblock), fmt.Sprintf("%v", hdr.Typeflag), hdr.Name, hdr.Linkname, fmt.Sprintf("%v", hdr.Size), fmt.Sprintf("%v", hdr.Mode), fmt.Sprintf("%v", hdr.UID), fmt.Sprintf("%v", hdr.Gid), fmt.Sprintf("%v", hdr.Uname), fmt.Sprintf("%v", hdr.Gname), hdr.Modtime.Format(time.RFC3339), hdr.Accesstime.Format(time.RFC3339), hdr.Changetime.Format(time.RFC3339), fmt.Sprintf("%v", hdr.Devmajor), fmt.Sprintf("%v", hdr.Devminor), fmt.Sprintf("%v", hdr.Paxrecords), fmt.Sprintf("%v", hdr.Format), 22 | } 23 | } 24 | 25 | func headerEventToCSV(event *config.HeaderEvent) []string { 26 | return append([]string{event.Type, fmt.Sprintf("%v", event.Indexed)}, headerToCSV(event.Header)...) 
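// The two event-specific columns ("type" and "indexed") are prepended so that the remaining fields stay aligned with the tarHeaderCSV column names written as the header row.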
27 | } 28 | 29 | type CSVLogger struct { 30 | n int 31 | } 32 | 33 | func NewCSVLogger() *CSVLogger { 34 | return &CSVLogger{} 35 | } 36 | 37 | func (l *CSVLogger) PrintHeader(hdr *config.Header) { 38 | w := csv.NewWriter(os.Stdout) 39 | 40 | if l.n <= 0 { 41 | _ = w.Write(tarHeaderCSV) // Errors are ignored for compatibility with traditional logging APIs 42 | } 43 | 44 | _ = w.Write(headerToCSV(hdr)) // Errors are ignored for compatibility with traditional logging APIs 45 | 46 | w.Flush() 47 | 48 | l.n++ 49 | } 50 | 51 | func (l *CSVLogger) PrintHeaderEvent(event *config.HeaderEvent) { 52 | w := csv.NewWriter(os.Stdout) 53 | 54 | if l.n <= 0 { 55 | _ = w.Write(tarHeaderEventCSV) // Errors are ignored for compatibility with traditional logging APIs 56 | } 57 | 58 | _ = w.Write(headerEventToCSV(event)) // Errors are ignored for compatibility with traditional logging APIs 59 | 60 | w.Flush() 61 | 62 | l.n++ 63 | } 64 | -------------------------------------------------------------------------------- /internal/logging/json.go: -------------------------------------------------------------------------------- 1 | package logging 2 | 3 | import ( 4 | "bufio" 5 | "encoding/json" 6 | "fmt" 7 | "io" 8 | "os" 9 | "time" 10 | 11 | golog "github.com/fclairamb/go-log" 12 | ) 13 | 14 | type logMessage struct { 15 | Time int64 `json:"time"` 16 | Level string `json:"level"` 17 | Event string `json:"event"` 18 | Data interface{} `json:"data"` 19 | } 20 | 21 | func printJSON(level string, event string, keyvals interface{}) { 22 | line, _ := json.Marshal(&logMessage{ 23 | Time: time.Now().Unix(), 24 | Level: level, 25 | Event: event, 26 | Data: keyvals, 27 | }) // We are ignoring JSON marshalling errors 28 | 29 | _, _ = fmt.Fprintln(os.Stderr, string(line)) // We are ignoring printing errors in line with the stdlib 30 | } 31 | 32 | type JSONLogger struct { 33 | verbosity int 34 | } 35 | 36 | func NewJSONLogger(verbosity int) *JSONLogger { 37 | return &JSONLogger{ 38 | verbosity: verbosity, 39 | } 40 | } 41 | 42 | func NewJSONLoggerWriter(verbosity int, event, key string) io.Writer { 43 | jsonLogger := NewJSONLogger(verbosity) 44 | 45 | reader, writer := io.Pipe() 46 | scanner := bufio.NewScanner(reader) 47 | go func() { 48 | for scanner.Scan() { 49 | jsonLogger.Trace(event, map[string]interface{}{ 50 | key: scanner.Text(), 51 | }) 52 | } 53 | }() 54 | 55 | return writer 56 | } 57 | 58 | func (l JSONLogger) Trace(event string, keyvals ...interface{}) { 59 | if l.verbosity >= 4 { 60 | printJSON("TRACE", event, keyvals) 61 | } 62 | } 63 | 64 | func (l JSONLogger) Debug(event string, keyvals ...interface{}) { 65 | if l.verbosity >= 3 { 66 | printJSON("DEBUG", event, keyvals) 67 | } 68 | } 69 | 70 | func (l JSONLogger) Info(event string, keyvals ...interface{}) { 71 | if l.verbosity >= 2 { 72 | printJSON("INFO", event, keyvals) 73 | } 74 | } 75 | 76 | func (l JSONLogger) Warn(event string, keyvals ...interface{}) { 77 | if l.verbosity >= 1 { 78 | printJSON("WARN", event, keyvals) 79 | } 80 | } 81 | 82 | func (l JSONLogger) Error(event string, keyvals ...interface{}) { 83 | if l.verbosity >= 0 { 84 | printJSON("ERROR", event, keyvals) 85 | } 86 | } 87 | 88 | func (l JSONLogger) Panic(event string, keyvals ...interface{}) { 89 | if l.verbosity >= 0 { 90 | printJSON("PANIC", event, keyvals) 91 | } 92 | } 93 | 94 | func (l JSONLogger) With(keyvals ...interface{}) golog.Logger { 95 | return l 96 | } 97 | -------------------------------------------------------------------------------- /pkg/config/error.go: 
-------------------------------------------------------------------------------- 1 | package config 2 | 3 | import "errors" 4 | 5 | var ( 6 | ErrEncryptionFormatUnknown = errors.New("encryption format unknown") 7 | ErrSignatureFormatUnknown = errors.New("signature format unknown") 8 | ErrCompressionFormatUnknown = errors.New("compression format unknown") 9 | 10 | ErrEncryptionFormatUnsupported = errors.New("encryption format unsupported") 11 | ErrSignatureFormatUnsupported = errors.New("signature format unsupported") 12 | ErrCompressionFormatUnsupported = errors.New("compression format unsupported") 13 | 14 | ErrSignatureFormatRegularOnly = errors.New("signature format only supports regular files, not tape drives") 15 | ErrSignatureInvalid = errors.New("signature invalid") 16 | ErrSignatureMissing = errors.New("signature missing") 17 | 18 | ErrCompressionFormatRegularOnly = errors.New("compression format only supports regular files, not tape drives") 19 | ErrCompressionFormatRequiresLargerRecordSize = errors.New("compression format requires larger record size") 20 | ErrCompressionLevelUnsupported = errors.New("compression level unsupported") 21 | ErrCompressionLevelUnknown = errors.New("compression level unknown") 22 | 23 | ErrIdentityUnparsable = errors.New("identity could not be parsed") 24 | ErrRecipientUnparsable = errors.New("recipient could not be parsed") 25 | 26 | ErrKeygenFormatUnsupported = errors.New("key generation for format unsupported") 27 | 28 | ErrTarHeaderMissing = errors.New("tar header missing") 29 | ErrTarHeaderEmbeddedMissing = errors.New("embedded tar header missing") 30 | 31 | ErrTapeDrivesUnsupported = errors.New("system unsupported for tape drives") 32 | 33 | ErrSTFSVersionUnsupported = errors.New("STFS version unsupported") 34 | ErrSTFSActionUnsupported = errors.New("STFS action unsupported") 35 | 36 | ErrNotImplemented = errors.New("not implemented") 37 | ErrIsDirectory = errors.New("is a directory") 38 | ErrIsFile = errors.New("is a file") 39 | 40 | ErrFileSystemCacheTypeUnsupported = errors.New("file system cache type unsupported") 41 | ErrFileSystemCacheTypeUnknown = errors.New("file system cache type unknown") 42 | 43 | ErrWriteCacheTypeUnsupported = errors.New("write cache type unsupported") 44 | ErrWriteCacheTypeUnknown = errors.New("write cache type unknown") 45 | 46 | ErrNoRootDirectory = errors.New("root directory could not be found") 47 | ErrDirectoryNotEmpty = errors.New("directory not empty") 48 | ) 49 | -------------------------------------------------------------------------------- /pkg/tape/manager.go: -------------------------------------------------------------------------------- 1 | package tape 2 | 3 | import ( 4 | "io" 5 | "os" 6 | "sync" 7 | 8 | "github.com/pojntfx/stfs/pkg/config" 9 | ) 10 | 11 | type TapeManager struct { 12 | drive string 13 | mt config.MagneticTapeIO 14 | recordSize int 15 | overwrite bool 16 | 17 | physicalLock sync.Mutex 18 | 19 | readerLock sync.Mutex 20 | reader *os.File 21 | readerIsRegular bool 22 | 23 | closer func() error 24 | 25 | overwrote bool 26 | } 27 | 28 | func NewTapeManager( 29 | drive string, 30 | mt config.MagneticTapeIO, 31 | recordSize int, 32 | overwrite bool, 33 | ) *TapeManager { 34 | return &TapeManager{ 35 | drive: drive, 36 | mt: mt, 37 | recordSize: recordSize, 38 | overwrite: overwrite, 39 | } 40 | } 41 | 42 | func (m *TapeManager) GetWriter() (config.DriveWriterConfig, error) { 43 | m.physicalLock.Lock() 44 | 45 | overwrite := m.overwrite 46 | if m.overwrote { 47 | overwrite = false 48 | } 49 
| m.overwrote = true 50 | 51 | writer, writerIsRegular, err := OpenTapeWriteOnly( 52 | m.drive, 53 | m.mt, 54 | m.recordSize, 55 | overwrite, 56 | ) 57 | if err != nil { 58 | return config.DriveWriterConfig{}, err 59 | } 60 | 61 | m.closer = writer.Close 62 | 63 | return config.DriveWriterConfig{ 64 | Drive: writer, 65 | DriveIsRegular: writerIsRegular, 66 | }, nil 67 | } 68 | 69 | func (m *TapeManager) GetReader() (config.DriveReaderConfig, error) { 70 | if err := m.openOrReuseReader(); err != nil { 71 | return config.DriveReaderConfig{}, err 72 | } 73 | 74 | return config.DriveReaderConfig{ 75 | Drive: m.reader, 76 | DriveIsRegular: m.readerIsRegular, 77 | }, nil 78 | } 79 | 80 | func (m *TapeManager) Close() error { 81 | if m.closer != nil { 82 | if err := m.closer(); err != nil { 83 | return err 84 | } 85 | } 86 | 87 | m.physicalLock.Unlock() 88 | 89 | return nil 90 | } 91 | 92 | func (m *TapeManager) openOrReuseReader() error { 93 | m.readerLock.Lock() 94 | defer m.readerLock.Unlock() 95 | 96 | reopen := false 97 | if m.reader == nil { 98 | reopen = true 99 | } else if _, err := m.reader.Seek(0, io.SeekCurrent); err != nil { 100 | // File is closed 101 | reopen = true 102 | } 103 | 104 | if reopen { 105 | m.physicalLock.Lock() 106 | 107 | r, rr, err := OpenTapeReadOnly(m.drive) 108 | if err != nil { 109 | return err 110 | } 111 | 112 | m.reader = r 113 | m.readerIsRegular = rr 114 | 115 | m.closer = r.Close 116 | } 117 | 118 | return nil 119 | } 120 | -------------------------------------------------------------------------------- /pkg/operations/restore.go: -------------------------------------------------------------------------------- 1 | package operations 2 | 3 | import ( 4 | "archive/tar" 5 | "context" 6 | "database/sql" 7 | "io" 8 | "io/fs" 9 | "path" 10 | "path/filepath" 11 | "strings" 12 | 13 | "github.com/pojntfx/stfs/pkg/config" 14 | "github.com/pojntfx/stfs/pkg/recovery" 15 | ) 16 | 17 | func (o *Operations) Restore( 18 | getDst func(path string, mode fs.FileMode) (io.WriteCloser, error), 19 | mkdirAll func(path string, mode fs.FileMode) error, 20 | 21 | from string, 22 | to string, 23 | flatten bool, 24 | ) error { 25 | from, to = filepath.ToSlash(from), filepath.ToSlash(to) 26 | 27 | o.diskOperationLock.Lock() 28 | defer o.diskOperationLock.Unlock() 29 | 30 | headersToRestore := []*config.Header{} 31 | src := strings.TrimSuffix(from, "/") 32 | dbhdr, err := o.metadata.Metadata.GetHeader(context.Background(), src) 33 | if err != nil { 34 | if err == sql.ErrNoRows { 35 | src = src + "/" 36 | 37 | dbhdr, err = o.metadata.Metadata.GetHeader(context.Background(), src) 38 | if err != nil { 39 | return err 40 | } 41 | } else { 42 | return err 43 | } 44 | } 45 | headersToRestore = append(headersToRestore, dbhdr) 46 | 47 | // If the header refers to a directory, get it's children 48 | if dbhdr.Typeflag == tar.TypeDir { 49 | dbhdrs, err := o.metadata.Metadata.GetHeaderChildren(context.Background(), src) 50 | if err != nil { 51 | return err 52 | } 53 | 54 | headersToRestore = append(headersToRestore, dbhdrs...) 
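// headersToRestore now holds the requested entry plus, for directories, every indexed child; each one is read back from the tape via recovery.Fetch in the loop below.
//
// A minimal usage sketch of this method (illustrative only; the *Operations value ops and the paths are hypothetical):
//
//	err := ops.Restore(
//		func(path string, mode fs.FileMode) (io.WriteCloser, error) {
//			return os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, mode)
//		},
//		os.MkdirAll,
//		"backups/photos", // from: path as recorded in the index
//		"/tmp/restored",  // to: local destination directory
//		false,            // flatten
//	)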
55 | } 56 | 57 | reader, err := o.backend.GetReader() 58 | if err != nil { 59 | return err 60 | } 61 | defer o.backend.CloseReader() 62 | 63 | for _, dbhdr := range headersToRestore { 64 | if o.onHeader != nil { 65 | o.onHeader(&config.HeaderEvent{ 66 | Type: config.HeaderEventTypeRestore, 67 | Indexed: true, 68 | Header: dbhdr, 69 | }) 70 | } 71 | 72 | dst := dbhdr.Name 73 | if to != "" { 74 | if flatten { 75 | dst = to 76 | } else { 77 | dst = filepath.Join(to, strings.TrimPrefix(dst, from)) 78 | 79 | if strings.TrimSuffix(dst, "/") == strings.TrimSuffix(to, "/") { 80 | dst = filepath.Join(dst, path.Base(dbhdr.Name)) // Append the name so we don't overwrite 81 | } 82 | } 83 | } 84 | 85 | if err := recovery.Fetch( 86 | reader, 87 | o.backend.MagneticTapeIO, 88 | o.pipes, 89 | o.crypto, 90 | 91 | getDst, 92 | mkdirAll, 93 | 94 | int(dbhdr.Record), 95 | int(dbhdr.Block), 96 | dst, 97 | false, 98 | 99 | nil, 100 | ); err != nil { 101 | return err 102 | } 103 | } 104 | 105 | return nil 106 | } 107 | -------------------------------------------------------------------------------- /pkg/mtio/mtio_linux.go: -------------------------------------------------------------------------------- 1 | //go:build linux 2 | 3 | package mtio 4 | 5 | import ( 6 | "syscall" 7 | "unsafe" 8 | ) 9 | 10 | // See https://github.com/benmcclelland/mtio 11 | const ( 12 | mtioCpos = 0x80086d03 // Get tape position 13 | mtioCtop = 0x40086d01 // Do magnetic tape operation 14 | 15 | mtFsf = 1 // Forward space over FileMark, position at first record of next file 16 | mtOffl = 7 // Rewind and put the drive offline (eject?) 17 | mtEom = 12 // Goto end of recorded media (for appending files) 18 | mtSeek = 22 // Seek to block 19 | ) 20 | 21 | // position is struct for MTIOCPOS 22 | type position struct { 23 | blkNo int64 // Current block number 24 | } 25 | 26 | // operation is struct for MTIOCTOP 27 | type operation struct { 28 | op int16 // Operation ID 29 | pad int16 // Padding to match C structures 30 | count int32 // Operation count 31 | } 32 | 33 | type MagneticTapeIO struct{} 34 | 35 | func (t MagneticTapeIO) GetCurrentRecordFromTape(fd uintptr) (int64, error) { 36 | pos := &position{} 37 | if _, _, err := syscall.Syscall( 38 | syscall.SYS_IOCTL, 39 | fd, 40 | mtioCpos, 41 | uintptr(unsafe.Pointer(pos)), 42 | ); err != 0 { 43 | return 0, err 44 | } 45 | 46 | return pos.blkNo, nil 47 | } 48 | 49 | func (t MagneticTapeIO) GoToEndOfTape(fd uintptr) error { 50 | if _, _, err := syscall.Syscall( 51 | syscall.SYS_IOCTL, 52 | fd, 53 | mtioCtop, 54 | uintptr(unsafe.Pointer( 55 | &operation{ 56 | op: mtEom, 57 | }, 58 | )), 59 | ); err != 0 { 60 | return err 61 | } 62 | 63 | return nil 64 | } 65 | 66 | func (t MagneticTapeIO) GoToNextFileOnTape(fd uintptr) error { 67 | if _, _, err := syscall.Syscall( 68 | syscall.SYS_IOCTL, 69 | fd, 70 | mtioCtop, 71 | uintptr(unsafe.Pointer( 72 | &operation{ 73 | op: mtFsf, 74 | count: 1, 75 | }, 76 | )), 77 | ); err != 0 { 78 | return err 79 | } 80 | 81 | return nil 82 | } 83 | 84 | func (t MagneticTapeIO) EjectTape(fd uintptr) error { 85 | if _, _, err := syscall.Syscall( 86 | syscall.SYS_IOCTL, 87 | fd, 88 | mtioCtop, 89 | uintptr(unsafe.Pointer( 90 | &operation{ 91 | op: mtOffl, 92 | }, 93 | )), 94 | ); err != 0 { 95 | return err 96 | } 97 | 98 | return nil 99 | } 100 | 101 | func (t MagneticTapeIO) SeekToRecordOnTape(fd uintptr, record int32) error { 102 | if _, _, err := syscall.Syscall( 103 | syscall.SYS_IOCTL, 104 | fd, 105 | mtioCtop, 106 | uintptr(unsafe.Pointer( 107 | &operation{ 108 | 
op: mtSeek, 109 | count: record, 110 | }, 111 | )), 112 | ); err != 0 { 113 | return err 114 | } 115 | 116 | return nil 117 | } 118 | -------------------------------------------------------------------------------- /pkg/fs/fileinfo.go: -------------------------------------------------------------------------------- 1 | package fs 2 | 3 | import ( 4 | "archive/tar" 5 | "io/fs" 6 | "os" 7 | "time" 8 | 9 | "github.com/pojntfx/stfs/pkg/logging" 10 | ) 11 | 12 | type FileInfo struct { 13 | os.FileInfo 14 | 15 | name string 16 | size int64 17 | mode fs.FileMode 18 | modTime time.Time 19 | accessTime time.Time 20 | changeTime time.Time 21 | gid int 22 | uid int 23 | isDir bool 24 | 25 | log logging.StructuredLogger 26 | } 27 | 28 | func NewFileInfo( 29 | name string, 30 | size int64, 31 | mode fs.FileMode, 32 | modTime time.Time, 33 | accessTime time.Time, 34 | changeTime time.Time, 35 | gid int, 36 | uid int, 37 | isDir bool, 38 | 39 | log logging.StructuredLogger, 40 | ) *FileInfo { 41 | return &FileInfo{ 42 | name: name, 43 | size: size, 44 | mode: mode, 45 | modTime: modTime, 46 | accessTime: accessTime, 47 | changeTime: changeTime, 48 | gid: gid, 49 | uid: uid, 50 | isDir: isDir, 51 | 52 | log: log, 53 | } 54 | } 55 | 56 | func NewFileInfoFromTarHeader( 57 | hdr *tar.Header, 58 | 59 | log logging.StructuredLogger, 60 | ) *FileInfo { 61 | return &FileInfo{ 62 | name: hdr.FileInfo().Name(), 63 | size: hdr.FileInfo().Size(), 64 | mode: hdr.FileInfo().Mode(), 65 | modTime: hdr.FileInfo().ModTime(), 66 | accessTime: hdr.AccessTime, 67 | changeTime: hdr.ChangeTime, 68 | gid: hdr.Gid, 69 | uid: hdr.Uid, 70 | isDir: hdr.FileInfo().IsDir(), 71 | 72 | log: log, 73 | } 74 | } 75 | 76 | func (f *FileInfo) Name() string { 77 | f.log.Trace("FileInfo.Name", map[string]interface{}{ 78 | "name": f.name, 79 | }) 80 | 81 | return f.name 82 | } 83 | 84 | func (f *FileInfo) Size() int64 { 85 | f.log.Trace("FileInfo.Size", map[string]interface{}{ 86 | "name": f.name, 87 | }) 88 | 89 | return f.size 90 | } 91 | 92 | func (f *FileInfo) Mode() os.FileMode { 93 | f.log.Trace("FileInfo.Mode", map[string]interface{}{ 94 | "name": f.name, 95 | }) 96 | 97 | return f.mode 98 | } 99 | 100 | func (f *FileInfo) ModTime() time.Time { 101 | f.log.Trace("FileInfo.ModTime", map[string]interface{}{ 102 | "name": f.name, 103 | }) 104 | 105 | return f.modTime 106 | } 107 | 108 | func (f *FileInfo) IsDir() bool { 109 | f.log.Trace("FileInfo.IsDir", map[string]interface{}{ 110 | "name": f.name, 111 | }) 112 | 113 | return f.isDir 114 | } 115 | 116 | func (f *FileInfo) Sys() interface{} { 117 | f.log.Trace("FileInfo.Sys", map[string]interface{}{ 118 | "name": f.name, 119 | }) 120 | 121 | return NewStat( 122 | uint32(f.uid), 123 | uint32(f.gid), 124 | f.modTime.UnixNano(), 125 | f.accessTime.UnixNano(), 126 | f.changeTime.UnixNano(), 127 | ) 128 | } 129 | -------------------------------------------------------------------------------- /cmd/stfs/cmd/root.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "path/filepath" 7 | "strings" 8 | 9 | "github.com/pojntfx/stfs/internal/check" 10 | "github.com/pojntfx/stfs/internal/logging" 11 | "github.com/pojntfx/stfs/pkg/config" 12 | "github.com/spf13/cobra" 13 | "github.com/spf13/viper" 14 | "github.com/volatiletech/sqlboiler/v4/boil" 15 | ) 16 | 17 | const ( 18 | driveFlag = "drive" 19 | metadataFlag = "metadata" 20 | verboseFlag = "verbose" 21 | compressionFlag = "compression" 22 | encryptionFlag = 
"encryption" 23 | signatureFlag = "signature" 24 | ) 25 | 26 | var rootCmd = &cobra.Command{ 27 | Use: "stfs", 28 | Short: "Simple Tape File System", 29 | Long: `Simple Tape File System (STFS), a file system for tapes and tar files. 30 | 31 | Find more information at: 32 | https://github.com/pojntfx/stfs`, 33 | PersistentPreRunE: func(cmd *cobra.Command, args []string) error { 34 | viper.SetEnvPrefix("stfs") 35 | viper.SetEnvKeyReplacer(strings.NewReplacer("-", "_", ".", "_")) 36 | 37 | if err := viper.BindPFlags(cmd.PersistentFlags()); err != nil { 38 | return err 39 | } 40 | 41 | if verbosity := viper.GetInt(verboseFlag); verbosity >= 4 { 42 | boil.DebugMode = true 43 | boil.DebugWriter = logging.NewJSONLoggerWriter(verbosity, "SQL Query", "query") 44 | } 45 | 46 | if err := check.CheckCompressionFormat(viper.GetString(compressionFlag)); err != nil { 47 | return err 48 | } 49 | 50 | if err := check.CheckEncryptionFormat(viper.GetString(encryptionFlag)); err != nil { 51 | return err 52 | } 53 | 54 | return check.CheckSignatureFormat(viper.GetString(signatureFlag)) 55 | }, 56 | } 57 | 58 | func Execute() error { 59 | // Get default working dir 60 | home, err := os.UserHomeDir() 61 | if err != nil { 62 | return err 63 | } 64 | metadataPath := filepath.Join(home, ".local", "share", "stfs", "var", "lib", "stfs", "metadata.sqlite") 65 | 66 | rootCmd.PersistentFlags().StringP(driveFlag, "d", "/dev/nst0", "Tape or tar file to use") 67 | rootCmd.PersistentFlags().StringP(metadataFlag, "m", metadataPath, "Metadata database to use") 68 | rootCmd.PersistentFlags().IntP(verboseFlag, "v", 2, fmt.Sprintf("Verbosity level (default %v, available are %v)", 2, []int{0, 1, 2, 3, 4})) 69 | rootCmd.PersistentFlags().StringP(compressionFlag, "c", config.NoneKey, fmt.Sprintf("Compression format to use (default %v, available are %v)", config.NoneKey, config.KnownCompressionFormats)) 70 | rootCmd.PersistentFlags().StringP(encryptionFlag, "e", config.NoneKey, fmt.Sprintf("Encryption format to use (default %v, available are %v)", config.NoneKey, config.KnownEncryptionFormats)) 71 | rootCmd.PersistentFlags().StringP(signatureFlag, "s", config.NoneKey, fmt.Sprintf("Signature format to use (default %v, available are %v)", config.NoneKey, config.KnownSignatureFormats)) 72 | 73 | if err := viper.BindPFlags(rootCmd.PersistentFlags()); err != nil { 74 | return err 75 | } 76 | 77 | viper.AutomaticEnv() 78 | 79 | return rootCmd.Execute() 80 | } 81 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/pojntfx/stfs 2 | 3 | go 1.21 4 | 5 | toolchain go1.22.5 6 | 7 | require ( 8 | aead.dev/minisign v0.3.0 9 | filippo.io/age v1.2.0 10 | github.com/ProtonMail/go-crypto v1.0.0 11 | github.com/ProtonMail/gopenpgp/v2 v2.7.5 12 | github.com/andybalholm/brotli v1.1.0 13 | github.com/cosnicolaou/pbzip2 v1.0.3 14 | github.com/dsnet/compress v0.0.1 15 | github.com/fclairamb/ftpserverlib v0.24.1 16 | github.com/fclairamb/go-log v0.5.0 17 | github.com/friendsofgo/errors v0.9.2 18 | github.com/klauspost/compress v1.17.9 19 | github.com/klauspost/pgzip v1.2.6 20 | github.com/mattetti/filebuffer v1.0.1 21 | github.com/mattn/go-sqlite3 v1.14.22 22 | github.com/pierrec/lz4/v4 v4.1.21 23 | github.com/rubenv/sql-migrate v1.7.0 24 | github.com/spf13/afero v1.11.0 25 | github.com/spf13/cobra v1.8.1 26 | github.com/spf13/viper v1.19.0 27 | github.com/volatiletech/null/v8 v8.1.2 28 | 
github.com/volatiletech/sqlboiler/v4 v4.16.2 29 | github.com/volatiletech/strmangle v0.0.6 30 | modernc.org/sqlite v1.31.1 31 | ) 32 | 33 | require ( 34 | github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f // indirect 35 | github.com/cloudflare/circl v1.3.9 // indirect 36 | github.com/dustin/go-humanize v1.0.1 // indirect 37 | github.com/fsnotify/fsnotify v1.7.0 // indirect 38 | github.com/go-gorp/gorp/v3 v3.1.0 // indirect 39 | github.com/gofrs/uuid v4.4.0+incompatible // indirect 40 | github.com/google/uuid v1.6.0 // indirect 41 | github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect 42 | github.com/hashicorp/hcl v1.0.0 // indirect 43 | github.com/inconshreveable/mousetrap v1.1.0 // indirect 44 | github.com/magiconair/properties v1.8.7 // indirect 45 | github.com/mattn/go-isatty v0.0.20 // indirect 46 | github.com/mitchellh/mapstructure v1.5.0 // indirect 47 | github.com/ncruces/go-strftime v0.1.9 // indirect 48 | github.com/pelletier/go-toml/v2 v2.2.2 // indirect 49 | github.com/pkg/errors v0.9.1 // indirect 50 | github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect 51 | github.com/sagikazarmark/locafero v0.6.0 // indirect 52 | github.com/sagikazarmark/slog-shim v0.1.0 // indirect 53 | github.com/sourcegraph/conc v0.3.0 // indirect 54 | github.com/spf13/cast v1.6.0 // indirect 55 | github.com/spf13/pflag v1.0.5 // indirect 56 | github.com/subosito/gotenv v1.6.0 // indirect 57 | github.com/volatiletech/inflect v0.0.1 // indirect 58 | github.com/volatiletech/randomize v0.0.1 // indirect 59 | go.uber.org/multierr v1.11.0 // indirect 60 | golang.org/x/crypto v0.25.0 // indirect 61 | golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect 62 | golang.org/x/sys v0.22.0 // indirect 63 | golang.org/x/text v0.16.0 // indirect 64 | golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 // indirect 65 | gopkg.in/ini.v1 v1.67.0 // indirect 66 | gopkg.in/yaml.v3 v3.0.1 // indirect 67 | modernc.org/gc/v3 v3.0.0-20240722195230-4a140ff9c08e // indirect 68 | modernc.org/libc v1.55.6 // indirect 69 | modernc.org/mathutil v1.6.0 // indirect 70 | modernc.org/memory v1.8.0 // indirect 71 | modernc.org/strutil v1.2.0 // indirect 72 | modernc.org/token v1.1.0 // indirect 73 | ) 74 | -------------------------------------------------------------------------------- /pkg/config/config.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "context" 5 | "io" 6 | "io/fs" 7 | "time" 8 | ) 9 | 10 | type ReadSeekFder interface { 11 | io.ReadSeeker 12 | Fd() uintptr 13 | } 14 | 15 | type DriveReaderConfig struct { 16 | Drive ReadSeekFder 17 | DriveIsRegular bool 18 | } 19 | 20 | type DriveWriterConfig struct { 21 | Drive io.Writer 22 | DriveIsRegular bool 23 | } 24 | 25 | type BackendConfig struct { 26 | GetWriter func() (DriveWriterConfig, error) 27 | CloseWriter func() error 28 | 29 | GetReader func() (DriveReaderConfig, error) 30 | CloseReader func() error 31 | 32 | MagneticTapeIO MagneticTapeIO 33 | } 34 | 35 | type Header struct { 36 | Record int64 37 | Lastknownrecord int64 38 | Block int64 39 | Lastknownblock int64 40 | Deleted int64 41 | Typeflag int64 42 | Name string 43 | Linkname string 44 | Size int64 45 | Mode int64 46 | UID int64 47 | Gid int64 48 | Uname string 49 | Gname string 50 | Modtime time.Time 51 | Accesstime time.Time 52 | Changetime time.Time 53 | Devmajor int64 54 | Devminor int64 55 | Paxrecords string 56 | Format int64 57 | } 58 | 59 | type MetadataPersister interface { 60 | 
UpsertHeader(ctx context.Context, dbhdr *Header, initializing bool) error 61 | UpdateHeaderMetadata(ctx context.Context, dbhdr *Header) error 62 | MoveHeader(ctx context.Context, oldName string, newName string, lastknownrecord, lastknownblock int64) error 63 | GetHeaders(ctx context.Context) ([]*Header, error) 64 | GetHeader(ctx context.Context, name string) (*Header, error) 65 | GetHeaderByLinkname(ctx context.Context, linkname string) (*Header, error) 66 | GetHeaderChildren(ctx context.Context, name string) ([]*Header, error) 67 | GetRootPath(ctx context.Context) (string, error) 68 | GetHeaderDirectChildren(ctx context.Context, name string, limit int) ([]*Header, error) 69 | DeleteHeader(ctx context.Context, name string, lastknownrecord, lastknownblock int64) (*Header, error) 70 | GetLastIndexedRecordAndBlock(ctx context.Context, recordSize int) (int64, int64, error) 71 | PurgeAllHeaders(ctx context.Context) error 72 | } 73 | 74 | type MetadataConfig struct { 75 | Metadata MetadataPersister 76 | } 77 | 78 | type PipeConfig struct { 79 | Compression string 80 | Encryption string 81 | Signature string 82 | RecordSize int 83 | } 84 | 85 | type CryptoConfig struct { 86 | Recipient interface{} 87 | Identity interface{} 88 | Password string 89 | } 90 | 91 | type PasswordConfig struct { 92 | Password string 93 | } 94 | 95 | type FileConfig struct { 96 | GetFile func() (io.ReadSeekCloser, error) 97 | Info fs.FileInfo 98 | Path string 99 | Link string 100 | } 101 | 102 | type MagneticTapeIO interface { 103 | GetCurrentRecordFromTape(fd uintptr) (int64, error) 104 | GoToEndOfTape(fd uintptr) error 105 | GoToNextFileOnTape(fd uintptr) error 106 | EjectTape(fd uintptr) error 107 | SeekToRecordOnTape(fd uintptr, record int32) error 108 | } 109 | -------------------------------------------------------------------------------- /db/sqlite/migrations/metadata/1637447083.sql: -------------------------------------------------------------------------------- 1 | -- +migrate Up 2 | create table headers ( 3 | -- Record of this header on the tape 4 | record integer not null, 5 | -- Record of the last update header of this header on the tape 6 | lastknownrecord integer not null, 7 | -- Block of this header in the record 8 | block integer not null, 9 | -- Block of the last update header of this header in the record 10 | lastknownblock integer not null, 11 | -- If set, the header has been deleted on tape, but `lastknownrecord` and `lastknownblock` are still of relevance 12 | deleted integer not null, 13 | -- Typeflag is the type of header entry. 14 | -- The zero value is automatically promoted to either TypeReg or TypeDir 15 | -- depending on the presence of a trailing slash in Name. 16 | typeflag integer not null, 17 | -- Name of file entry 18 | name text not null, 19 | -- Target name of link (valid for TypeLink or TypeSymlink) 20 | linkname text not null, 21 | -- Logical file size in bytes 22 | size integer not null, 23 | -- Permission and mode bits 24 | mode integer not null, 25 | -- User ID of owner 26 | uid integer not null, 27 | -- Group ID of owner 28 | gid integer not null, 29 | -- User name of owner 30 | uname text not null, 31 | -- Group name of owner 32 | gname text not null, 33 | -- If the Format is unspecified, then Writer.WriteHeader rounds ModTime 34 | -- to the nearest second and ignores the AccessTime and ChangeTime fields. 35 | -- 36 | -- To use AccessTime or ChangeTime, specify the Format as PAX or GNU. 37 | -- To use sub-second resolution, specify the Format as PAX. 
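-- The three date columns below persist the tar header's ModTime, AccessTime and ChangeTime for every indexed entry.
-- An illustrative query against this table (not part of the migration):
--   select name, size, modtime from headers where deleted = 0 order by record, block;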
38 | -- Modification time 39 | modtime date not null, 40 | -- Access time (requires either PAX or GNU support) 41 | accesstime date not null, 42 | -- Change time (requires either PAX or GNU support) 43 | changetime date not null, 44 | -- Major device number (valid for TypeChar or TypeBlock) 45 | devmajor integer not null, 46 | -- Minor device number (valid for TypeChar or TypeBlock) 47 | devminor integer not null, 48 | -- PAXRecords is a map of PAX extended header records. 49 | -- 50 | -- User-defined records should have keys of the following form: 51 | -- VENDOR.keyword 52 | -- Where VENDOR is some namespace in all uppercase, and keyword may 53 | -- not contain the '=' character (e.g., "GOLANG.pkg.version"). 54 | -- The key and value should be non-empty UTF-8 strings. 55 | -- 56 | -- When Writer.WriteHeader is called, PAX records derived from the 57 | -- other fields in Header take precedence over PAXRecords. 58 | paxrecords text not null, 59 | -- Format specifies the format of the tar header. 60 | -- 61 | -- This is set by Reader.Next as a best-effort guess at the format. 62 | -- Since the Reader liberally reads some non-compliant files, 63 | -- it is possible for this to be FormatUnknown. 64 | -- 65 | -- If the format is unspecified when Writer.WriteHeader is called, 66 | -- then it uses the first format (in the order of USTAR, PAX, GNU) 67 | -- capable of encoding this Header (see Format). 68 | format integer not null, 69 | 70 | primary key (name, linkname) 71 | ); 72 | -- +migrate Down 73 | drop table headers; -------------------------------------------------------------------------------- /pkg/encryption/encrypt.go: -------------------------------------------------------------------------------- 1 | package encryption 2 | 3 | import ( 4 | "archive/tar" 5 | "bytes" 6 | "encoding/base64" 7 | "encoding/json" 8 | "io" 9 | 10 | "filippo.io/age" 11 | "github.com/ProtonMail/go-crypto/openpgp" 12 | "github.com/pojntfx/stfs/internal/ioext" 13 | "github.com/pojntfx/stfs/internal/records" 14 | "github.com/pojntfx/stfs/pkg/config" 15 | ) 16 | 17 | func Encrypt( 18 | dst io.Writer, 19 | encryptionFormat string, 20 | recipient interface{}, 21 | ) (io.WriteCloser, error) { 22 | switch encryptionFormat { 23 | case config.EncryptionFormatAgeKey: 24 | recipient, ok := recipient.(*age.X25519Recipient) 25 | if !ok { 26 | return nil, config.ErrRecipientUnparsable 27 | } 28 | 29 | return age.Encrypt(dst, recipient) 30 | case config.EncryptionFormatPGPKey: 31 | recipient, ok := recipient.(openpgp.EntityList) 32 | if !ok { 33 | return nil, config.ErrRecipientUnparsable 34 | } 35 | 36 | return openpgp.Encrypt(dst, recipient, nil, nil, nil) 37 | case config.NoneKey: 38 | return ioext.AddCloseNopToWriter(dst), nil 39 | default: 40 | return nil, config.ErrEncryptionFormatUnsupported 41 | } 42 | } 43 | 44 | func EncryptHeader( 45 | hdr *tar.Header, 46 | encryptionFormat string, 47 | recipient interface{}, 48 | ) error { 49 | if encryptionFormat == config.NoneKey { 50 | return nil 51 | } 52 | 53 | newHdr := &tar.Header{ 54 | Format: tar.FormatPAX, 55 | Size: hdr.Size, 56 | PAXRecords: map[string]string{}, 57 | } 58 | 59 | wrappedHeader, err := json.Marshal(hdr) 60 | if err != nil { 61 | return err 62 | } 63 | 64 | newHdr.PAXRecords[records.STFSRecordEmbeddedHeader], err = EncryptString(string(wrappedHeader), encryptionFormat, recipient) 65 | if err != nil { 66 | return err 67 | } 68 | 69 | *hdr = *newHdr 70 | 71 | return nil 72 | } 73 | 74 | func EncryptString( 75 | src string, 76 | encryptionFormat string, 77 | 
recipient interface{}, 78 | ) (string, error) { 79 | switch encryptionFormat { 80 | case config.EncryptionFormatAgeKey: 81 | recipient, ok := recipient.(*age.X25519Recipient) 82 | if !ok { 83 | return "", config.ErrRecipientUnparsable 84 | } 85 | 86 | out := &bytes.Buffer{} 87 | w, err := age.Encrypt(out, recipient) 88 | if err != nil { 89 | return "", err 90 | } 91 | 92 | if _, err := io.WriteString(w, src); err != nil { 93 | return "", err 94 | } 95 | 96 | if err := w.Close(); err != nil { 97 | return "", err 98 | } 99 | 100 | return base64.StdEncoding.EncodeToString(out.Bytes()), nil 101 | case config.EncryptionFormatPGPKey: 102 | recipient, ok := recipient.(openpgp.EntityList) 103 | if !ok { 104 | return "", config.ErrRecipientUnparsable 105 | } 106 | 107 | out := &bytes.Buffer{} 108 | w, err := openpgp.Encrypt(out, recipient, nil, nil, nil) 109 | if err != nil { 110 | return "", err 111 | } 112 | 113 | if _, err := io.WriteString(w, src); err != nil { 114 | return "", err 115 | } 116 | 117 | if err := w.Close(); err != nil { 118 | return "", err 119 | } 120 | 121 | return base64.StdEncoding.EncodeToString(out.Bytes()), nil 122 | case config.NoneKey: 123 | return src, nil 124 | default: 125 | return "", config.ErrEncryptionFormatUnsupported 126 | } 127 | } 128 | -------------------------------------------------------------------------------- /cmd/stfs/cmd/recovery_query.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "github.com/pojntfx/stfs/internal/check" 5 | "github.com/pojntfx/stfs/internal/keyext" 6 | "github.com/pojntfx/stfs/internal/logging" 7 | "github.com/pojntfx/stfs/pkg/config" 8 | "github.com/pojntfx/stfs/pkg/keys" 9 | "github.com/pojntfx/stfs/pkg/mtio" 10 | "github.com/pojntfx/stfs/pkg/recovery" 11 | "github.com/pojntfx/stfs/pkg/tape" 12 | "github.com/spf13/cobra" 13 | "github.com/spf13/viper" 14 | ) 15 | 16 | var recoveryQueryCmd = &cobra.Command{ 17 | Use: "query", 18 | Short: "Query contents of tape or tar file without the index", 19 | PreRunE: func(cmd *cobra.Command, args []string) error { 20 | if err := viper.BindPFlags(cmd.PersistentFlags()); err != nil { 21 | return err 22 | } 23 | 24 | if err := check.CheckKeyAccessible(viper.GetString(encryptionFlag), viper.GetString(identityFlag)); err != nil { 25 | return err 26 | } 27 | 28 | return check.CheckKeyAccessible(viper.GetString(signatureFlag), viper.GetString(recipientFlag)) 29 | }, 30 | RunE: func(cmd *cobra.Command, args []string) error { 31 | pubkey, err := keyext.ReadKey(viper.GetString(signatureFlag), viper.GetString(recipientFlag)) 32 | if err != nil { 33 | return err 34 | } 35 | 36 | recipient, err := keys.ParseSignerRecipient(viper.GetString(signatureFlag), pubkey) 37 | if err != nil { 38 | return err 39 | } 40 | 41 | privkey, err := keyext.ReadKey(viper.GetString(encryptionFlag), viper.GetString(identityFlag)) 42 | if err != nil { 43 | return err 44 | } 45 | 46 | identity, err := keys.ParseIdentity(viper.GetString(encryptionFlag), privkey, viper.GetString(passwordFlag)) 47 | if err != nil { 48 | return err 49 | } 50 | 51 | reader, readerIsRegular, err := tape.OpenTapeReadOnly( 52 | viper.GetString(driveFlag), 53 | ) 54 | if err != nil { 55 | return nil 56 | } 57 | defer reader.Close() 58 | 59 | if _, err := recovery.Query( 60 | config.DriveReaderConfig{ 61 | Drive: reader, 62 | DriveIsRegular: readerIsRegular, 63 | }, 64 | mtio.MagneticTapeIO{}, 65 | config.PipeConfig{ 66 | Compression: viper.GetString(compressionFlag), 67 | 
Encryption: viper.GetString(encryptionFlag), 68 | Signature: viper.GetString(signatureFlag), 69 | RecordSize: viper.GetInt(recordSizeFlag), 70 | }, 71 | config.CryptoConfig{ 72 | Recipient: recipient, 73 | Identity: identity, 74 | Password: viper.GetString(passwordFlag), 75 | }, 76 | 77 | viper.GetInt(recordFlag), 78 | viper.GetInt(blockFlag), 79 | 80 | logging.NewCSVLogger().PrintHeader, 81 | ); err != nil { 82 | return err 83 | } 84 | 85 | return nil 86 | }, 87 | } 88 | 89 | func init() { 90 | recoveryQueryCmd.PersistentFlags().IntP(recordSizeFlag, "z", 20, "Amount of 512-byte blocks per record") 91 | recoveryQueryCmd.PersistentFlags().IntP(recordFlag, "k", 0, "Record to seek to before counting") 92 | recoveryQueryCmd.PersistentFlags().IntP(blockFlag, "b", 0, "Block in record to seek to before counting") 93 | recoveryQueryCmd.PersistentFlags().StringP(identityFlag, "i", "", "Path to private key of recipient that has been encrypted for") 94 | recoveryQueryCmd.PersistentFlags().StringP(passwordFlag, "p", "", "Password for the private key") 95 | recoveryQueryCmd.PersistentFlags().StringP(recipientFlag, "r", "", "Path to the public key to verify with") 96 | 97 | viper.AutomaticEnv() 98 | 99 | recoveryCmd.AddCommand(recoveryQueryCmd) 100 | } 101 | -------------------------------------------------------------------------------- /.github/workflows/hydrun.yaml: -------------------------------------------------------------------------------- 1 | name: hydrun CI 2 | 3 | on: 4 | push: 5 | pull_request: 6 | schedule: 7 | - cron: "0 0 * * 0" 8 | 9 | jobs: 10 | build-linux: 11 | runs-on: ${{ matrix.target.runner }} 12 | permissions: 13 | contents: read 14 | strategy: 15 | matrix: 16 | target: 17 | # Tests 18 | - id: test 19 | src: . 20 | os: golang:bookworm 21 | flags: -e '-v /tmp/ccache:/root/.cache/go-build' 22 | cmd: GOFLAGS="-short" ./Hydrunfile test 23 | dst: out/nonexistent 24 | runner: ubuntu-latest 25 | 26 | # Binaries 27 | - id: go.stfs 28 | src: . 29 | os: golang:bookworm 30 | flags: -e '-v /tmp/ccache:/root/.cache/go-build' 31 | cmd: ./Hydrunfile go stfs 32 | dst: out/* 33 | runner: ubuntu-latest 34 | 35 | steps: 36 | - name: Maximize build space 37 | run: | 38 | sudo rm -rf /usr/share/dotnet 39 | sudo rm -rf /usr/local/lib/android 40 | sudo rm -rf /opt/ghc 41 | - name: Checkout 42 | uses: actions/checkout@v4 43 | - name: Restore ccache 44 | uses: actions/cache/restore@v4 45 | with: 46 | path: | 47 | /tmp/ccache 48 | key: cache-ccache-${{ matrix.target.id }} 49 | - name: Set up QEMU 50 | uses: docker/setup-qemu-action@v3 51 | - name: Set up Docker Buildx 52 | uses: docker/setup-buildx-action@v3 53 | - name: Set up hydrun 54 | run: | 55 | curl -L -o /tmp/hydrun "https://github.com/pojntfx/hydrun/releases/latest/download/hydrun.linux-$(uname -m)" 56 | sudo install /tmp/hydrun /usr/local/bin 57 | - name: Build with hydrun 58 | working-directory: ${{ matrix.target.src }} 59 | run: hydrun -o ${{ matrix.target.os }} ${{ matrix.target.flags }} "${{ matrix.target.cmd }}" 60 | - name: Fix permissions for output 61 | run: sudo chown -R $USER .
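# The /tmp/ccache volume that the matrix flags mount into the build container (as the Go build cache) is saved back to the Actions cache in the next step so that later runs of the same matrix target can reuse it.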
62 | - name: Save ccache 63 | uses: actions/cache/save@v4 64 | with: 65 | path: | 66 | /tmp/ccache 67 | key: cache-ccache-${{ matrix.target.id }} 68 | - name: Upload output 69 | uses: actions/upload-artifact@v4 70 | with: 71 | name: ${{ matrix.target.id }} 72 | path: ${{ matrix.target.dst }} 73 | 74 | publish-linux: 75 | runs-on: ubuntu-latest 76 | permissions: 77 | contents: write 78 | needs: build-linux 79 | 80 | steps: 81 | - name: Checkout 82 | uses: actions/checkout@v4 83 | - name: Download output 84 | uses: actions/download-artifact@v4 85 | with: 86 | path: /tmp/out 87 | - name: Extract branch name 88 | id: extract_branch 89 | run: echo "##[set-output name=branch;]$(echo ${GITHUB_REF#refs/heads/})" 90 | - name: Publish pre-release to GitHub releases 91 | if: ${{ github.ref == 'refs/heads/main' }} 92 | uses: softprops/action-gh-release@v2 93 | with: 94 | tag_name: release-${{ steps.extract_branch.outputs.branch }} 95 | prerelease: true 96 | files: | 97 | /tmp/out/*/* 98 | - name: Publish release to GitHub releases 99 | if: startsWith(github.ref, 'refs/tags/v') 100 | uses: softprops/action-gh-release@v2 101 | with: 102 | prerelease: false 103 | files: | 104 | /tmp/out/*/* 105 | -------------------------------------------------------------------------------- /pkg/encryption/decrypt.go: -------------------------------------------------------------------------------- 1 | package encryption 2 | 3 | import ( 4 | "archive/tar" 5 | "bytes" 6 | "encoding/base64" 7 | "encoding/json" 8 | "io" 9 | 10 | "filippo.io/age" 11 | "github.com/ProtonMail/go-crypto/openpgp" 12 | "github.com/pojntfx/stfs/internal/records" 13 | "github.com/pojntfx/stfs/pkg/config" 14 | ) 15 | 16 | func Decrypt( 17 | src io.Reader, 18 | encryptionFormat string, 19 | identity interface{}, 20 | ) (io.ReadCloser, error) { 21 | switch encryptionFormat { 22 | case config.EncryptionFormatAgeKey: 23 | identity, ok := identity.(*age.X25519Identity) 24 | if !ok { 25 | return nil, config.ErrIdentityUnparsable 26 | } 27 | 28 | r, err := age.Decrypt(src, identity) 29 | if err != nil { 30 | return nil, err 31 | } 32 | 33 | return io.NopCloser(r), nil 34 | case config.EncryptionFormatPGPKey: 35 | identity, ok := identity.(openpgp.EntityList) 36 | if !ok { 37 | return nil, config.ErrIdentityUnparsable 38 | } 39 | 40 | r, err := openpgp.ReadMessage(src, identity, nil, nil) 41 | if err != nil { 42 | return nil, err 43 | } 44 | 45 | return io.NopCloser(r.UnverifiedBody), nil 46 | case config.NoneKey: 47 | return io.NopCloser(src), nil 48 | default: 49 | return nil, config.ErrEncryptionFormatUnsupported 50 | } 51 | } 52 | 53 | func DecryptHeader( 54 | hdr *tar.Header, 55 | encryptionFormat string, 56 | identity interface{}, 57 | ) error { 58 | if encryptionFormat == config.NoneKey { 59 | return nil 60 | } 61 | 62 | if hdr.PAXRecords == nil { 63 | return config.ErrTarHeaderEmbeddedMissing 64 | } 65 | 66 | encryptedEmbeddedHeader, ok := hdr.PAXRecords[records.STFSRecordEmbeddedHeader] 67 | if !ok { 68 | return config.ErrTarHeaderEmbeddedMissing 69 | } 70 | 71 | embeddedHeader, err := DecryptString(encryptedEmbeddedHeader, encryptionFormat, identity) 72 | if err != nil { 73 | return err 74 | } 75 | 76 | var newHdr tar.Header 77 | if err := json.Unmarshal([]byte(embeddedHeader), &newHdr); err != nil { 78 | return err 79 | } 80 | 81 | *hdr = newHdr 82 | 83 | return nil 84 | } 85 | 86 | func DecryptString( 87 | src string, 88 | encryptionFormat string, 89 | identity interface{}, 90 | ) (string, error) { 91 | switch encryptionFormat { 92 | case 
config.EncryptionFormatAgeKey: 93 | identity, ok := identity.(*age.X25519Identity) 94 | if !ok { 95 | return "", config.ErrIdentityUnparsable 96 | } 97 | 98 | decoded, err := base64.StdEncoding.DecodeString(src) 99 | if err != nil { 100 | return "", err 101 | } 102 | 103 | r, err := age.Decrypt(bytes.NewBufferString(string(decoded)), identity) 104 | if err != nil { 105 | return "", err 106 | } 107 | 108 | out := &bytes.Buffer{} 109 | if _, err := io.Copy(out, r); err != nil { 110 | return "", err 111 | } 112 | 113 | return out.String(), nil 114 | case config.EncryptionFormatPGPKey: 115 | identity, ok := identity.(openpgp.EntityList) 116 | if !ok { 117 | return "", config.ErrIdentityUnparsable 118 | } 119 | 120 | decoded, err := base64.StdEncoding.DecodeString(src) 121 | if err != nil { 122 | return "", err 123 | } 124 | 125 | r, err := openpgp.ReadMessage(bytes.NewBufferString(string(decoded)), identity, nil, nil) 126 | if err != nil { 127 | return "", err 128 | } 129 | 130 | out := &bytes.Buffer{} 131 | if _, err := io.Copy(out, r.UnverifiedBody); err != nil { 132 | return "", err 133 | } 134 | 135 | return out.String(), nil 136 | case config.NoneKey: 137 | return src, nil 138 | default: 139 | return "", config.ErrEncryptionFormatUnsupported 140 | } 141 | } 142 | -------------------------------------------------------------------------------- /pkg/utility/keygen.go: -------------------------------------------------------------------------------- 1 | package utility 2 | 3 | import ( 4 | "bytes" 5 | "crypto/rand" 6 | "io" 7 | 8 | "aead.dev/minisign" 9 | "filippo.io/age" 10 | "github.com/ProtonMail/gopenpgp/v2/armor" 11 | "github.com/ProtonMail/gopenpgp/v2/crypto" 12 | "github.com/ProtonMail/gopenpgp/v2/helper" 13 | "github.com/pojntfx/stfs/pkg/config" 14 | ) 15 | 16 | func Keygen( 17 | pipes config.PipeConfig, 18 | password config.PasswordConfig, 19 | ) (privkey []byte, pubkey []byte, err error) { 20 | if pipes.Encryption != config.NoneKey { 21 | priv, pub, err := generateEncryptionKey(pipes.Encryption, password.Password) 22 | if err != nil { 23 | return []byte{}, []byte{}, err 24 | } 25 | 26 | privkey = priv 27 | pubkey = pub 28 | } else if pipes.Signature != config.NoneKey { 29 | priv, pub, err := generateSignatureKey(pipes.Signature, password.Password) 30 | if err != nil { 31 | return []byte{}, []byte{}, err 32 | } 33 | 34 | privkey = priv 35 | pubkey = pub 36 | } else { 37 | return []byte{}, []byte{}, config.ErrKeygenFormatUnsupported 38 | } 39 | 40 | return privkey, pubkey, nil 41 | } 42 | 43 | func generateEncryptionKey( 44 | encryptionFormat string, 45 | password string, 46 | ) (privkey []byte, pubkey []byte, err error) { 47 | switch encryptionFormat { 48 | case config.EncryptionFormatAgeKey: 49 | identity, err := age.GenerateX25519Identity() 50 | if err != nil { 51 | return []byte{}, []byte{}, err 52 | } 53 | 54 | pub := identity.Recipient().String() 55 | priv := identity.String() 56 | 57 | if password != "" { 58 | passwordRecipient, err := age.NewScryptRecipient(password) 59 | if err != nil { 60 | return []byte{}, []byte{}, err 61 | } 62 | 63 | out := &bytes.Buffer{} 64 | w, err := age.Encrypt(out, passwordRecipient) 65 | if err != nil { 66 | return []byte{}, []byte{}, err 67 | } 68 | 69 | if _, err := io.WriteString(w, priv); err != nil { 70 | return []byte{}, []byte{}, err 71 | } 72 | 73 | if err := w.Close(); err != nil { 74 | return []byte{}, []byte{}, err 75 | } 76 | 77 | priv = out.String() 78 | } 79 | 80 | return []byte(priv), []byte(pub), nil 81 | case 
config.EncryptionFormatPGPKey: 82 | armoredIdentity, err := helper.GenerateKey("STFS", "stfs@example.com", []byte(password), "x25519", 0) 83 | if err != nil { 84 | return []byte{}, []byte{}, err 85 | } 86 | 87 | rawIdentity, err := armor.Unarmor(armoredIdentity) 88 | if err != nil { 89 | return []byte{}, []byte{}, err 90 | } 91 | 92 | identity, err := crypto.NewKey([]byte(rawIdentity)) 93 | if err != nil { 94 | return []byte{}, []byte{}, err 95 | } 96 | 97 | pub, err := identity.GetPublicKey() 98 | if err != nil { 99 | return []byte{}, []byte{}, err 100 | } 101 | 102 | priv, err := identity.Serialize() 103 | if err != nil { 104 | return []byte{}, []byte{}, err 105 | } 106 | 107 | return priv, pub, nil 108 | default: 109 | return []byte{}, []byte{}, config.ErrKeygenFormatUnsupported 110 | } 111 | } 112 | 113 | func generateSignatureKey( 114 | signatureFormat string, 115 | password string, 116 | ) (privkey []byte, pubkey []byte, err error) { 117 | switch signatureFormat { 118 | case config.SignatureFormatMinisignKey: 119 | pub, rawPriv, err := minisign.GenerateKey(rand.Reader) 120 | if err != nil { 121 | return []byte{}, []byte{}, err 122 | } 123 | 124 | priv, err := minisign.EncryptKey(password, rawPriv) 125 | if err != nil { 126 | return []byte{}, []byte{}, err 127 | } 128 | 129 | return priv, []byte(pub.String()), err 130 | case config.SignatureFormatPGPKey: 131 | return generateEncryptionKey(signatureFormat, password) 132 | default: 133 | return []byte{}, []byte{}, config.ErrKeygenFormatUnsupported 134 | } 135 | } 136 | -------------------------------------------------------------------------------- /cmd/stfs/cmd/operation_delete.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "github.com/pojntfx/stfs/internal/check" 5 | "github.com/pojntfx/stfs/internal/keyext" 6 | "github.com/pojntfx/stfs/internal/logging" 7 | "github.com/pojntfx/stfs/pkg/config" 8 | "github.com/pojntfx/stfs/pkg/keys" 9 | "github.com/pojntfx/stfs/pkg/mtio" 10 | "github.com/pojntfx/stfs/pkg/operations" 11 | "github.com/pojntfx/stfs/pkg/persisters" 12 | "github.com/pojntfx/stfs/pkg/tape" 13 | "github.com/spf13/cobra" 14 | "github.com/spf13/viper" 15 | ) 16 | 17 | const ( 18 | nameFlag = "name" 19 | ) 20 | 21 | var operationDeleteCmd = &cobra.Command{ 22 | Use: "delete", 23 | Aliases: []string{"del", "d", "rm", "remove"}, 24 | Short: "Delete a file or directory from tape or tar file", 25 | PreRunE: func(cmd *cobra.Command, args []string) error { 26 | if err := viper.BindPFlags(cmd.PersistentFlags()); err != nil { 27 | return err 28 | } 29 | 30 | if err := check.CheckKeyAccessible(viper.GetString(encryptionFlag), viper.GetString(recipientFlag)); err != nil { 31 | return err 32 | } 33 | 34 | return check.CheckKeyAccessible(viper.GetString(signatureFlag), viper.GetString(identityFlag)) 35 | }, 36 | RunE: func(cmd *cobra.Command, args []string) error { 37 | pubkey, err := keyext.ReadKey(viper.GetString(encryptionFlag), viper.GetString(recipientFlag)) 38 | if err != nil { 39 | return err 40 | } 41 | 42 | recipient, err := keys.ParseRecipient(viper.GetString(encryptionFlag), pubkey) 43 | if err != nil { 44 | return err 45 | } 46 | 47 | privkey, err := keyext.ReadKey(viper.GetString(signatureFlag), viper.GetString(identityFlag)) 48 | if err != nil { 49 | return err 50 | } 51 | 52 | identity, err := keys.ParseSignerIdentity(viper.GetString(signatureFlag), privkey, viper.GetString(passwordFlag)) 53 | if err != nil { 54 | return err 55 | } 56 | 57 | mt := 
mtio.MagneticTapeIO{} 58 | tm := tape.NewTapeManager( 59 | viper.GetString(driveFlag), 60 | mt, 61 | viper.GetInt(recordSizeFlag), 62 | false, 63 | ) 64 | 65 | metadataPersister := persisters.NewMetadataPersister(viper.GetString(metadataFlag)) 66 | if err := metadataPersister.Open(); err != nil { 67 | return err 68 | } 69 | 70 | ops := operations.NewOperations( 71 | config.BackendConfig{ 72 | GetWriter: tm.GetWriter, 73 | CloseWriter: tm.Close, 74 | 75 | GetReader: tm.GetReader, 76 | CloseReader: tm.Close, 77 | 78 | MagneticTapeIO: mt, 79 | }, 80 | config.MetadataConfig{ 81 | Metadata: metadataPersister, 82 | }, 83 | 84 | config.PipeConfig{ 85 | Compression: viper.GetString(compressionFlag), 86 | Encryption: viper.GetString(encryptionFlag), 87 | Signature: viper.GetString(signatureFlag), 88 | RecordSize: viper.GetInt(recordSizeFlag), 89 | }, 90 | config.CryptoConfig{ 91 | Recipient: recipient, 92 | Identity: identity, 93 | Password: viper.GetString(passwordFlag), 94 | }, 95 | 96 | logging.NewCSVLogger().PrintHeaderEvent, 97 | ) 98 | 99 | return ops.Delete(viper.GetString(nameFlag)) 100 | }, 101 | } 102 | 103 | func init() { 104 | operationDeleteCmd.PersistentFlags().IntP(recordSizeFlag, "z", 20, "Amount of 512-bit blocks per record") 105 | operationDeleteCmd.PersistentFlags().StringP(nameFlag, "n", "", "Name of the file to remove") 106 | operationDeleteCmd.PersistentFlags().StringP(recipientFlag, "r", "", "Path to public key of recipient to encrypt for") 107 | operationDeleteCmd.PersistentFlags().StringP(identityFlag, "i", "", "Path to private key to sign with") 108 | operationDeleteCmd.PersistentFlags().StringP(passwordFlag, "p", "", "Password for the private key") 109 | 110 | viper.AutomaticEnv() 111 | 112 | operationCmd.AddCommand(operationDeleteCmd) 113 | } 114 | -------------------------------------------------------------------------------- /cmd/stfs/cmd/operation_move.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "github.com/pojntfx/stfs/internal/check" 5 | "github.com/pojntfx/stfs/internal/keyext" 6 | "github.com/pojntfx/stfs/internal/logging" 7 | "github.com/pojntfx/stfs/pkg/config" 8 | "github.com/pojntfx/stfs/pkg/keys" 9 | "github.com/pojntfx/stfs/pkg/mtio" 10 | "github.com/pojntfx/stfs/pkg/operations" 11 | "github.com/pojntfx/stfs/pkg/persisters" 12 | "github.com/pojntfx/stfs/pkg/tape" 13 | "github.com/spf13/cobra" 14 | "github.com/spf13/viper" 15 | ) 16 | 17 | var operationMoveCmd = &cobra.Command{ 18 | Use: "move", 19 | Aliases: []string{"mov", "m", "mv"}, 20 | Short: "Move a file or directory on tape or tar file", 21 | PreRunE: func(cmd *cobra.Command, args []string) error { 22 | if err := viper.BindPFlags(cmd.PersistentFlags()); err != nil { 23 | return err 24 | } 25 | 26 | if err := check.CheckKeyAccessible(viper.GetString(encryptionFlag), viper.GetString(recipientFlag)); err != nil { 27 | return err 28 | } 29 | 30 | return check.CheckKeyAccessible(viper.GetString(signatureFlag), viper.GetString(identityFlag)) 31 | }, 32 | RunE: func(cmd *cobra.Command, args []string) error { 33 | pubkey, err := keyext.ReadKey(viper.GetString(encryptionFlag), viper.GetString(recipientFlag)) 34 | if err != nil { 35 | return err 36 | } 37 | 38 | recipient, err := keys.ParseRecipient(viper.GetString(encryptionFlag), pubkey) 39 | if err != nil { 40 | return err 41 | } 42 | 43 | privkey, err := keyext.ReadKey(viper.GetString(signatureFlag), viper.GetString(identityFlag)) 44 | if err != nil { 45 | return err 46 | } 47 | 
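// The signing identity is parsed from the private key read above; the password flag is used to unlock the key if it is protected (a descriptive note, the behavior is defined by keys.ParseSignerIdentity).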
48 | identity, err := keys.ParseSignerIdentity(viper.GetString(signatureFlag), privkey, viper.GetString(passwordFlag)) 49 | if err != nil { 50 | return err 51 | } 52 | 53 | mt := mtio.MagneticTapeIO{} 54 | tm := tape.NewTapeManager( 55 | viper.GetString(driveFlag), 56 | mt, 57 | viper.GetInt(recordSizeFlag), 58 | false, 59 | ) 60 | 61 | metadataPersister := persisters.NewMetadataPersister(viper.GetString(metadataFlag)) 62 | if err := metadataPersister.Open(); err != nil { 63 | return err 64 | } 65 | 66 | ops := operations.NewOperations( 67 | config.BackendConfig{ 68 | GetWriter: tm.GetWriter, 69 | CloseWriter: tm.Close, 70 | 71 | GetReader: tm.GetReader, 72 | CloseReader: tm.Close, 73 | 74 | MagneticTapeIO: mt, 75 | }, 76 | config.MetadataConfig{ 77 | Metadata: metadataPersister, 78 | }, 79 | 80 | config.PipeConfig{ 81 | Compression: viper.GetString(compressionFlag), 82 | Encryption: viper.GetString(encryptionFlag), 83 | Signature: viper.GetString(signatureFlag), 84 | RecordSize: viper.GetInt(recordSizeFlag), 85 | }, 86 | config.CryptoConfig{ 87 | Recipient: recipient, 88 | Identity: identity, 89 | Password: viper.GetString(passwordFlag), 90 | }, 91 | 92 | logging.NewCSVLogger().PrintHeaderEvent, 93 | ) 94 | 95 | return ops.Move(viper.GetString(fromFlag), viper.GetString(toFlag)) 96 | }, 97 | } 98 | 99 | func init() { 100 | operationMoveCmd.PersistentFlags().IntP(recordSizeFlag, "z", 20, "Amount of 512-bit blocks per record") 101 | operationMoveCmd.PersistentFlags().StringP(fromFlag, "f", "", "Current path of the file or directory to move") 102 | operationMoveCmd.PersistentFlags().StringP(toFlag, "t", "", "Path to move the file or directory to") 103 | operationMoveCmd.PersistentFlags().StringP(recipientFlag, "r", "", "Path to public key of recipient to encrypt for") 104 | operationMoveCmd.PersistentFlags().StringP(identityFlag, "i", "", "Path to private key to sign with") 105 | operationMoveCmd.PersistentFlags().StringP(passwordFlag, "p", "", "Password for the private key") 106 | 107 | viper.AutomaticEnv() 108 | 109 | operationCmd.AddCommand(operationMoveCmd) 110 | } 111 | -------------------------------------------------------------------------------- /pkg/recovery/fetch.go: -------------------------------------------------------------------------------- 1 | package recovery 2 | 3 | import ( 4 | "archive/tar" 5 | "bufio" 6 | "io" 7 | "io/fs" 8 | "path" 9 | "path/filepath" 10 | 11 | "github.com/pojntfx/stfs/internal/converters" 12 | "github.com/pojntfx/stfs/internal/records" 13 | "github.com/pojntfx/stfs/pkg/compression" 14 | "github.com/pojntfx/stfs/pkg/config" 15 | "github.com/pojntfx/stfs/pkg/encryption" 16 | "github.com/pojntfx/stfs/pkg/signature" 17 | ) 18 | 19 | func Fetch( 20 | reader config.DriveReaderConfig, 21 | mt config.MagneticTapeIO, 22 | pipes config.PipeConfig, 23 | crypto config.CryptoConfig, 24 | 25 | getDst func(path string, mode fs.FileMode) (io.WriteCloser, error), 26 | mkdirAll func(path string, mode fs.FileMode) error, 27 | 28 | record int, 29 | block int, 30 | to string, 31 | preview bool, 32 | 33 | onHeader func(hdr *config.Header), 34 | ) error { 35 | to = filepath.ToSlash(to) 36 | 37 | var tr *tar.Reader 38 | if reader.DriveIsRegular { 39 | // Seek to record and block 40 | if _, err := reader.Drive.Seek(int64((pipes.RecordSize*config.MagneticTapeBlockSize*record)+block*config.MagneticTapeBlockSize), io.SeekStart); err != nil { 41 | return err 42 | } 43 | 44 | tr = tar.NewReader(reader.Drive) 45 | } else { 46 | // Seek to record 47 | if err := 
mt.SeekToRecordOnTape(reader.Drive.Fd(), int32(record)); err != nil { 48 | return err 49 | } 50 | 51 | // Seek to block 52 | br := bufio.NewReaderSize(reader.Drive, config.MagneticTapeBlockSize*pipes.RecordSize) 53 | if _, err := br.Read(make([]byte, block*config.MagneticTapeBlockSize)); err != nil { 54 | return err 55 | } 56 | 57 | tr = tar.NewReader(br) 58 | } 59 | 60 | hdr, err := tr.Next() 61 | if err != nil { 62 | return err 63 | } 64 | 65 | if err := encryption.DecryptHeader(hdr, pipes.Encryption, crypto.Identity); err != nil { 66 | return err 67 | } 68 | 69 | if err := signature.VerifyHeader(hdr, reader.DriveIsRegular, pipes.Signature, crypto.Recipient); err != nil { 70 | return err 71 | } 72 | 73 | if onHeader != nil { 74 | dbhdr, err := converters.TarHeaderToDBHeader(int64(record), -1, int64(block), -1, hdr) 75 | if err != nil { 76 | return err 77 | } 78 | 79 | onHeader(converters.DBHeaderToConfigHeader(dbhdr)) 80 | } 81 | 82 | if !preview { 83 | if to == "" { 84 | to = path.Base(hdr.Name) 85 | } 86 | 87 | if hdr.Typeflag == tar.TypeDir { 88 | return mkdirAll(to, hdr.FileInfo().Mode()) 89 | } 90 | 91 | dstFile, err := getDst(to, hdr.FileInfo().Mode()) 92 | if err != nil { 93 | return err 94 | } 95 | 96 | // Don't decompress non-regular files 97 | if !hdr.FileInfo().Mode().IsRegular() { 98 | if _, err := io.Copy(dstFile, tr); err != nil { 99 | return err 100 | } 101 | 102 | return nil 103 | } 104 | 105 | decryptor, err := encryption.Decrypt(tr, pipes.Encryption, crypto.Identity) 106 | if err != nil { 107 | return err 108 | } 109 | 110 | decompressor, err := compression.Decompress(decryptor, pipes.Compression) 111 | if err != nil { 112 | return err 113 | } 114 | 115 | sig := "" 116 | if hdr.PAXRecords != nil { 117 | if s, ok := hdr.PAXRecords[records.STFSRecordSignature]; ok { 118 | sig = s 119 | } 120 | } 121 | 122 | verifier, verify, err := signature.Verify(decompressor, reader.DriveIsRegular, pipes.Signature, crypto.Recipient, sig) 123 | if err != nil { 124 | return err 125 | } 126 | 127 | if _, err := io.Copy(dstFile, verifier); err != nil { 128 | return err 129 | } 130 | 131 | if err := verify(); err != nil { 132 | return err 133 | } 134 | 135 | if err := decryptor.Close(); err != nil { 136 | return err 137 | } 138 | 139 | if err := decompressor.Close(); err != nil { 140 | return err 141 | } 142 | 143 | if err := dstFile.Close(); err != nil { 144 | return err 145 | } 146 | } 147 | 148 | return nil 149 | } 150 | -------------------------------------------------------------------------------- /cmd/stfs/cmd/operation_initialize.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | 7 | "github.com/pojntfx/stfs/internal/check" 8 | "github.com/pojntfx/stfs/internal/keyext" 9 | "github.com/pojntfx/stfs/internal/logging" 10 | "github.com/pojntfx/stfs/pkg/config" 11 | "github.com/pojntfx/stfs/pkg/keys" 12 | "github.com/pojntfx/stfs/pkg/mtio" 13 | "github.com/pojntfx/stfs/pkg/operations" 14 | "github.com/pojntfx/stfs/pkg/persisters" 15 | "github.com/pojntfx/stfs/pkg/tape" 16 | "github.com/spf13/cobra" 17 | "github.com/spf13/viper" 18 | ) 19 | 20 | var operationInitializeCmd = &cobra.Command{ 21 | Use: "initialize", 22 | Aliases: []string{"ini", "i", "init"}, 23 | Short: "Truncate and initalize a file or directory", 24 | PreRunE: func(cmd *cobra.Command, args []string) error { 25 | if err := viper.BindPFlags(cmd.PersistentFlags()); err != nil { 26 | return err 27 | } 28 | 29 | if err := 
check.CheckCompressionLevel(viper.GetString(compressionLevelFlag)); err != nil { 30 | return err 31 | } 32 | 33 | if err := check.CheckKeyAccessible(viper.GetString(encryptionFlag), viper.GetString(recipientFlag)); err != nil { 34 | return err 35 | } 36 | 37 | return check.CheckKeyAccessible(viper.GetString(signatureFlag), viper.GetString(identityFlag)) 38 | }, 39 | RunE: func(cmd *cobra.Command, args []string) error { 40 | pubkey, err := keyext.ReadKey(viper.GetString(encryptionFlag), viper.GetString(recipientFlag)) 41 | if err != nil { 42 | return err 43 | } 44 | 45 | recipient, err := keys.ParseRecipient(viper.GetString(encryptionFlag), pubkey) 46 | if err != nil { 47 | return err 48 | } 49 | 50 | privkey, err := keyext.ReadKey(viper.GetString(signatureFlag), viper.GetString(identityFlag)) 51 | if err != nil { 52 | return err 53 | } 54 | 55 | identity, err := keys.ParseSignerIdentity(viper.GetString(signatureFlag), privkey, viper.GetString(passwordFlag)) 56 | if err != nil { 57 | return err 58 | } 59 | 60 | mt := mtio.MagneticTapeIO{} 61 | tm := tape.NewTapeManager( 62 | viper.GetString(driveFlag), 63 | mt, 64 | viper.GetInt(recordSizeFlag), 65 | true, 66 | ) 67 | 68 | metadataPersister := persisters.NewMetadataPersister(viper.GetString(metadataFlag)) 69 | if err := metadataPersister.Open(); err != nil { 70 | return err 71 | } 72 | 73 | ops := operations.NewOperations( 74 | config.BackendConfig{ 75 | GetWriter: tm.GetWriter, 76 | CloseWriter: tm.Close, 77 | 78 | GetReader: tm.GetReader, 79 | CloseReader: tm.Close, 80 | 81 | MagneticTapeIO: mt, 82 | }, 83 | config.MetadataConfig{ 84 | Metadata: metadataPersister, 85 | }, 86 | 87 | config.PipeConfig{ 88 | Compression: viper.GetString(compressionFlag), 89 | Encryption: viper.GetString(encryptionFlag), 90 | Signature: viper.GetString(signatureFlag), 91 | RecordSize: viper.GetInt(recordSizeFlag), 92 | }, 93 | config.CryptoConfig{ 94 | Recipient: recipient, 95 | Identity: identity, 96 | Password: viper.GetString(passwordFlag), 97 | }, 98 | 99 | logging.NewCSVLogger().PrintHeaderEvent, 100 | ) 101 | 102 | return ops.Initialize("/", os.ModePerm, viper.GetString(compressionLevelFlag)) 103 | }, 104 | } 105 | 106 | func init() { 107 | operationInitializeCmd.PersistentFlags().IntP(recordSizeFlag, "z", 20, "Amount of 512-bit blocks per record") 108 | operationInitializeCmd.PersistentFlags().StringP(compressionLevelFlag, "l", config.CompressionLevelBalancedKey, fmt.Sprintf("Compression level to use (default %v, available are %v)", config.CompressionLevelBalancedKey, config.KnownCompressionLevels)) 109 | operationInitializeCmd.PersistentFlags().StringP(recipientFlag, "r", "", "Path to public key of recipient to encrypt for") 110 | operationInitializeCmd.PersistentFlags().StringP(identityFlag, "i", "", "Path to private key to sign with") 111 | operationInitializeCmd.PersistentFlags().StringP(passwordFlag, "p", "", "Password for the private key") 112 | 113 | viper.AutomaticEnv() 114 | 115 | operationCmd.AddCommand(operationInitializeCmd) 116 | } 117 | -------------------------------------------------------------------------------- /internal/converters/header.go: -------------------------------------------------------------------------------- 1 | package converters 2 | 3 | import ( 4 | "archive/tar" 5 | "encoding/json" 6 | 7 | models "github.com/pojntfx/stfs/internal/db/sqlite/models/metadata" 8 | "github.com/pojntfx/stfs/pkg/config" 9 | ) 10 | 11 | func ConfigHeaderToDBHeader(confighdr *config.Header) *models.Header { 12 | return &models.Header{ 13 | 
Record: confighdr.Record, 14 | Lastknownrecord: confighdr.Lastknownrecord, 15 | Block: confighdr.Block, 16 | Lastknownblock: confighdr.Lastknownblock, 17 | Typeflag: confighdr.Typeflag, 18 | Name: confighdr.Name, 19 | Linkname: confighdr.Linkname, 20 | Size: confighdr.Size, 21 | Mode: confighdr.Mode, 22 | UID: confighdr.UID, 23 | Gid: confighdr.Gid, 24 | Uname: confighdr.Uname, 25 | Gname: confighdr.Gname, 26 | Modtime: confighdr.Modtime, 27 | Accesstime: confighdr.Accesstime, 28 | Changetime: confighdr.Changetime, 29 | Devmajor: confighdr.Devmajor, 30 | Devminor: confighdr.Devminor, 31 | Paxrecords: confighdr.Paxrecords, 32 | Format: confighdr.Format, 33 | Deleted: confighdr.Deleted, 34 | } 35 | } 36 | 37 | func DBHeaderToConfigHeader(dbhdr *models.Header) *config.Header { 38 | return &config.Header{ 39 | Record: dbhdr.Record, 40 | Lastknownrecord: dbhdr.Lastknownrecord, 41 | Block: dbhdr.Block, 42 | Lastknownblock: dbhdr.Lastknownblock, 43 | Typeflag: dbhdr.Typeflag, 44 | Name: dbhdr.Name, 45 | Linkname: dbhdr.Linkname, 46 | Size: dbhdr.Size, 47 | Mode: dbhdr.Mode, 48 | UID: dbhdr.UID, 49 | Gid: dbhdr.Gid, 50 | Uname: dbhdr.Uname, 51 | Gname: dbhdr.Gname, 52 | Modtime: dbhdr.Modtime, 53 | Accesstime: dbhdr.Accesstime, 54 | Changetime: dbhdr.Changetime, 55 | Devmajor: dbhdr.Devmajor, 56 | Devminor: dbhdr.Devminor, 57 | Paxrecords: dbhdr.Paxrecords, 58 | Format: dbhdr.Format, 59 | Deleted: dbhdr.Deleted, 60 | } 61 | } 62 | 63 | func DBHeaderToTarHeader(dbhdr *models.Header) (*tar.Header, error) { 64 | paxRecords := map[string]string{} 65 | if err := json.Unmarshal([]byte(dbhdr.Paxrecords), &paxRecords); err != nil { 66 | return nil, err 67 | } 68 | if paxRecords == nil { 69 | paxRecords = map[string]string{} 70 | } 71 | 72 | hdr := &tar.Header{ 73 | Typeflag: byte(dbhdr.Typeflag), 74 | Name: dbhdr.Name, 75 | Linkname: dbhdr.Linkname, 76 | Size: dbhdr.Size, 77 | Mode: dbhdr.Mode, 78 | Uid: int(dbhdr.UID), 79 | Gid: int(dbhdr.Gid), 80 | Uname: dbhdr.Uname, 81 | Gname: dbhdr.Gname, 82 | ModTime: dbhdr.Modtime, 83 | AccessTime: dbhdr.Accesstime, 84 | ChangeTime: dbhdr.Changetime, 85 | Devmajor: dbhdr.Devmajor, 86 | Devminor: dbhdr.Devminor, 87 | PAXRecords: paxRecords, 88 | Format: tar.Format(dbhdr.Format), 89 | } 90 | 91 | return hdr, nil 92 | } 93 | 94 | func TarHeaderToDBHeader(record, lastKnownRecord, block, lastKnownBlock int64, tarhdr *tar.Header) (*models.Header, error) { 95 | paxRecords, err := json.Marshal(tarhdr.PAXRecords) 96 | if err != nil { 97 | return nil, err 98 | } 99 | 100 | hdr := models.Header{ 101 | Record: record, 102 | Lastknownrecord: lastKnownRecord, 103 | Block: block, 104 | Lastknownblock: lastKnownBlock, 105 | Typeflag: int64(tarhdr.Typeflag), 106 | Name: tarhdr.Name, 107 | Linkname: tarhdr.Linkname, 108 | Size: tarhdr.Size, 109 | Mode: tarhdr.Mode, 110 | UID: int64(tarhdr.Uid), 111 | Gid: int64(tarhdr.Gid), 112 | Uname: tarhdr.Uname, 113 | Gname: tarhdr.Gname, 114 | Modtime: tarhdr.ModTime, 115 | Accesstime: tarhdr.AccessTime, 116 | Changetime: tarhdr.ChangeTime, 117 | Devmajor: tarhdr.Devmajor, 118 | Devminor: tarhdr.Devminor, 119 | Paxrecords: string(paxRecords), 120 | Format: int64(tarhdr.Format), 121 | } 122 | 123 | return &hdr, nil 124 | } 125 | -------------------------------------------------------------------------------- /cmd/stfs/cmd/recovery_index.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "archive/tar" 5 | 6 | "github.com/pojntfx/stfs/internal/check" 7 | 
"github.com/pojntfx/stfs/internal/keyext" 8 | "github.com/pojntfx/stfs/internal/logging" 9 | "github.com/pojntfx/stfs/pkg/config" 10 | "github.com/pojntfx/stfs/pkg/encryption" 11 | "github.com/pojntfx/stfs/pkg/keys" 12 | "github.com/pojntfx/stfs/pkg/mtio" 13 | "github.com/pojntfx/stfs/pkg/persisters" 14 | "github.com/pojntfx/stfs/pkg/recovery" 15 | "github.com/pojntfx/stfs/pkg/signature" 16 | "github.com/pojntfx/stfs/pkg/tape" 17 | "github.com/spf13/cobra" 18 | "github.com/spf13/viper" 19 | ) 20 | 21 | var recoveryIndexCmd = &cobra.Command{ 22 | Use: "index", 23 | Short: "Index contents of tape or tar file", 24 | PreRunE: func(cmd *cobra.Command, args []string) error { 25 | if err := viper.BindPFlags(cmd.PersistentFlags()); err != nil { 26 | return err 27 | } 28 | 29 | if err := check.CheckKeyAccessible(viper.GetString(encryptionFlag), viper.GetString(identityFlag)); err != nil { 30 | return err 31 | } 32 | 33 | return check.CheckKeyAccessible(viper.GetString(signatureFlag), viper.GetString(recipientFlag)) 34 | }, 35 | RunE: func(cmd *cobra.Command, args []string) error { 36 | pubkey, err := keyext.ReadKey(viper.GetString(signatureFlag), viper.GetString(recipientFlag)) 37 | if err != nil { 38 | return err 39 | } 40 | 41 | recipient, err := keys.ParseSignerRecipient(viper.GetString(signatureFlag), pubkey) 42 | if err != nil { 43 | return err 44 | } 45 | 46 | privkey, err := keyext.ReadKey(viper.GetString(encryptionFlag), viper.GetString(identityFlag)) 47 | if err != nil { 48 | return err 49 | } 50 | 51 | identity, err := keys.ParseIdentity(viper.GetString(encryptionFlag), privkey, viper.GetString(passwordFlag)) 52 | if err != nil { 53 | return err 54 | } 55 | 56 | reader, readerIsRegular, err := tape.OpenTapeReadOnly( 57 | viper.GetString(driveFlag), 58 | ) 59 | if err != nil { 60 | return nil 61 | } 62 | defer reader.Close() 63 | 64 | metadataPersister := persisters.NewMetadataPersister(viper.GetString(metadataFlag)) 65 | if err := metadataPersister.Open(); err != nil { 66 | return err 67 | } 68 | 69 | return recovery.Index( 70 | config.DriveReaderConfig{ 71 | Drive: reader, 72 | DriveIsRegular: readerIsRegular, 73 | }, 74 | mtio.MagneticTapeIO{}, 75 | config.MetadataConfig{ 76 | Metadata: metadataPersister, 77 | }, 78 | config.PipeConfig{ 79 | Compression: viper.GetString(compressionFlag), 80 | Encryption: viper.GetString(encryptionFlag), 81 | Signature: viper.GetString(signatureFlag), 82 | RecordSize: viper.GetInt(recordSizeFlag), 83 | }, 84 | config.CryptoConfig{ 85 | Recipient: recipient, 86 | Identity: identity, 87 | Password: viper.GetString(passwordFlag), 88 | }, 89 | 90 | viper.GetInt(recordFlag), 91 | viper.GetInt(blockFlag), 92 | viper.GetBool(overwriteFlag), 93 | false, 94 | 0, 95 | 96 | func(hdr *tar.Header, i int) error { 97 | return encryption.DecryptHeader(hdr, viper.GetString(encryptionFlag), identity) 98 | }, 99 | func(hdr *tar.Header, isRegular bool) error { 100 | return signature.VerifyHeader(hdr, isRegular, viper.GetString(signatureFlag), recipient) 101 | }, 102 | 103 | logging.NewCSVLogger().PrintHeader, 104 | ) 105 | }, 106 | } 107 | 108 | func init() { 109 | recoveryIndexCmd.PersistentFlags().IntP(recordSizeFlag, "z", 20, "Amount of 512-bit blocks per record") 110 | recoveryIndexCmd.PersistentFlags().IntP(recordFlag, "k", 0, "Record to seek too before counting") 111 | recoveryIndexCmd.PersistentFlags().IntP(blockFlag, "b", 0, "Block in record to seek too before counting") 112 | recoveryIndexCmd.PersistentFlags().BoolP(overwriteFlag, "o", false, "Remove the old index 
before starting to index") 113 | recoveryIndexCmd.PersistentFlags().StringP(identityFlag, "i", "", "Path to private key of recipient that has been encrypted for") 114 | recoveryIndexCmd.PersistentFlags().StringP(passwordFlag, "p", "", "Password for the private key") 115 | recoveryIndexCmd.PersistentFlags().StringP(recipientFlag, "r", "", "Path to the public key to verify with") 116 | 117 | viper.AutomaticEnv() 118 | 119 | recoveryCmd.AddCommand(recoveryIndexCmd) 120 | } 121 | -------------------------------------------------------------------------------- /pkg/operations/delete.go: -------------------------------------------------------------------------------- 1 | package operations 2 | 3 | import ( 4 | "archive/tar" 5 | "context" 6 | "database/sql" 7 | "path/filepath" 8 | 9 | "github.com/pojntfx/stfs/internal/converters" 10 | "github.com/pojntfx/stfs/internal/records" 11 | "github.com/pojntfx/stfs/internal/tarext" 12 | "github.com/pojntfx/stfs/pkg/config" 13 | "github.com/pojntfx/stfs/pkg/encryption" 14 | "github.com/pojntfx/stfs/pkg/recovery" 15 | "github.com/pojntfx/stfs/pkg/signature" 16 | ) 17 | 18 | func (o *Operations) Delete(name string) error { 19 | name = filepath.ToSlash(name) 20 | 21 | o.diskOperationLock.Lock() 22 | defer o.diskOperationLock.Unlock() 23 | 24 | writer, err := o.backend.GetWriter() 25 | if err != nil { 26 | return err 27 | } 28 | 29 | dirty := false 30 | tw, cleanup, err := tarext.NewTapeWriter(writer.Drive, writer.DriveIsRegular, o.pipes.RecordSize) 31 | if err != nil { 32 | return err 33 | } 34 | 35 | lastIndexedRecord, lastIndexedBlock, err := o.metadata.Metadata.GetLastIndexedRecordAndBlock(context.Background(), o.pipes.RecordSize) 36 | if err != nil { 37 | return err 38 | } 39 | 40 | headersToDelete := []*config.Header{} 41 | dbhdr, err := o.metadata.Metadata.GetHeader(context.Background(), name) 42 | if err != nil { 43 | if err == sql.ErrNoRows { 44 | dbhdr, err = o.metadata.Metadata.GetHeaderByLinkname(context.Background(), name) 45 | if err != nil { 46 | return err 47 | } 48 | } else { 49 | return err 50 | } 51 | } 52 | headersToDelete = append(headersToDelete, dbhdr) 53 | 54 | // If the header refers to a directory, get it's children 55 | if dbhdr.Typeflag == tar.TypeDir && dbhdr.Linkname == "" { 56 | dbhdrs, err := o.metadata.Metadata.GetHeaderChildren(context.Background(), name) 57 | if err != nil { 58 | return err 59 | } 60 | 61 | headersToDelete = append(headersToDelete, dbhdrs...) 
62 | } 63 | 64 | // Append deletion hdrs to the tape or tar file 65 | hdrs := []tar.Header{} 66 | for _, dbhdr := range headersToDelete { 67 | hdr, err := converters.DBHeaderToTarHeader(converters.ConfigHeaderToDBHeader(dbhdr)) 68 | if err != nil { 69 | return err 70 | } 71 | 72 | hdr.Size = 0 // Don't try to seek after the record 73 | hdr.PAXRecords[records.STFSRecordVersion] = records.STFSRecordVersion1 74 | hdr.PAXRecords[records.STFSRecordAction] = records.STFSRecordActionDelete 75 | 76 | hdrs = append(hdrs, *hdr) 77 | 78 | if o.onHeader != nil { 79 | dbhdr, err := converters.TarHeaderToDBHeader(-1, -1, -1, -1, hdr) 80 | if err != nil { 81 | return err 82 | } 83 | 84 | o.onHeader(&config.HeaderEvent{ 85 | Type: config.HeaderEventTypeDelete, 86 | Indexed: false, 87 | Header: converters.DBHeaderToConfigHeader(dbhdr), 88 | }) 89 | } 90 | 91 | if err := signature.SignHeader(hdr, writer.DriveIsRegular, o.pipes.Signature, o.crypto.Identity); err != nil { 92 | return err 93 | } 94 | 95 | if err := encryption.EncryptHeader(hdr, o.pipes.Encryption, o.crypto.Recipient); err != nil { 96 | return err 97 | } 98 | 99 | if err := tw.WriteHeader(hdr); err != nil { 100 | return err 101 | } 102 | 103 | dirty = true 104 | } 105 | 106 | if err := cleanup(&dirty); err != nil { 107 | return err 108 | } 109 | 110 | if err := o.backend.CloseWriter(); err != nil { 111 | return err 112 | } 113 | 114 | reader, err := o.backend.GetReader() 115 | if err != nil { 116 | return err 117 | } 118 | defer o.backend.CloseReader() 119 | 120 | return recovery.Index( 121 | reader, 122 | o.backend.MagneticTapeIO, 123 | o.metadata, 124 | o.pipes, 125 | o.crypto, 126 | 127 | int(lastIndexedRecord), 128 | int(lastIndexedBlock), 129 | false, 130 | false, 131 | 1, // Ignore the first header, which is the last header which we already indexed 132 | 133 | func(hdr *tar.Header, i int) error { 134 | if len(hdrs) <= i { 135 | return config.ErrTarHeaderMissing 136 | } 137 | 138 | *hdr = hdrs[i] 139 | 140 | return nil 141 | }, 142 | func(hdr *tar.Header, isRegular bool) error { 143 | return nil // We sign above, no need to verify 144 | }, 145 | 146 | func(hdr *config.Header) { 147 | o.onHeader(&config.HeaderEvent{ 148 | Type: config.HeaderEventTypeDelete, 149 | Indexed: true, 150 | Header: hdr, 151 | }) 152 | }, 153 | ) 154 | } 155 | -------------------------------------------------------------------------------- /cmd/stfs/cmd/recovery_fetch.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "io" 5 | "io/fs" 6 | "os" 7 | 8 | "github.com/pojntfx/stfs/internal/check" 9 | "github.com/pojntfx/stfs/internal/keyext" 10 | "github.com/pojntfx/stfs/internal/logging" 11 | "github.com/pojntfx/stfs/pkg/config" 12 | "github.com/pojntfx/stfs/pkg/keys" 13 | "github.com/pojntfx/stfs/pkg/mtio" 14 | "github.com/pojntfx/stfs/pkg/recovery" 15 | "github.com/pojntfx/stfs/pkg/tape" 16 | "github.com/spf13/cobra" 17 | "github.com/spf13/viper" 18 | ) 19 | 20 | const ( 21 | recordFlag = "record" 22 | blockFlag = "block" 23 | toFlag = "to" 24 | previewFlag = "preview" 25 | ) 26 | 27 | var recoveryFetchCmd = &cobra.Command{ 28 | Use: "fetch", 29 | Short: "Fetch a file or directory from tape or tar file by record and block without the index", 30 | PreRunE: func(cmd *cobra.Command, args []string) error { 31 | if err := viper.BindPFlags(cmd.PersistentFlags()); err != nil { 32 | return err 33 | } 34 | 35 | if err := check.CheckKeyAccessible(viper.GetString(encryptionFlag), 
viper.GetString(identityFlag)); err != nil { 36 | return err 37 | } 38 | 39 | return check.CheckKeyAccessible(viper.GetString(signatureFlag), viper.GetString(recipientFlag)) 40 | }, 41 | RunE: func(cmd *cobra.Command, args []string) error { 42 | pubkey, err := keyext.ReadKey(viper.GetString(signatureFlag), viper.GetString(recipientFlag)) 43 | if err != nil { 44 | return err 45 | } 46 | 47 | recipient, err := keys.ParseSignerRecipient(viper.GetString(signatureFlag), pubkey) 48 | if err != nil { 49 | return err 50 | } 51 | 52 | privkey, err := keyext.ReadKey(viper.GetString(encryptionFlag), viper.GetString(identityFlag)) 53 | if err != nil { 54 | return err 55 | } 56 | 57 | identity, err := keys.ParseIdentity(viper.GetString(encryptionFlag), privkey, viper.GetString(passwordFlag)) 58 | if err != nil { 59 | return err 60 | } 61 | 62 | reader, readerIsRegular, err := tape.OpenTapeReadOnly( 63 | viper.GetString(driveFlag), 64 | ) 65 | if err != nil { 66 | return err 67 | } 68 | defer reader.Close() 69 | 70 | return recovery.Fetch( 71 | config.DriveReaderConfig{ 72 | Drive: reader, 73 | DriveIsRegular: readerIsRegular, 74 | }, 75 | mtio.MagneticTapeIO{}, 76 | config.PipeConfig{ 77 | Compression: viper.GetString(compressionFlag), 78 | Encryption: viper.GetString(encryptionFlag), 79 | Signature: viper.GetString(signatureFlag), 80 | RecordSize: viper.GetInt(recordSizeFlag), 81 | }, 82 | config.CryptoConfig{ 83 | Recipient: recipient, 84 | Identity: identity, 85 | Password: viper.GetString(passwordFlag), 86 | }, 87 | 88 | func(path string, mode fs.FileMode) (io.WriteCloser, error) { 89 | dstFile, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, mode) 90 | if err != nil { 91 | return nil, err 92 | } 93 | 94 | if err := dstFile.Truncate(0); err != nil { 95 | return nil, err 96 | } 97 | 98 | return dstFile, nil 99 | }, 100 | func(path string, mode fs.FileMode) error { 101 | return os.MkdirAll(path, mode) 102 | }, 103 | 104 | viper.GetInt(recordFlag), 105 | viper.GetInt(blockFlag), 106 | viper.GetString(toFlag), 107 | viper.GetBool(previewFlag), 108 | 109 | logging.NewCSVLogger().PrintHeader, 110 | ) 111 | }, 112 | } 113 | 114 | func init() { 115 | recoveryFetchCmd.PersistentFlags().IntP(recordSizeFlag, "z", 20, "Amount of 512-bit blocks per record") 116 | recoveryFetchCmd.PersistentFlags().IntP(recordFlag, "k", 0, "Record to seek to") 117 | recoveryFetchCmd.PersistentFlags().IntP(blockFlag, "b", 0, "Block in record to seek to") 118 | recoveryFetchCmd.PersistentFlags().StringP(toFlag, "t", "", "File to restore to (archived name by default)") 119 | recoveryFetchCmd.PersistentFlags().BoolP(previewFlag, "w", false, "Only read the header") 120 | recoveryFetchCmd.PersistentFlags().StringP(identityFlag, "i", "", "Path to private key of recipient that has been encrypted for") 121 | recoveryFetchCmd.PersistentFlags().StringP(passwordFlag, "p", "", "Password for the private key") 122 | recoveryFetchCmd.PersistentFlags().StringP(recipientFlag, "r", "", "Path to the public key to verify with") 123 | 124 | viper.AutomaticEnv() 125 | 126 | recoveryCmd.AddCommand(recoveryFetchCmd) 127 | } 128 | -------------------------------------------------------------------------------- /examples/simple/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "log" 6 | "os" 7 | "path/filepath" 8 | 9 | "github.com/pojntfx/stfs/examples" 10 | "github.com/pojntfx/stfs/pkg/cache" 11 | "github.com/pojntfx/stfs/pkg/config" 12 | 
"github.com/pojntfx/stfs/pkg/fs" 13 | "github.com/pojntfx/stfs/pkg/mtio" 14 | "github.com/pojntfx/stfs/pkg/operations" 15 | "github.com/pojntfx/stfs/pkg/persisters" 16 | "github.com/pojntfx/stfs/pkg/tape" 17 | ) 18 | 19 | func main() { 20 | driveFlag := flag.String("drive", "/dev/nst0", "Tape or tar file to use") 21 | recordSizeFlag := flag.Int("recordSize", 20, "Amount of 512-bit blocks per record") 22 | metadataFlag := flag.String("metadata", filepath.Join(os.TempDir(), "metadata.sqlite"), "Metadata database to use") 23 | writeCacheFlag := flag.String("writeCache", filepath.Join(os.TempDir(), "stfs-write-cache"), "Directory to use for write cache") 24 | verboseFlag := flag.Bool("verbose", false, "Enable verbose logging") 25 | 26 | flag.Parse() 27 | 28 | mt := mtio.MagneticTapeIO{} 29 | tm := tape.NewTapeManager( 30 | *driveFlag, 31 | mt, 32 | *recordSizeFlag, 33 | false, 34 | ) 35 | 36 | metadataPersister := persisters.NewMetadataPersister(*metadataFlag) 37 | if err := metadataPersister.Open(); err != nil { 38 | panic(err) 39 | } 40 | 41 | l := &examples.Logger{ 42 | Verbose: *verboseFlag, 43 | } 44 | 45 | metadataConfig := config.MetadataConfig{ 46 | Metadata: metadataPersister, 47 | } 48 | pipeConfig := config.PipeConfig{ 49 | Compression: config.NoneKey, 50 | Encryption: config.NoneKey, 51 | Signature: config.NoneKey, 52 | RecordSize: *recordSizeFlag, 53 | } 54 | backendConfig := config.BackendConfig{ 55 | GetWriter: tm.GetWriter, 56 | CloseWriter: tm.Close, 57 | 58 | GetReader: tm.GetReader, 59 | CloseReader: tm.Close, 60 | 61 | MagneticTapeIO: mt, 62 | } 63 | readCryptoConfig := config.CryptoConfig{} 64 | 65 | readOps := operations.NewOperations( 66 | backendConfig, 67 | metadataConfig, 68 | 69 | pipeConfig, 70 | readCryptoConfig, 71 | 72 | func(event *config.HeaderEvent) { 73 | l.Debug("Header read", event) 74 | }, 75 | ) 76 | writeOps := operations.NewOperations( 77 | backendConfig, 78 | metadataConfig, 79 | 80 | pipeConfig, 81 | config.CryptoConfig{}, 82 | 83 | func(event *config.HeaderEvent) { 84 | l.Debug("Header write", event) 85 | }, 86 | ) 87 | 88 | stfs := fs.NewSTFS( 89 | readOps, 90 | writeOps, 91 | 92 | config.MetadataConfig{ 93 | Metadata: metadataPersister, 94 | }, 95 | 96 | config.CompressionLevelFastestKey, 97 | func() (cache.WriteCache, func() error, error) { 98 | return cache.NewCacheWrite( 99 | *writeCacheFlag, 100 | config.WriteCacheTypeFile, 101 | ) 102 | }, 103 | false, 104 | false, 105 | 106 | func(hdr *config.Header) { 107 | l.Trace("Header transform", hdr) 108 | }, 109 | l, 110 | ) 111 | 112 | root, err := stfs.Initialize("/", os.ModePerm) 113 | if err != nil { 114 | panic(err) 115 | } 116 | 117 | fs, err := cache.NewCacheFilesystem( 118 | stfs, 119 | root, 120 | config.NoneKey, 121 | 0, 122 | "", 123 | ) 124 | if err != nil { 125 | panic(err) 126 | } 127 | 128 | log.Println("stat /") 129 | 130 | stat, err := fs.Stat("/") 131 | if err != nil { 132 | panic(err) 133 | } 134 | 135 | log.Println("Result of stat /:", stat) 136 | 137 | log.Println("open /") 138 | 139 | dir, err := fs.Open("/") 140 | if err != nil { 141 | panic(err) 142 | } 143 | 144 | log.Println("Result of open /:", dir) 145 | 146 | log.Println("readdir /") 147 | 148 | children, err := dir.Readdir(-1) 149 | if err != nil { 150 | panic(err) 151 | } 152 | 153 | log.Println("Result of readdir /:", children) 154 | 155 | log.Println("create /test.txt") 156 | 157 | file, err := fs.Create("/test.txt") 158 | if err != nil { 159 | panic(err) 160 | } 161 | 162 | log.Println("Result of create /test.txt:", 
file) 163 | 164 | log.Println("writeString /test.txt") 165 | 166 | n, err := file.WriteString("Hello, world!") 167 | if err != nil { 168 | panic(err) 169 | } 170 | 171 | log.Println("Result of writeString /test.txt:", n) 172 | 173 | if err := file.Close(); err != nil { 174 | panic(err) 175 | } 176 | 177 | log.Println("readdir /") 178 | 179 | children, err = dir.Readdir(-1) 180 | if err != nil { 181 | panic(err) 182 | } 183 | 184 | log.Println("Result of readdir /:", children) 185 | 186 | if err := dir.Close(); err != nil { 187 | panic(err) 188 | } 189 | } 190 | -------------------------------------------------------------------------------- /pkg/signature/sign.go: -------------------------------------------------------------------------------- 1 | package signature 2 | 3 | import ( 4 | "archive/tar" 5 | "bytes" 6 | "encoding/base64" 7 | "encoding/json" 8 | "io" 9 | 10 | "aead.dev/minisign" 11 | "github.com/ProtonMail/go-crypto/openpgp" 12 | "github.com/ProtonMail/go-crypto/openpgp/packet" 13 | "github.com/pojntfx/stfs/internal/records" 14 | "github.com/pojntfx/stfs/pkg/config" 15 | ) 16 | 17 | func Sign( 18 | src io.Reader, 19 | isRegular bool, 20 | signatureFormat string, 21 | identity interface{}, 22 | ) (io.Reader, func() (string, error), error) { 23 | switch signatureFormat { 24 | case config.SignatureFormatMinisignKey: 25 | if !isRegular { 26 | return nil, nil, config.ErrSignatureFormatRegularOnly 27 | } 28 | 29 | identity, ok := identity.(minisign.PrivateKey) 30 | if !ok { 31 | return nil, nil, config.ErrIdentityUnparsable 32 | } 33 | 34 | signer := minisign.NewReader(src) 35 | 36 | return signer, func() (string, error) { 37 | return base64.StdEncoding.EncodeToString(signer.Sign(identity)), nil 38 | }, nil 39 | case config.SignatureFormatPGPKey: 40 | identities, ok := identity.(openpgp.EntityList) 41 | if !ok { 42 | return nil, nil, config.ErrIdentityUnparsable 43 | } 44 | 45 | if len(identities) < 1 { 46 | return nil, nil, config.ErrIdentityUnparsable 47 | } 48 | 49 | // See openpgp.DetachSign 50 | var c *packet.Config 51 | signingKey, ok := identities[0].SigningKeyById(c.Now(), c.SigningKey()) 52 | if !ok || signingKey.PrivateKey == nil || signingKey.PublicKey == nil { 53 | return nil, nil, config.ErrIdentityUnparsable 54 | } 55 | 56 | sig := new(packet.Signature) 57 | sig.SigType = packet.SigTypeBinary 58 | sig.PubKeyAlgo = signingKey.PrivateKey.PubKeyAlgo 59 | sig.Hash = c.Hash() 60 | sig.CreationTime = c.Now() 61 | sigLifetimeSecs := c.SigLifetime() 62 | sig.SigLifetimeSecs = &sigLifetimeSecs 63 | sig.IssuerKeyId = &signingKey.PrivateKey.KeyId 64 | 65 | hash := sig.Hash.New() 66 | 67 | return io.TeeReader(src, hash), func() (string, error) { 68 | if err := sig.Sign(hash, signingKey.PrivateKey, c); err != nil { 69 | return "", err 70 | } 71 | 72 | out := &bytes.Buffer{} 73 | if err := sig.Serialize(out); err != nil { 74 | return "", err 75 | } 76 | 77 | return base64.StdEncoding.EncodeToString(out.Bytes()), nil 78 | }, nil 79 | case config.NoneKey: 80 | return src, func() (string, error) { 81 | return "", nil 82 | }, nil 83 | default: 84 | return nil, nil, config.ErrSignatureFormatUnsupported 85 | } 86 | } 87 | 88 | func SignHeader( 89 | hdr *tar.Header, 90 | isRegular bool, 91 | signatureFormat string, 92 | identity interface{}, 93 | ) error { 94 | if signatureFormat == config.NoneKey { 95 | return nil 96 | } 97 | 98 | newHdr := &tar.Header{ 99 | Format: tar.FormatPAX, 100 | Size: hdr.Size, 101 | PAXRecords: map[string]string{}, 102 | } 103 | 104 | wrappedHeader, err := 
json.Marshal(hdr) 105 | if err != nil { 106 | return err 107 | } 108 | 109 | newHdr.PAXRecords[records.STFSRecordEmbeddedHeader] = string(wrappedHeader) 110 | newHdr.PAXRecords[records.STFSRecordSignature], err = SignString(newHdr.PAXRecords[records.STFSRecordEmbeddedHeader], isRegular, signatureFormat, identity) 111 | if err != nil { 112 | return err 113 | } 114 | 115 | *hdr = *newHdr 116 | 117 | return nil 118 | } 119 | 120 | func SignString( 121 | src string, 122 | isRegular bool, 123 | signatureFormat string, 124 | identity interface{}, 125 | ) (string, error) { 126 | switch signatureFormat { 127 | case config.SignatureFormatMinisignKey: 128 | if !isRegular { 129 | return "", config.ErrSignatureFormatRegularOnly 130 | } 131 | 132 | identity, ok := identity.(minisign.PrivateKey) 133 | if !ok { 134 | return "", config.ErrIdentityUnparsable 135 | } 136 | 137 | return base64.StdEncoding.EncodeToString(minisign.Sign(identity, []byte(src))), nil 138 | case config.SignatureFormatPGPKey: 139 | identities, ok := identity.(openpgp.EntityList) 140 | if !ok { 141 | return "", config.ErrIdentityUnparsable 142 | } 143 | 144 | if len(identities) < 1 { 145 | return "", config.ErrIdentityUnparsable 146 | } 147 | 148 | out := &bytes.Buffer{} 149 | if err := openpgp.DetachSign(out, identities[0], bytes.NewBufferString(src), nil); err != nil { 150 | return "", err 151 | } 152 | 153 | return base64.StdEncoding.EncodeToString(out.Bytes()), nil 154 | case config.NoneKey: 155 | return src, nil 156 | default: 157 | return "", config.ErrSignatureFormatUnsupported 158 | } 159 | } 160 | -------------------------------------------------------------------------------- /cmd/stfs/cmd/operation_restore.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "io" 5 | "io/fs" 6 | "os" 7 | 8 | "github.com/pojntfx/stfs/internal/check" 9 | "github.com/pojntfx/stfs/internal/keyext" 10 | "github.com/pojntfx/stfs/internal/logging" 11 | "github.com/pojntfx/stfs/pkg/config" 12 | "github.com/pojntfx/stfs/pkg/keys" 13 | "github.com/pojntfx/stfs/pkg/mtio" 14 | "github.com/pojntfx/stfs/pkg/operations" 15 | "github.com/pojntfx/stfs/pkg/persisters" 16 | "github.com/pojntfx/stfs/pkg/tape" 17 | "github.com/spf13/cobra" 18 | "github.com/spf13/viper" 19 | ) 20 | 21 | const ( 22 | flattenFlag = "flatten" 23 | ) 24 | 25 | var operationRestoreCmd = &cobra.Command{ 26 | Use: "restore", 27 | Aliases: []string{"res", "r", "x", "get", "extract"}, 28 | Short: "Restore a file or directory from tape or tar file", 29 | PreRunE: func(cmd *cobra.Command, args []string) error { 30 | if err := viper.BindPFlags(cmd.PersistentFlags()); err != nil { 31 | return err 32 | } 33 | 34 | if err := check.CheckKeyAccessible(viper.GetString(encryptionFlag), viper.GetString(identityFlag)); err != nil { 35 | return err 36 | } 37 | 38 | return check.CheckKeyAccessible(viper.GetString(signatureFlag), viper.GetString(recipientFlag)) 39 | }, 40 | RunE: func(cmd *cobra.Command, args []string) error { 41 | pubkey, err := keyext.ReadKey(viper.GetString(signatureFlag), viper.GetString(recipientFlag)) 42 | if err != nil { 43 | return err 44 | } 45 | 46 | recipient, err := keys.ParseSignerRecipient(viper.GetString(signatureFlag), pubkey) 47 | if err != nil { 48 | return err 49 | } 50 | 51 | privkey, err := keyext.ReadKey(viper.GetString(encryptionFlag), viper.GetString(identityFlag)) 52 | if err != nil { 53 | return err 54 | } 55 | 56 | identity, err := keys.ParseIdentity(viper.GetString(encryptionFlag), 
privkey, viper.GetString(passwordFlag)) 57 | if err != nil { 58 | return err 59 | } 60 | 61 | mt := mtio.MagneticTapeIO{} 62 | tm := tape.NewTapeManager( 63 | viper.GetString(driveFlag), 64 | mt, 65 | viper.GetInt(recordSizeFlag), 66 | false, 67 | ) 68 | 69 | metadataPersister := persisters.NewMetadataPersister(viper.GetString(metadataFlag)) 70 | if err := metadataPersister.Open(); err != nil { 71 | return err 72 | } 73 | 74 | ops := operations.NewOperations( 75 | config.BackendConfig{ 76 | GetWriter: tm.GetWriter, 77 | CloseWriter: tm.Close, 78 | 79 | GetReader: tm.GetReader, 80 | CloseReader: tm.Close, 81 | 82 | MagneticTapeIO: mt, 83 | }, 84 | config.MetadataConfig{ 85 | Metadata: metadataPersister, 86 | }, 87 | 88 | config.PipeConfig{ 89 | Compression: viper.GetString(compressionFlag), 90 | Encryption: viper.GetString(encryptionFlag), 91 | Signature: viper.GetString(signatureFlag), 92 | RecordSize: viper.GetInt(recordSizeFlag), 93 | }, 94 | config.CryptoConfig{ 95 | Recipient: recipient, 96 | Identity: identity, 97 | Password: viper.GetString(passwordFlag), 98 | }, 99 | 100 | logging.NewCSVLogger().PrintHeaderEvent, 101 | ) 102 | 103 | return ops.Restore( 104 | func(path string, mode fs.FileMode) (io.WriteCloser, error) { 105 | dstFile, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, mode) 106 | if err != nil { 107 | return nil, err 108 | } 109 | 110 | if err := dstFile.Truncate(0); err != nil { 111 | return nil, err 112 | } 113 | 114 | return dstFile, nil 115 | }, 116 | func(path string, mode fs.FileMode) error { 117 | return os.MkdirAll(path, mode) 118 | }, 119 | 120 | viper.GetString(fromFlag), 121 | viper.GetString(toFlag), 122 | viper.GetBool(flattenFlag), 123 | ) 124 | }, 125 | } 126 | 127 | func init() { 128 | operationRestoreCmd.PersistentFlags().IntP(recordSizeFlag, "z", 20, "Amount of 512-bit blocks per record") 129 | operationRestoreCmd.PersistentFlags().StringP(fromFlag, "f", "", "File or directory to restore") 130 | operationRestoreCmd.PersistentFlags().StringP(toFlag, "t", "", "File or directory restore to (archived name by default)") 131 | operationRestoreCmd.PersistentFlags().BoolP(flattenFlag, "a", false, "Ignore the folder hierarchy on the tape or tar file") 132 | operationRestoreCmd.PersistentFlags().StringP(identityFlag, "i", "", "Path to private key of recipient that has been encrypted for") 133 | operationRestoreCmd.PersistentFlags().StringP(passwordFlag, "p", "", "Password for the private key") 134 | operationRestoreCmd.PersistentFlags().StringP(recipientFlag, "r", "", "Path to the public key to verify with") 135 | 136 | viper.AutomaticEnv() 137 | 138 | operationCmd.AddCommand(operationRestoreCmd) 139 | } 140 | -------------------------------------------------------------------------------- /pkg/operations/move.go: -------------------------------------------------------------------------------- 1 | package operations 2 | 3 | import ( 4 | "archive/tar" 5 | "context" 6 | "database/sql" 7 | "path" 8 | "path/filepath" 9 | "strings" 10 | 11 | "github.com/pojntfx/stfs/internal/converters" 12 | "github.com/pojntfx/stfs/internal/records" 13 | "github.com/pojntfx/stfs/internal/tarext" 14 | "github.com/pojntfx/stfs/pkg/config" 15 | "github.com/pojntfx/stfs/pkg/encryption" 16 | "github.com/pojntfx/stfs/pkg/recovery" 17 | "github.com/pojntfx/stfs/pkg/signature" 18 | ) 19 | 20 | func (o *Operations) Move(from string, to string) error { 21 | from, to = filepath.ToSlash(from), filepath.ToSlash(to) 22 | 23 | // Ignore no-op move operation 24 | if from == to { 25 | return nil 
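// A second no-op check follows further down, after `to` has been normalized against absolute and relative paths.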
26 | } 27 | 28 | o.diskOperationLock.Lock() 29 | defer o.diskOperationLock.Unlock() 30 | 31 | writer, err := o.backend.GetWriter() 32 | if err != nil { 33 | return err 34 | } 35 | 36 | dirty := false 37 | tw, cleanup, err := tarext.NewTapeWriter(writer.Drive, writer.DriveIsRegular, o.pipes.RecordSize) 38 | if err != nil { 39 | return err 40 | } 41 | 42 | lastIndexedRecord, lastIndexedBlock, err := o.metadata.Metadata.GetLastIndexedRecordAndBlock(context.Background(), o.pipes.RecordSize) 43 | if err != nil { 44 | return err 45 | } 46 | 47 | headersToMove := []*config.Header{} 48 | dbhdr, err := o.metadata.Metadata.GetHeader(context.Background(), from) 49 | if err != nil { 50 | if err == sql.ErrNoRows { 51 | dbhdr, err = o.metadata.Metadata.GetHeaderByLinkname(context.Background(), from) 52 | if err != nil { 53 | return err 54 | } 55 | } else { 56 | return err 57 | } 58 | } 59 | headersToMove = append(headersToMove, dbhdr) 60 | 61 | // Prevent moving from relative to absolute path 62 | if path.IsAbs(to) && !path.IsAbs(dbhdr.Name) { 63 | to = strings.TrimPrefix(to, "/") 64 | } 65 | 66 | // Ignore no-op move operation 67 | if from == to { 68 | return nil 69 | } 70 | 71 | // If the header refers to a directory, get it's children 72 | if dbhdr.Typeflag == tar.TypeDir { 73 | dbhdrs, err := o.metadata.Metadata.GetHeaderChildren(context.Background(), from) 74 | if err != nil { 75 | return err 76 | } 77 | 78 | headersToMove = append(headersToMove, dbhdrs...) 79 | } 80 | 81 | // Append move headers to the tape or tar file 82 | hdrs := []tar.Header{} 83 | for _, dbhdr := range headersToMove { 84 | hdr, err := converters.DBHeaderToTarHeader(converters.ConfigHeaderToDBHeader(dbhdr)) 85 | if err != nil { 86 | return err 87 | } 88 | 89 | hdr.Size = 0 // Don't try to seek after the record 90 | hdr.Name = path.Join(to, strings.TrimPrefix(strings.TrimPrefix(dbhdr.Name, "/"), strings.TrimPrefix(from, "/"))) 91 | hdr.PAXRecords[records.STFSRecordVersion] = records.STFSRecordVersion1 92 | hdr.PAXRecords[records.STFSRecordAction] = records.STFSRecordActionUpdate 93 | hdr.PAXRecords[records.STFSRecordReplacesName] = dbhdr.Name 94 | 95 | hdrs = append(hdrs, *hdr) 96 | 97 | if o.onHeader != nil { 98 | dbhdr, err := converters.TarHeaderToDBHeader(-1, -1, -1, -1, hdr) 99 | if err != nil { 100 | return err 101 | } 102 | 103 | o.onHeader(&config.HeaderEvent{ 104 | Type: config.HeaderEventTypeMove, 105 | Indexed: false, 106 | Header: converters.DBHeaderToConfigHeader(dbhdr), 107 | }) 108 | } 109 | 110 | if err := signature.SignHeader(hdr, writer.DriveIsRegular, o.pipes.Signature, o.crypto.Identity); err != nil { 111 | return err 112 | } 113 | 114 | if err := encryption.EncryptHeader(hdr, o.pipes.Encryption, o.crypto.Recipient); err != nil { 115 | return err 116 | } 117 | 118 | if err := tw.WriteHeader(hdr); err != nil { 119 | return err 120 | } 121 | 122 | dirty = true 123 | } 124 | 125 | if err := cleanup(&dirty); err != nil { 126 | return err 127 | } 128 | 129 | if err := o.backend.CloseWriter(); err != nil { 130 | return err 131 | } 132 | 133 | reader, err := o.backend.GetReader() 134 | if err != nil { 135 | return err 136 | } 137 | defer o.backend.CloseReader() 138 | 139 | return recovery.Index( 140 | reader, 141 | o.backend.MagneticTapeIO, 142 | o.metadata, 143 | o.pipes, 144 | o.crypto, 145 | 146 | int(lastIndexedRecord), 147 | int(lastIndexedBlock), 148 | false, 149 | false, 150 | 1, // Ignore the first header, which is the last header which we already indexed 151 | 152 | func(hdr *tar.Header, i int) error { 153 
| if len(hdrs) <= i { 154 | return config.ErrTarHeaderMissing 155 | } 156 | 157 | *hdr = hdrs[i] 158 | 159 | return nil 160 | }, 161 | func(hdr *tar.Header, isRegular bool) error { 162 | return nil // We sign above, no need to verify 163 | }, 164 | 165 | func(hdr *config.Header) { 166 | o.onHeader(&config.HeaderEvent{ 167 | Type: config.HeaderEventTypeMove, 168 | Indexed: true, 169 | Header: hdr, 170 | }) 171 | }, 172 | ) 173 | } 174 | -------------------------------------------------------------------------------- /pkg/signature/verify.go: -------------------------------------------------------------------------------- 1 | package signature 2 | 3 | import ( 4 | "archive/tar" 5 | "bytes" 6 | "encoding/base64" 7 | "encoding/json" 8 | "io" 9 | 10 | "aead.dev/minisign" 11 | "github.com/ProtonMail/go-crypto/openpgp" 12 | "github.com/ProtonMail/go-crypto/openpgp/packet" 13 | "github.com/pojntfx/stfs/internal/records" 14 | "github.com/pojntfx/stfs/pkg/config" 15 | ) 16 | 17 | func Verify( 18 | src io.Reader, 19 | isRegular bool, 20 | signatureFormat string, 21 | recipient interface{}, 22 | signature string, 23 | ) (io.Reader, func() error, error) { 24 | switch signatureFormat { 25 | case config.SignatureFormatMinisignKey: 26 | if !isRegular { 27 | return nil, nil, config.ErrSignatureFormatRegularOnly 28 | } 29 | 30 | recipient, ok := recipient.(minisign.PublicKey) 31 | if !ok { 32 | return nil, nil, config.ErrRecipientUnparsable 33 | } 34 | 35 | verifier := minisign.NewReader(src) 36 | 37 | return verifier, func() error { 38 | decodedSignature, err := base64.StdEncoding.DecodeString(signature) 39 | if err != nil { 40 | return err 41 | } 42 | 43 | if verifier.Verify(recipient, decodedSignature) { 44 | return nil 45 | } 46 | 47 | return config.ErrSignatureInvalid 48 | }, nil 49 | case config.SignatureFormatPGPKey: 50 | recipients, ok := recipient.(openpgp.EntityList) 51 | if !ok { 52 | return nil, nil, config.ErrIdentityUnparsable 53 | } 54 | 55 | if len(recipients) < 1 { 56 | return nil, nil, config.ErrIdentityUnparsable 57 | } 58 | 59 | decodedSignature, err := base64.StdEncoding.DecodeString(signature) 60 | if err != nil { 61 | return nil, nil, err 62 | } 63 | 64 | reader := packet.NewReader(bytes.NewBuffer(decodedSignature)) 65 | pkt, err := reader.Next() 66 | if err != nil { 67 | return nil, nil, err 68 | } 69 | 70 | sig, ok := pkt.(*packet.Signature) 71 | if !ok { 72 | return nil, nil, config.ErrSignatureInvalid 73 | } 74 | 75 | hash := sig.Hash.New() 76 | 77 | tee := io.TeeReader(src, hash) 78 | 79 | return tee, func() error { 80 | return recipients[0].PrimaryKey.VerifySignature(hash, sig) 81 | }, nil 82 | case config.NoneKey: 83 | return io.NopCloser(src), func() error { 84 | return nil 85 | }, nil 86 | default: 87 | return nil, nil, config.ErrSignatureFormatUnsupported 88 | } 89 | } 90 | 91 | func VerifyHeader( 92 | hdr *tar.Header, 93 | isRegular bool, 94 | signatureFormat string, 95 | recipient interface{}, 96 | ) error { 97 | if signatureFormat == config.NoneKey { 98 | return nil 99 | } 100 | 101 | if hdr.PAXRecords == nil { 102 | return config.ErrTarHeaderEmbeddedMissing 103 | } 104 | 105 | embeddedHeader, ok := hdr.PAXRecords[records.STFSRecordEmbeddedHeader] 106 | if !ok { 107 | return config.ErrTarHeaderEmbeddedMissing 108 | } 109 | 110 | signature, ok := hdr.PAXRecords[records.STFSRecordSignature] 111 | if !ok { 112 | return config.ErrSignatureMissing 113 | } 114 | 115 | if err := VerifyString(embeddedHeader, isRegular, signatureFormat, recipient, signature); err != nil { 116 | 
return err 117 | } 118 | 119 | var newHdr tar.Header 120 | if err := json.Unmarshal([]byte(embeddedHeader), &newHdr); err != nil { 121 | return err 122 | } 123 | 124 | *hdr = newHdr 125 | 126 | return nil 127 | } 128 | 129 | func VerifyString( 130 | src string, 131 | isRegular bool, 132 | signatureFormat string, 133 | recipient interface{}, 134 | signature string, 135 | ) error { 136 | switch signatureFormat { 137 | case config.SignatureFormatMinisignKey: 138 | if !isRegular { 139 | return config.ErrSignatureFormatRegularOnly 140 | } 141 | 142 | recipient, ok := recipient.(minisign.PublicKey) 143 | if !ok { 144 | return config.ErrRecipientUnparsable 145 | } 146 | 147 | decodedSignature, err := base64.StdEncoding.DecodeString(signature) 148 | if err != nil { 149 | return err 150 | } 151 | 152 | if minisign.Verify(recipient, []byte(src), decodedSignature) { 153 | return nil 154 | } 155 | 156 | return config.ErrSignatureInvalid 157 | case config.SignatureFormatPGPKey: 158 | recipients, ok := recipient.(openpgp.EntityList) 159 | if !ok { 160 | return config.ErrIdentityUnparsable 161 | } 162 | 163 | if len(recipients) < 1 { 164 | return config.ErrIdentityUnparsable 165 | } 166 | 167 | decodedSignature, err := base64.StdEncoding.DecodeString(signature) 168 | if err != nil { 169 | return err 170 | } 171 | 172 | reader := packet.NewReader(bytes.NewBuffer(decodedSignature)) 173 | pkt, err := reader.Next() 174 | if err != nil { 175 | return err 176 | } 177 | 178 | sig, ok := pkt.(*packet.Signature) 179 | if !ok { 180 | return config.ErrSignatureInvalid 181 | } 182 | 183 | hash := sig.Hash.New() 184 | 185 | if _, err := io.Copy(hash, bytes.NewBufferString(src)); err != nil { 186 | return err 187 | } 188 | 189 | return recipients[0].PrimaryKey.VerifySignature(hash, sig) 190 | case config.NoneKey: 191 | return nil 192 | default: 193 | return config.ErrSignatureFormatUnsupported 194 | } 195 | } 196 | -------------------------------------------------------------------------------- /cmd/stfs/cmd/operation_update.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | "io/fs" 7 | "os" 8 | "path/filepath" 9 | 10 | "github.com/pojntfx/stfs/internal/check" 11 | "github.com/pojntfx/stfs/internal/keyext" 12 | "github.com/pojntfx/stfs/internal/logging" 13 | "github.com/pojntfx/stfs/pkg/config" 14 | "github.com/pojntfx/stfs/pkg/keys" 15 | "github.com/pojntfx/stfs/pkg/mtio" 16 | "github.com/pojntfx/stfs/pkg/operations" 17 | "github.com/pojntfx/stfs/pkg/persisters" 18 | "github.com/pojntfx/stfs/pkg/tape" 19 | "github.com/spf13/cobra" 20 | "github.com/spf13/viper" 21 | ) 22 | 23 | var operationUpdateCmd = &cobra.Command{ 24 | Use: "update", 25 | Aliases: []string{"upd", "u", "put"}, 26 | Short: "Update a file or directory's content and metadata on tape or tar file", 27 | PreRunE: func(cmd *cobra.Command, args []string) error { 28 | if err := viper.BindPFlags(cmd.PersistentFlags()); err != nil { 29 | return err 30 | } 31 | 32 | if err := check.CheckCompressionLevel(viper.GetString(compressionLevelFlag)); err != nil { 33 | return err 34 | } 35 | 36 | if err := check.CheckKeyAccessible(viper.GetString(encryptionFlag), viper.GetString(recipientFlag)); err != nil { 37 | return err 38 | } 39 | 40 | return check.CheckKeyAccessible(viper.GetString(signatureFlag), viper.GetString(identityFlag)) 41 | }, 42 | RunE: func(cmd *cobra.Command, args []string) error { 43 | pubkey, err := keyext.ReadKey(viper.GetString(encryptionFlag), viper.GetString(recipientFlag)) 44 | if err != nil { 45 | return err 46 | } 47 | 48 | 
recipient, err := keys.ParseRecipient(viper.GetString(encryptionFlag), pubkey) 49 | if err != nil { 50 | return err 51 | } 52 | 53 | privkey, err := keyext.ReadKey(viper.GetString(signatureFlag), viper.GetString(identityFlag)) 54 | if err != nil { 55 | return err 56 | } 57 | 58 | identity, err := keys.ParseSignerIdentity(viper.GetString(signatureFlag), privkey, viper.GetString(passwordFlag)) 59 | if err != nil { 60 | return err 61 | } 62 | 63 | mt := mtio.MagneticTapeIO{} 64 | tm := tape.NewTapeManager( 65 | viper.GetString(driveFlag), 66 | mt, 67 | viper.GetInt(recordSizeFlag), 68 | false, 69 | ) 70 | 71 | metadataPersister := persisters.NewMetadataPersister(viper.GetString(metadataFlag)) 72 | if err := metadataPersister.Open(); err != nil { 73 | return err 74 | } 75 | 76 | ops := operations.NewOperations( 77 | config.BackendConfig{ 78 | GetWriter: tm.GetWriter, 79 | CloseWriter: tm.Close, 80 | 81 | GetReader: tm.GetReader, 82 | CloseReader: tm.Close, 83 | 84 | MagneticTapeIO: mt, 85 | }, 86 | config.MetadataConfig{ 87 | Metadata: metadataPersister, 88 | }, 89 | 90 | config.PipeConfig{ 91 | Compression: viper.GetString(compressionFlag), 92 | Encryption: viper.GetString(encryptionFlag), 93 | Signature: viper.GetString(signatureFlag), 94 | RecordSize: viper.GetInt(recordSizeFlag), 95 | }, 96 | config.CryptoConfig{ 97 | Recipient: recipient, 98 | Identity: identity, 99 | Password: viper.GetString(passwordFlag), 100 | }, 101 | 102 | logging.NewCSVLogger().PrintHeaderEvent, 103 | ) 104 | 105 | files := make(chan config.FileConfig) 106 | errs := make(chan error) 107 | go func() { 108 | if err := filepath.Walk(viper.GetString(fromFlag), func(path string, info fs.FileInfo, err error) error { 109 | path = filepath.ToSlash(path) 110 | 111 | if err != nil { 112 | return err 113 | } 114 | 115 | link := "" 116 | if info.Mode()&os.ModeSymlink == os.ModeSymlink { 117 | if link, err = os.Readlink(path); err != nil { 118 | return err 119 | } 120 | } 121 | 122 | files <- config.FileConfig{ 123 | GetFile: func() (io.ReadSeekCloser, error) { 124 | return os.Open(path) 125 | }, 126 | Info: info, 127 | Path: filepath.ToSlash(path), 128 | Link: filepath.ToSlash(link), 129 | } 130 | 131 | return nil 132 | }); err != nil { 133 | errs <- err 134 | 135 | return 136 | } 137 | 138 | errs <- io.EOF 139 | }() 140 | 141 | if _, err := ops.Update( 142 | func() (config.FileConfig, error) { 143 | select { 144 | case file := <-files: 145 | return file, err 146 | case err := <-errs: 147 | return config.FileConfig{}, err 148 | } 149 | }, 150 | viper.GetString(compressionLevelFlag), 151 | viper.GetBool(overwriteFlag), 152 | false, 153 | ); err != nil { 154 | return err 155 | } 156 | 157 | return nil 158 | }, 159 | } 160 | 161 | func init() { 162 | operationUpdateCmd.PersistentFlags().IntP(recordSizeFlag, "z", 20, "Amount of 512-bit blocks per record") 163 | operationUpdateCmd.PersistentFlags().StringP(fromFlag, "f", "", "Path of the file or directory to update") 164 | operationUpdateCmd.PersistentFlags().BoolP(overwriteFlag, "o", false, "Replace the content on the tape or tar file") 165 | operationUpdateCmd.PersistentFlags().StringP(compressionLevelFlag, "l", config.CompressionLevelBalancedKey, fmt.Sprintf("Compression level to use (default %v, available are %v)", config.CompressionLevelBalancedKey, config.KnownCompressionLevels)) 166 | operationUpdateCmd.PersistentFlags().StringP(recipientFlag, "r", "", "Path to public key of recipient to encrypt for") 167 | operationUpdateCmd.PersistentFlags().StringP(identityFlag, "i", "", 
"Path to private key to sign with") 168 | operationUpdateCmd.PersistentFlags().StringP(passwordFlag, "p", "", "Password for the private key") 169 | 170 | viper.AutomaticEnv() 171 | 172 | operationCmd.AddCommand(operationUpdateCmd) 173 | } 174 | -------------------------------------------------------------------------------- /cmd/stfs/cmd/serve_http.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "fmt" 5 | "net/http" 6 | "os" 7 | "path/filepath" 8 | "time" 9 | 10 | "github.com/pojntfx/stfs/internal/check" 11 | "github.com/pojntfx/stfs/internal/handlers" 12 | "github.com/pojntfx/stfs/internal/keyext" 13 | "github.com/pojntfx/stfs/internal/logging" 14 | "github.com/pojntfx/stfs/pkg/cache" 15 | "github.com/pojntfx/stfs/pkg/config" 16 | "github.com/pojntfx/stfs/pkg/fs" 17 | "github.com/pojntfx/stfs/pkg/keys" 18 | "github.com/pojntfx/stfs/pkg/mtio" 19 | "github.com/pojntfx/stfs/pkg/operations" 20 | "github.com/pojntfx/stfs/pkg/persisters" 21 | "github.com/pojntfx/stfs/pkg/tape" 22 | "github.com/spf13/afero" 23 | "github.com/spf13/cobra" 24 | "github.com/spf13/viper" 25 | ) 26 | 27 | const ( 28 | laddrFlag = "laddr" 29 | cacheFileSystemFlag = "cache-filesystem-type" 30 | cacheDirFlag = "cache-dir" 31 | cacheDurationFlag = "cache-duration" 32 | ) 33 | 34 | var serveHTTPCmd = &cobra.Command{ 35 | Use: "http", 36 | Aliases: []string{"htt", "h"}, 37 | Short: "Serve tape or tar file and the index over HTTP (read-only)", 38 | PreRunE: func(cmd *cobra.Command, args []string) error { 39 | if err := viper.BindPFlags(cmd.PersistentFlags()); err != nil { 40 | return err 41 | } 42 | 43 | if err := check.CheckFileSystemCacheType(viper.GetString(cacheFileSystemFlag)); err != nil { 44 | return err 45 | } 46 | 47 | if err := check.CheckKeyAccessible(viper.GetString(encryptionFlag), viper.GetString(identityFlag)); err != nil { 48 | return err 49 | } 50 | 51 | return check.CheckKeyAccessible(viper.GetString(signatureFlag), viper.GetString(recipientFlag)) 52 | }, 53 | RunE: func(cmd *cobra.Command, args []string) error { 54 | pubkey, err := keyext.ReadKey(viper.GetString(signatureFlag), viper.GetString(recipientFlag)) 55 | if err != nil { 56 | return err 57 | } 58 | 59 | recipient, err := keys.ParseSignerRecipient(viper.GetString(signatureFlag), pubkey) 60 | if err != nil { 61 | return err 62 | } 63 | 64 | privkey, err := keyext.ReadKey(viper.GetString(encryptionFlag), viper.GetString(identityFlag)) 65 | if err != nil { 66 | return err 67 | } 68 | 69 | identity, err := keys.ParseIdentity(viper.GetString(encryptionFlag), privkey, viper.GetString(passwordFlag)) 70 | if err != nil { 71 | return err 72 | } 73 | 74 | mt := mtio.MagneticTapeIO{} 75 | tm := tape.NewTapeManager( 76 | viper.GetString(driveFlag), 77 | mt, 78 | viper.GetInt(recordSizeFlag), 79 | false, 80 | ) 81 | 82 | metadataPersister := persisters.NewMetadataPersister(viper.GetString(metadataFlag)) 83 | if err := metadataPersister.Open(); err != nil { 84 | return err 85 | } 86 | 87 | jsonLogger := logging.NewJSONLogger(viper.GetInt(verboseFlag)) 88 | 89 | readOps := operations.NewOperations( 90 | config.BackendConfig{ 91 | GetWriter: tm.GetWriter, 92 | CloseWriter: tm.Close, 93 | 94 | GetReader: tm.GetReader, 95 | CloseReader: tm.Close, 96 | 97 | MagneticTapeIO: mt, 98 | }, 99 | config.MetadataConfig{ 100 | Metadata: metadataPersister, 101 | }, 102 | 103 | config.PipeConfig{ 104 | Compression: viper.GetString(compressionFlag), 105 | Encryption: viper.GetString(encryptionFlag), 106 | 
Signature: viper.GetString(signatureFlag), 107 | RecordSize: viper.GetInt(recordSizeFlag), 108 | }, 109 | config.CryptoConfig{ 110 | Recipient: recipient, 111 | Identity: identity, 112 | Password: viper.GetString(passwordFlag), 113 | }, 114 | 115 | func(event *config.HeaderEvent) { 116 | jsonLogger.Debug("Header read", event) 117 | }, 118 | ) 119 | 120 | stfs := fs.NewSTFS( 121 | readOps, 122 | nil, 123 | 124 | config.MetadataConfig{ 125 | Metadata: metadataPersister, 126 | }, 127 | 128 | "", // We never write 129 | nil, // We never write 130 | true, // We never write 131 | false, // We never write 132 | 133 | func(hdr *config.Header) { 134 | jsonLogger.Trace("Header transform", hdr) 135 | }, 136 | jsonLogger, 137 | ) 138 | 139 | root, err := stfs.Initialize("/", os.ModePerm) 140 | if err != nil { 141 | return err 142 | } 143 | 144 | fs, err := cache.NewCacheFilesystem( 145 | stfs, 146 | root, 147 | viper.GetString(cacheFileSystemFlag), 148 | viper.GetDuration(cacheDurationFlag), 149 | filepath.Join(viper.GetString(cacheDirFlag), "filesystem"), 150 | ) 151 | if err != nil { 152 | return err 153 | } 154 | 155 | jsonLogger.Info("HTTP server listening", map[string]interface{}{ 156 | "laddr": viper.GetString(laddrFlag), 157 | }) 158 | 159 | return http.ListenAndServe( 160 | viper.GetString(laddrFlag), 161 | handlers.PanicHandler( 162 | http.FileServer( 163 | afero.NewHttpFs(fs), 164 | ), 165 | jsonLogger, 166 | ), 167 | ) 168 | }, 169 | } 170 | 171 | func init() { 172 | serveHTTPCmd.PersistentFlags().IntP(recordSizeFlag, "z", 20, "Amount of 512-bit blocks per record") 173 | serveHTTPCmd.PersistentFlags().StringP(identityFlag, "i", "", "Path to private key of recipient that has been encrypted for") 174 | serveHTTPCmd.PersistentFlags().StringP(passwordFlag, "p", "", "Password for the private key") 175 | serveHTTPCmd.PersistentFlags().StringP(recipientFlag, "r", "", "Path to the public key to verify with") 176 | serveHTTPCmd.PersistentFlags().StringP(laddrFlag, "a", ":1337", "Listen address") 177 | serveHTTPCmd.PersistentFlags().StringP(cacheFileSystemFlag, "n", config.NoneKey, fmt.Sprintf("File system cache to use (default %v, available are %v)", config.NoneKey, config.KnownFileSystemCacheTypes)) 178 | serveHTTPCmd.PersistentFlags().DurationP(cacheDurationFlag, "u", time.Hour, "Duration until cache is invalidated") 179 | serveHTTPCmd.PersistentFlags().StringP(cacheDirFlag, "w", cacheDir, "Directory to use if dir cache is enabled") 180 | 181 | viper.AutomaticEnv() 182 | 183 | serveCmd.AddCommand(serveHTTPCmd) 184 | } 185 | -------------------------------------------------------------------------------- /cmd/stfs/cmd/operation_archive.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | "io/fs" 7 | "os" 8 | "path/filepath" 9 | 10 | "github.com/pojntfx/stfs/internal/check" 11 | "github.com/pojntfx/stfs/internal/keyext" 12 | "github.com/pojntfx/stfs/internal/logging" 13 | "github.com/pojntfx/stfs/pkg/config" 14 | "github.com/pojntfx/stfs/pkg/keys" 15 | "github.com/pojntfx/stfs/pkg/mtio" 16 | "github.com/pojntfx/stfs/pkg/operations" 17 | "github.com/pojntfx/stfs/pkg/persisters" 18 | "github.com/pojntfx/stfs/pkg/tape" 19 | "github.com/spf13/cobra" 20 | "github.com/spf13/viper" 21 | ) 22 | 23 | const ( 24 | recordSizeFlag = "record-size" 25 | fromFlag = "from" 26 | overwriteFlag = "overwrite" 27 | compressionLevelFlag = "compression-level" 28 | recipientFlag = "recipient" 29 | identityFlag = "identity" 30 | 
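The read-only HTTP server above ultimately just wraps an afero.Fs in afero.NewHttpFs and hands it to net/http's file server. A self-contained sketch of that last step, with an in-memory filesystem standing in for the STFS-backed one (listen address and file contents are made up):

package main

import (
	"log"
	"net/http"

	"github.com/spf13/afero"
)

func main() {
	// In-memory stand-in for the STFS/cache filesystem built by the serve http command
	memFs := afero.NewMemMapFs()
	if err := afero.WriteFile(memFs, "/hello.txt", []byte("hello from afero\n"), 0o644); err != nil {
		log.Fatal(err)
	}

	// afero.NewHttpFs adapts an afero.Fs to the http.FileSystem interface expected by http.FileServer
	log.Fatal(http.ListenAndServe(":1337", http.FileServer(afero.NewHttpFs(memFs))))
}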
passwordFlag = "password" 31 | ) 32 | 33 | var operationArchiveCmd = &cobra.Command{ 34 | Use: "archive", 35 | Aliases: []string{"arc", "a", "c", "add", "post"}, 36 | Short: "Archive a file or directory to tape or tar file", 37 | PreRunE: func(cmd *cobra.Command, args []string) error { 38 | if err := viper.BindPFlags(cmd.PersistentFlags()); err != nil { 39 | return err 40 | } 41 | 42 | if err := check.CheckCompressionLevel(viper.GetString(compressionLevelFlag)); err != nil { 43 | return err 44 | } 45 | 46 | if err := check.CheckKeyAccessible(viper.GetString(encryptionFlag), viper.GetString(recipientFlag)); err != nil { 47 | return err 48 | } 49 | 50 | return check.CheckKeyAccessible(viper.GetString(signatureFlag), viper.GetString(identityFlag)) 51 | }, 52 | RunE: func(cmd *cobra.Command, args []string) error { 53 | pubkey, err := keyext.ReadKey(viper.GetString(encryptionFlag), viper.GetString(recipientFlag)) 54 | if err != nil { 55 | return err 56 | } 57 | 58 | recipient, err := keys.ParseRecipient(viper.GetString(encryptionFlag), pubkey) 59 | if err != nil { 60 | return err 61 | } 62 | 63 | privkey, err := keyext.ReadKey(viper.GetString(signatureFlag), viper.GetString(identityFlag)) 64 | if err != nil { 65 | return err 66 | } 67 | 68 | identity, err := keys.ParseSignerIdentity(viper.GetString(signatureFlag), privkey, viper.GetString(passwordFlag)) 69 | if err != nil { 70 | return err 71 | } 72 | 73 | mt := mtio.MagneticTapeIO{} 74 | tm := tape.NewTapeManager( 75 | viper.GetString(driveFlag), 76 | mt, 77 | viper.GetInt(recordSizeFlag), 78 | viper.GetBool(overwriteFlag), 79 | ) 80 | 81 | metadataPersister := persisters.NewMetadataPersister(viper.GetString(metadataFlag)) 82 | if err := metadataPersister.Open(); err != nil { 83 | return err 84 | } 85 | 86 | ops := operations.NewOperations( 87 | config.BackendConfig{ 88 | GetWriter: tm.GetWriter, 89 | CloseWriter: tm.Close, 90 | 91 | GetReader: tm.GetReader, 92 | CloseReader: tm.Close, 93 | 94 | MagneticTapeIO: mt, 95 | }, 96 | config.MetadataConfig{ 97 | Metadata: metadataPersister, 98 | }, 99 | 100 | config.PipeConfig{ 101 | Compression: viper.GetString(compressionFlag), 102 | Encryption: viper.GetString(encryptionFlag), 103 | Signature: viper.GetString(signatureFlag), 104 | RecordSize: viper.GetInt(recordSizeFlag), 105 | }, 106 | config.CryptoConfig{ 107 | Recipient: recipient, 108 | Identity: identity, 109 | Password: viper.GetString(passwordFlag), 110 | }, 111 | 112 | logging.NewCSVLogger().PrintHeaderEvent, 113 | ) 114 | 115 | files := make(chan config.FileConfig) 116 | errs := make(chan error) 117 | go func() { 118 | if err := filepath.Walk(viper.GetString(fromFlag), func(path string, info fs.FileInfo, err error) error { 119 | path = filepath.ToSlash(path) 120 | 121 | if err != nil { 122 | return err 123 | } 124 | 125 | link := "" 126 | if info.Mode()&os.ModeSymlink == os.ModeSymlink { 127 | if link, err = os.Readlink(path); err != nil { 128 | return err 129 | } 130 | } 131 | 132 | files <- config.FileConfig{ 133 | GetFile: func() (io.ReadSeekCloser, error) { 134 | return os.Open(path) 135 | }, 136 | Info: info, 137 | Path: filepath.ToSlash(path), 138 | Link: filepath.ToSlash(link), 139 | } 140 | 141 | return nil 142 | }); err != nil { 143 | errs <- err 144 | 145 | return 146 | } 147 | 148 | errs <- io.EOF 149 | }() 150 | 151 | if _, err := ops.Archive( 152 | func() (config.FileConfig, error) { 153 | select { 154 | case file := <-files: 155 | return file, err 156 | case err := <-errs: 157 | return config.FileConfig{}, err 158 | } 159 | 
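Both the archive command here and the update command earlier feed files into the operations layer through the same plumbing: a goroutine walks the source tree, sends config.FileConfig values over a channel, and finally pushes io.EOF onto an error channel, while the getSrc callback selects between the two. A condensed, self-contained sketch of just that pattern (the source path is a placeholder and the consumer only logs paths instead of calling ops.Archive):

package main

import (
	"io"
	"io/fs"
	"log"
	"os"
	"path/filepath"

	"github.com/pojntfx/stfs/pkg/config"
)

func main() {
	files := make(chan config.FileConfig)
	errs := make(chan error)

	go func() {
		err := filepath.Walk("/path/to/src", func(path string, info fs.FileInfo, err error) error {
			if err != nil {
				return err
			}

			files <- config.FileConfig{
				GetFile: func() (io.ReadSeekCloser, error) { return os.Open(path) },
				Info:    info,
				Path:    filepath.ToSlash(path),
			}

			return nil
		})
		if err != nil {
			errs <- err

			return
		}

		errs <- io.EOF // Signals "no more files" to the consumer
	}()

	getSrc := func() (config.FileConfig, error) {
		select {
		case file := <-files:
			return file, nil
		case err := <-errs:
			return config.FileConfig{}, err
		}
	}

	for {
		file, err := getSrc()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}

		log.Println("would archive:", file.Path)
	}
}

Operations.Archive and Operations.Update keep calling getSrc until it returns io.EOF, so the same generator can back either operation.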
}, 160 | viper.GetString(compressionLevelFlag), 161 | viper.GetBool(overwriteFlag), 162 | false, 163 | ); err != nil { 164 | return err 165 | } 166 | 167 | return nil 168 | }, 169 | } 170 | 171 | func init() { 172 | operationArchiveCmd.PersistentFlags().IntP(recordSizeFlag, "z", 20, "Amount of 512-bit blocks per record") 173 | operationArchiveCmd.PersistentFlags().StringP(fromFlag, "f", ".", "File or directory to archive") 174 | operationArchiveCmd.PersistentFlags().BoolP(overwriteFlag, "o", false, "Start writing from the start instead of from the end of the tape or tar file") 175 | operationArchiveCmd.PersistentFlags().StringP(compressionLevelFlag, "l", config.CompressionLevelBalancedKey, fmt.Sprintf("Compression level to use (default %v, available are %v)", config.CompressionLevelBalancedKey, config.KnownCompressionLevels)) 176 | operationArchiveCmd.PersistentFlags().StringP(recipientFlag, "r", "", "Path to public key of recipient to encrypt for") 177 | operationArchiveCmd.PersistentFlags().StringP(identityFlag, "i", "", "Path to private key to sign with") 178 | operationArchiveCmd.PersistentFlags().StringP(passwordFlag, "p", "", "Password for the private key") 179 | 180 | viper.AutomaticEnv() 181 | 182 | operationCmd.AddCommand(operationArchiveCmd) 183 | } 184 | -------------------------------------------------------------------------------- /pkg/compression/compress.go: -------------------------------------------------------------------------------- 1 | package compression 2 | 3 | import ( 4 | "compress/gzip" 5 | "io" 6 | "math" 7 | 8 | "github.com/andybalholm/brotli" 9 | "github.com/dsnet/compress/bzip2" 10 | "github.com/klauspost/compress/zstd" 11 | "github.com/klauspost/pgzip" 12 | "github.com/pierrec/lz4/v4" 13 | "github.com/pojntfx/stfs/internal/ioext" 14 | "github.com/pojntfx/stfs/pkg/config" 15 | ) 16 | 17 | func Compress( 18 | dst io.Writer, 19 | compressionFormat string, 20 | compressionLevel string, 21 | isRegular bool, 22 | recordSize int, 23 | ) (ioext.FlusherWriter, error) { 24 | switch compressionFormat { 25 | case config.CompressionFormatGZipKey: 26 | fallthrough 27 | case config.CompressionFormatParallelGZipKey: 28 | if compressionFormat == config.CompressionFormatGZipKey { 29 | if !isRegular { 30 | maxSize := getNearestPowerOf2Lower(config.MagneticTapeBlockSize * recordSize) 31 | 32 | if maxSize < 65535 { // See https://www.daylight.com/meetings/mug00/Sayle/gzip.html#:~:text=Stored%20blocks%20are%20allowed%20to,size%20of%20the%20gzip%20header. 
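The 65535 threshold guarded here comes from deflate's stored (uncompressed) block limit referenced in the comment: when writing gzip straight to a tape device, a whole record has to fit into one flushable chunk, so the largest power of two at or below record-size times block-size must reach at least 64 KiB. A small worked example, assuming config.MagneticTapeBlockSize is 512 bytes (the constant is defined outside this section, so treat the concrete numbers as illustrative):

package main

import (
	"fmt"
	"math"
)

// nearestPowerOf2Lower mirrors getNearestPowerOf2Lower from the bottom of this file
func nearestPowerOf2Lower(n int) int {
	return int(math.Pow(2, float64(int(math.Log2(float64(n))))))
}

func main() {
	fmt.Println(nearestPowerOf2Lower(20 * 512))  // recordSize 20:  10240 bytes -> 8192, below 65535, rejected
	fmt.Println(nearestPowerOf2Lower(128 * 512)) // recordSize 128: 65536 bytes -> 65536, accepted
}

Under that assumption, gzip onto a non-regular drive needs a record size of at least 128; anything smaller returns ErrCompressionFormatRequiresLargerRecordSize.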
33 | return nil, config.ErrCompressionFormatRequiresLargerRecordSize 34 | } 35 | } 36 | 37 | l := gzip.DefaultCompression 38 | switch compressionLevel { 39 | case config.CompressionLevelFastestKey: 40 | l = gzip.BestSpeed 41 | case config.CompressionLevelBalancedKey: 42 | l = gzip.DefaultCompression 43 | case config.CompressionLevelSmallestKey: 44 | l = gzip.BestCompression 45 | default: 46 | return nil, config.ErrCompressionLevelUnsupported 47 | } 48 | 49 | return gzip.NewWriterLevel(dst, l) 50 | } 51 | 52 | if !isRegular { 53 | return nil, config.ErrCompressionFormatRegularOnly // "device or resource busy" 54 | } 55 | 56 | l := pgzip.DefaultCompression 57 | switch compressionLevel { 58 | case config.CompressionLevelFastestKey: 59 | l = pgzip.BestSpeed 60 | case config.CompressionLevelBalancedKey: 61 | l = pgzip.DefaultCompression 62 | case config.CompressionLevelSmallestKey: 63 | l = pgzip.BestCompression 64 | default: 65 | return nil, config.ErrCompressionLevelUnsupported 66 | } 67 | 68 | return pgzip.NewWriterLevel(dst, l) 69 | case config.CompressionFormatLZ4Key: 70 | l := lz4.Level5 71 | switch compressionLevel { 72 | case config.CompressionLevelFastestKey: 73 | l = lz4.Level1 74 | case config.CompressionLevelBalancedKey: 75 | l = lz4.Level5 76 | case config.CompressionLevelSmallestKey: 77 | l = lz4.Level9 78 | default: 79 | return nil, config.ErrCompressionLevelUnsupported 80 | } 81 | 82 | opts := []lz4.Option{lz4.CompressionLevelOption(l), lz4.ConcurrencyOption(-1)} 83 | if !isRegular { 84 | maxSize := getNearestPowerOf2Lower(config.MagneticTapeBlockSize * recordSize) 85 | 86 | if uint32(maxSize) < uint32(lz4.Block64Kb) { 87 | return nil, config.ErrCompressionFormatRequiresLargerRecordSize 88 | } 89 | 90 | if uint32(maxSize) < uint32(lz4.Block256Kb) { 91 | opts = append(opts, lz4.BlockSizeOption(lz4.Block64Kb)) 92 | } else if uint32(maxSize) < uint32(lz4.Block1Mb) { 93 | opts = append(opts, lz4.BlockSizeOption(lz4.Block256Kb)) 94 | } else if uint32(maxSize) < uint32(lz4.Block4Mb) { 95 | opts = append(opts, lz4.BlockSizeOption(lz4.Block1Mb)) 96 | } else { 97 | opts = append(opts, lz4.BlockSizeOption(lz4.Block4Mb)) 98 | } 99 | } 100 | 101 | lz := lz4.NewWriter(dst) 102 | if err := lz.Apply(opts...); err != nil { 103 | return nil, err 104 | } 105 | 106 | return ioext.AddFlushNop(lz), nil 107 | case config.CompressionFormatZStandardKey: 108 | l := zstd.SpeedDefault 109 | switch compressionLevel { 110 | case config.CompressionLevelFastestKey: 111 | l = zstd.SpeedFastest 112 | case config.CompressionLevelBalancedKey: 113 | l = zstd.SpeedDefault 114 | case config.CompressionLevelSmallestKey: 115 | l = zstd.SpeedBestCompression 116 | default: 117 | return nil, config.ErrCompressionLevelUnsupported 118 | } 119 | 120 | opts := []zstd.EOption{zstd.WithEncoderLevel(l)} 121 | if !isRegular { 122 | opts = append(opts, zstd.WithWindowSize(getNearestPowerOf2Lower(config.MagneticTapeBlockSize*recordSize))) 123 | } 124 | 125 | zz, err := zstd.NewWriter(dst, opts...) 
126 | if err != nil { 127 | return nil, err 128 | } 129 | 130 | return zz, nil 131 | case config.CompressionFormatBrotliKey: 132 | if !isRegular { 133 | return nil, config.ErrCompressionFormatRegularOnly // "cannot allocate memory" 134 | } 135 | 136 | l := brotli.DefaultCompression 137 | switch compressionLevel { 138 | case config.CompressionLevelFastestKey: 139 | l = brotli.BestSpeed 140 | case config.CompressionLevelBalancedKey: 141 | l = brotli.DefaultCompression 142 | case config.CompressionLevelSmallestKey: 143 | l = brotli.BestCompression 144 | default: 145 | return nil, config.ErrCompressionLevelUnsupported 146 | } 147 | 148 | br := brotli.NewWriterLevel(dst, l) 149 | 150 | return br, nil 151 | case config.CompressionFormatBzip2Key: 152 | fallthrough 153 | case config.CompressionFormatBzip2ParallelKey: 154 | l := bzip2.DefaultCompression 155 | switch compressionLevel { 156 | case config.CompressionLevelFastestKey: 157 | l = bzip2.BestSpeed 158 | case config.CompressionLevelBalancedKey: 159 | l = bzip2.DefaultCompression 160 | case config.CompressionLevelSmallestKey: 161 | l = bzip2.BestCompression 162 | default: 163 | return nil, config.ErrCompressionLevelUnsupported 164 | } 165 | 166 | bz, err := bzip2.NewWriter(dst, &bzip2.WriterConfig{ 167 | Level: l, 168 | }) 169 | if err != nil { 170 | return nil, err 171 | } 172 | 173 | return ioext.AddFlushNop(bz), nil 174 | case config.NoneKey: 175 | return ioext.AddFlushNop(ioext.AddCloseNopToWriter(dst)), nil 176 | default: 177 | return nil, config.ErrCompressionFormatUnsupported 178 | } 179 | } 180 | 181 | func getNearestPowerOf2Lower(n int) int { 182 | return int(math.Pow(2, float64(getNearestLogOf2Lower(n)))) // Truncation is intentional, see https://www.geeksforgeeks.org/highest-power-2-less-equal-given-number/ 183 | } 184 | 185 | func getNearestLogOf2Lower(n int) int { 186 | return int(math.Log2(float64(n))) // Truncation is intentional, see https://www.geeksforgeeks.org/highest-power-2-less-equal-given-number/ 187 | } 188 | -------------------------------------------------------------------------------- /examples/full/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "log" 5 | "os" 6 | "path/filepath" 7 | "time" 8 | 9 | "github.com/pojntfx/stfs/examples" 10 | "github.com/pojntfx/stfs/pkg/cache" 11 | "github.com/pojntfx/stfs/pkg/config" 12 | "github.com/pojntfx/stfs/pkg/fs" 13 | "github.com/pojntfx/stfs/pkg/keys" 14 | "github.com/pojntfx/stfs/pkg/mtio" 15 | "github.com/pojntfx/stfs/pkg/operations" 16 | "github.com/pojntfx/stfs/pkg/persisters" 17 | "github.com/pojntfx/stfs/pkg/tape" 18 | "github.com/pojntfx/stfs/pkg/utility" 19 | "github.com/spf13/afero" 20 | ) 21 | 22 | func createFs( 23 | drive string, 24 | metadata string, 25 | 26 | recordSize int, 27 | readOnly bool, 28 | verbose bool, 29 | 30 | signature string, 31 | signaturePassword string, 32 | 33 | encryption string, 34 | encryptionPassword string, 35 | 36 | compression string, 37 | compressionLevel string, 38 | 39 | writeCache string, 40 | writeCacheDir string, 41 | 42 | fileSystemCache string, 43 | fileSystemCacheDir string, 44 | fileSystemCacheDuration time.Duration, 45 | ) (afero.Fs, error) { 46 | signaturePrivkey, signaturePubkey, err := utility.Keygen( 47 | config.PipeConfig{ 48 | Signature: signature, 49 | Encryption: config.NoneKey, 50 | }, 51 | config.PasswordConfig{ 52 | Password: signaturePassword, 53 | }, 54 | ) 55 | if err != nil { 56 | return nil, err 57 | } 58 | 59 | 
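A short usage sketch for the Compress helper completed above, writing zstd-compressed data into an in-memory buffer. With isRegular set to true the record size argument is not consulted by any of the branches shown, and the Flush-before-Close ordering mirrors what the archive operation does later in this listing; the payload string is made up.

package main

import (
	"bytes"
	"fmt"
	"io"
	"log"
	"strings"

	"github.com/pojntfx/stfs/pkg/compression"
	"github.com/pojntfx/stfs/pkg/config"
)

func main() {
	var buf bytes.Buffer

	w, err := compression.Compress(&buf, config.CompressionFormatZStandardKey, config.CompressionLevelBalancedKey, true, 20)
	if err != nil {
		log.Fatal(err)
	}

	if _, err := io.Copy(w, strings.NewReader("some payload to compress")); err != nil {
		log.Fatal(err)
	}

	// Flush first, then Close, as Operations.Archive does for each file
	if err := w.Flush(); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}

	fmt.Println("compressed size:", buf.Len())
}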
signatureRecipient, err := keys.ParseSignerRecipient(signature, signaturePubkey) 60 | if err != nil { 61 | return nil, err 62 | } 63 | 64 | signatureIdentity, err := keys.ParseSignerIdentity(signature, signaturePrivkey, signaturePassword) 65 | if err != nil { 66 | return nil, err 67 | } 68 | 69 | encryptionPrivkey, encryptionPubkey, err := utility.Keygen( 70 | config.PipeConfig{ 71 | Signature: config.NoneKey, 72 | Encryption: encryption, 73 | }, 74 | config.PasswordConfig{ 75 | Password: encryptionPassword, 76 | }, 77 | ) 78 | if err != nil { 79 | return nil, err 80 | } 81 | 82 | encryptionRecipient, err := keys.ParseRecipient(encryption, encryptionPubkey) 83 | if err != nil { 84 | return nil, err 85 | } 86 | 87 | encryptionIdentity, err := keys.ParseIdentity(encryption, encryptionPrivkey, encryptionPassword) 88 | if err != nil { 89 | return nil, err 90 | } 91 | 92 | mt := mtio.MagneticTapeIO{} 93 | tm := tape.NewTapeManager( 94 | drive, 95 | mt, 96 | recordSize, 97 | false, 98 | ) 99 | 100 | metadataPersister := persisters.NewMetadataPersister(metadata) 101 | if err := metadataPersister.Open(); err != nil { 102 | return nil, err 103 | } 104 | 105 | jsonLogger := &examples.Logger{ 106 | Verbose: verbose, 107 | } 108 | 109 | metadataConfig := config.MetadataConfig{ 110 | Metadata: metadataPersister, 111 | } 112 | pipeConfig := config.PipeConfig{ 113 | Compression: compression, 114 | Encryption: encryption, 115 | Signature: signature, 116 | RecordSize: recordSize, 117 | } 118 | backendConfig := config.BackendConfig{ 119 | GetWriter: tm.GetWriter, 120 | CloseWriter: tm.Close, 121 | 122 | GetReader: tm.GetReader, 123 | CloseReader: tm.Close, 124 | 125 | MagneticTapeIO: mt, 126 | } 127 | readCryptoConfig := config.CryptoConfig{ 128 | Recipient: signatureRecipient, 129 | Identity: encryptionIdentity, 130 | Password: encryptionPassword, 131 | } 132 | 133 | readOps := operations.NewOperations( 134 | backendConfig, 135 | metadataConfig, 136 | 137 | pipeConfig, 138 | readCryptoConfig, 139 | 140 | func(event *config.HeaderEvent) { 141 | jsonLogger.Debug("Header read", event) 142 | }, 143 | ) 144 | 145 | writeOps := operations.NewOperations( 146 | backendConfig, 147 | metadataConfig, 148 | 149 | pipeConfig, 150 | config.CryptoConfig{ 151 | Recipient: encryptionRecipient, 152 | Identity: signatureIdentity, 153 | Password: signaturePassword, 154 | }, 155 | 156 | func(event *config.HeaderEvent) { 157 | jsonLogger.Debug("Header write", event) 158 | }, 159 | ) 160 | 161 | stfs := fs.NewSTFS( 162 | readOps, 163 | writeOps, 164 | 165 | config.MetadataConfig{ 166 | Metadata: metadataPersister, 167 | }, 168 | 169 | compressionLevel, 170 | func() (cache.WriteCache, func() error, error) { 171 | return cache.NewCacheWrite( 172 | writeCacheDir, 173 | writeCache, 174 | ) 175 | }, 176 | readOnly, 177 | false, 178 | 179 | func(hdr *config.Header) { 180 | jsonLogger.Trace("Header transform", hdr) 181 | }, 182 | jsonLogger, 183 | ) 184 | 185 | root, err := stfs.Initialize("/", os.ModePerm) 186 | if err != nil { 187 | return nil, err 188 | } 189 | 190 | return cache.NewCacheFilesystem( 191 | stfs, 192 | root, 193 | fileSystemCache, 194 | fileSystemCacheDuration, 195 | fileSystemCacheDir, 196 | ) 197 | } 198 | 199 | func main() { 200 | tmp, err := os.MkdirTemp(os.TempDir(), "stfs-test-*") 201 | if err != nil { 202 | panic(err) 203 | } 204 | 205 | drive := filepath.Join(tmp, "drive.tar") 206 | metadata := filepath.Join(tmp, "metadata.sqlite") 207 | 208 | recordSize := 20 209 | readOnly := false 210 | verbose := true 211 
| 212 | signature := config.SignatureFormatPGPKey 213 | signaturePassword := "testSignaturePassword" 214 | 215 | encryption := config.EncryptionFormatAgeKey 216 | encryptionPassword := "testEncryptionPassword" 217 | 218 | compression := config.CompressionFormatZStandardKey 219 | compressionLevel := config.CompressionLevelFastestKey 220 | 221 | writeCache := config.WriteCacheTypeFile 222 | writeCacheDir := filepath.Join(tmp, "write-cache") 223 | 224 | fileSystemCache := config.FileSystemCacheTypeDir 225 | fileSystemCacheDir := filepath.Join(tmp, "filesystem-cache") 226 | fileSystemCacheDuration := time.Hour 227 | 228 | fs, err := createFs( 229 | drive, 230 | metadata, 231 | 232 | recordSize, 233 | readOnly, 234 | verbose, 235 | 236 | signature, 237 | signaturePassword, 238 | 239 | encryption, 240 | encryptionPassword, 241 | 242 | compression, 243 | compressionLevel, 244 | 245 | writeCache, 246 | writeCacheDir, 247 | 248 | fileSystemCache, 249 | fileSystemCacheDir, 250 | fileSystemCacheDuration, 251 | ) 252 | if err != nil { 253 | panic(err) 254 | } 255 | 256 | log.Println("stat /") 257 | 258 | stat, err := fs.Stat("/") 259 | if err != nil { 260 | panic(err) 261 | } 262 | 263 | log.Println("Result of stat /:", stat) 264 | } 265 | -------------------------------------------------------------------------------- /pkg/recovery/query.go: -------------------------------------------------------------------------------- 1 | package recovery 2 | 3 | import ( 4 | "archive/tar" 5 | "bufio" 6 | "io" 7 | "io/ioutil" 8 | "math" 9 | 10 | "github.com/pojntfx/stfs/internal/converters" 11 | "github.com/pojntfx/stfs/internal/ioext" 12 | "github.com/pojntfx/stfs/pkg/config" 13 | "github.com/pojntfx/stfs/pkg/encryption" 14 | "github.com/pojntfx/stfs/pkg/signature" 15 | ) 16 | 17 | func Query( 18 | reader config.DriveReaderConfig, 19 | mt config.MagneticTapeIO, 20 | pipes config.PipeConfig, 21 | crypto config.CryptoConfig, 22 | 23 | record int, 24 | block int, 25 | 26 | onHeader func(hdr *config.Header), 27 | ) ([]*tar.Header, error) { 28 | headers := []*tar.Header{} 29 | 30 | if reader.DriveIsRegular { 31 | // Seek to record and block 32 | if _, err := reader.Drive.Seek(int64((pipes.RecordSize*config.MagneticTapeBlockSize*record)+block*config.MagneticTapeBlockSize), 0); err != nil { 33 | return []*tar.Header{}, err 34 | } 35 | 36 | tr := tar.NewReader(reader.Drive) 37 | 38 | record := int64(record) 39 | block := int64(block) 40 | 41 | for { 42 | hdr, err := tr.Next() 43 | if err != nil { 44 | for { 45 | curr, err := reader.Drive.Seek(0, io.SeekCurrent) 46 | if err != nil { 47 | return []*tar.Header{}, err 48 | } 49 | 50 | nextTotalBlocks := math.Ceil(float64((curr)) / float64(config.MagneticTapeBlockSize)) 51 | record = int64(nextTotalBlocks) / int64(pipes.RecordSize) 52 | block = int64(nextTotalBlocks) - (record * int64(pipes.RecordSize)) 53 | 54 | if block < 0 { 55 | record-- 56 | block = int64(pipes.RecordSize) - 1 57 | } else if block >= int64(pipes.RecordSize) { 58 | record++ 59 | block = 0 60 | } 61 | 62 | // Seek to record and block 63 | if _, err := reader.Drive.Seek(int64((pipes.RecordSize*config.MagneticTapeBlockSize*int(record))+int(block)*config.MagneticTapeBlockSize), io.SeekStart); err != nil { 64 | return []*tar.Header{}, err 65 | } 66 | 67 | tr = tar.NewReader(reader.Drive) 68 | 69 | hdr, err = tr.Next() 70 | if err != nil { 71 | if err == io.EOF { 72 | // EOF 73 | 74 | break 75 | } 76 | 77 | continue 78 | } 79 | 80 | break 81 | } 82 | } 83 | 84 | if hdr == nil { 85 | // EOF 86 | 87 | break 88 | 
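Throughout Query, an absolute byte offset is converted back into a (record, block) pair so the tar reader can be re-synchronized: total blocks = ceil(offset / block size), record = blocks / record size, block = the remainder. A tiny worked sketch of that arithmetic (the helper name is invented, and 512-byte blocks with a record size of 20 are assumed for the concrete numbers):

package main

import (
	"fmt"
	"math"
)

// offsetToRecordAndBlock mirrors the address arithmetic used in Query; it is not part of the package
func offsetToRecordAndBlock(offset, blockSize, recordSize int64) (record, block int64) {
	blocks := int64(math.Ceil(float64(offset) / float64(blockSize)))
	record = blocks / recordSize
	block = blocks - record*recordSize

	return record, block
}

func main() {
	// 11264 bytes = 22 blocks of 512 bytes -> record 1, block 2 with a record size of 20
	fmt.Println(offsetToRecordAndBlock(11264, 512, 20))
}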
} 89 | 90 | if err := encryption.DecryptHeader(hdr, pipes.Encryption, crypto.Identity); err != nil { 91 | return []*tar.Header{}, err 92 | } 93 | 94 | if err := signature.VerifyHeader(hdr, reader.DriveIsRegular, pipes.Signature, crypto.Recipient); err != nil { 95 | return []*tar.Header{}, err 96 | } 97 | 98 | if onHeader != nil { 99 | dbhdr, err := converters.TarHeaderToDBHeader(record, -1, block, -1, hdr) 100 | if err != nil { 101 | return []*tar.Header{}, err 102 | } 103 | 104 | onHeader(converters.DBHeaderToConfigHeader(dbhdr)) 105 | } 106 | 107 | headers = append(headers, hdr) 108 | 109 | curr, err := reader.Drive.Seek(0, io.SeekCurrent) 110 | if err != nil { 111 | return []*tar.Header{}, err 112 | } 113 | 114 | if _, err := io.Copy(ioutil.Discard, tr); err != nil { 115 | return []*tar.Header{}, err 116 | } 117 | 118 | currAndSize, err := reader.Drive.Seek(0, io.SeekCurrent) 119 | if err != nil { 120 | return []*tar.Header{}, err 121 | } 122 | 123 | nextTotalBlocks := math.Ceil(float64(curr+(currAndSize-curr)) / float64(config.MagneticTapeBlockSize)) 124 | record = int64(nextTotalBlocks) / int64(pipes.RecordSize) 125 | block = int64(nextTotalBlocks) - (record * int64(pipes.RecordSize)) 126 | 127 | if block > int64(pipes.RecordSize) { 128 | record++ 129 | block = 0 130 | } 131 | } 132 | } else { 133 | // Seek to record 134 | if err := mt.SeekToRecordOnTape(reader.Drive.Fd(), int32(record)); err != nil { 135 | return []*tar.Header{}, err 136 | } 137 | 138 | // Seek to block 139 | br := bufio.NewReaderSize(reader.Drive, config.MagneticTapeBlockSize*pipes.RecordSize) 140 | if _, err := br.Read(make([]byte, block*config.MagneticTapeBlockSize)); err != nil { 141 | return []*tar.Header{}, err 142 | } 143 | 144 | record := int64(record) 145 | block := int64(block) 146 | 147 | curr := int64((pipes.RecordSize * config.MagneticTapeBlockSize * int(record)) + (int(block) * config.MagneticTapeBlockSize)) 148 | counter := &ioext.CounterReader{Reader: br, BytesRead: int(curr)} 149 | 150 | tr := tar.NewReader(counter) 151 | for { 152 | hdr, err := tr.Next() 153 | if err != nil { 154 | if err == io.EOF { 155 | if err := mt.GoToNextFileOnTape(reader.Drive.Fd()); err != nil { 156 | // EOD 157 | 158 | break 159 | } 160 | 161 | record, err = mt.GetCurrentRecordFromTape(reader.Drive.Fd()) 162 | if err != nil { 163 | return []*tar.Header{}, err 164 | } 165 | block = 0 166 | 167 | br = bufio.NewReaderSize(reader.Drive, config.MagneticTapeBlockSize*pipes.RecordSize) 168 | curr := int64(int64(pipes.RecordSize) * config.MagneticTapeBlockSize * record) 169 | counter := &ioext.CounterReader{Reader: br, BytesRead: int(curr)} 170 | tr = tar.NewReader(counter) 171 | 172 | continue 173 | } else { 174 | return []*tar.Header{}, err 175 | } 176 | } 177 | 178 | if err := encryption.DecryptHeader(hdr, pipes.Encryption, crypto.Identity); err != nil { 179 | return []*tar.Header{}, err 180 | } 181 | 182 | if err := signature.VerifyHeader(hdr, reader.DriveIsRegular, pipes.Signature, crypto.Recipient); err != nil { 183 | return []*tar.Header{}, err 184 | } 185 | 186 | if onHeader != nil { 187 | dbhdr, err := converters.TarHeaderToDBHeader(record, -1, block, -1, hdr) 188 | if err != nil { 189 | return []*tar.Header{}, err 190 | } 191 | 192 | onHeader(converters.DBHeaderToConfigHeader(dbhdr)) 193 | } 194 | 195 | headers = append(headers, hdr) 196 | 197 | if _, err := io.Copy(ioutil.Discard, tr); err != nil { 198 | return []*tar.Header{}, err 199 | } 200 | 201 | currAndSize := int64(counter.BytesRead) 202 | 203 | nextTotalBlocks := 
math.Ceil(float64(curr+(currAndSize-curr)) / float64(config.MagneticTapeBlockSize)) 204 | record = int64(nextTotalBlocks) / int64(pipes.RecordSize) 205 | block = int64(nextTotalBlocks) - (record * int64(pipes.RecordSize)) 206 | 207 | if block > int64(pipes.RecordSize) { 208 | record++ 209 | block = 0 210 | } 211 | } 212 | } 213 | 214 | return headers, nil 215 | } 216 | -------------------------------------------------------------------------------- /internal/db/sqlite/migrations/metadata/migrations.go: -------------------------------------------------------------------------------- 1 | package metadata 2 | 3 | import ( 4 | "bytes" 5 | "compress/gzip" 6 | "fmt" 7 | "io" 8 | "strings" 9 | ) 10 | 11 | func bindata_read(data []byte, name string) ([]byte, error) { 12 | gz, err := gzip.NewReader(bytes.NewBuffer(data)) 13 | if err != nil { 14 | return nil, fmt.Errorf("Read %q: %v", name, err) 15 | } 16 | 17 | var buf bytes.Buffer 18 | _, err = io.Copy(&buf, gz) 19 | gz.Close() 20 | 21 | if err != nil { 22 | return nil, fmt.Errorf("Read %q: %v", name, err) 23 | } 24 | 25 | return buf.Bytes(), nil 26 | } 27 | 28 | var _db_sqlite_migrations_metadata_1637447083_sql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xa4\x56\x4d\x73\xdb\x36\x13\x3e\xbf\xfe\x15\x3b\xb9\x44\x9a\x57\xd4\xb9\xd3\x4c\x0f\x6e\xdc\xb8\x9e\x89\x9d\x8c\x62\x35\x39\x1a\x24\x96\x24\x2a\x10\x8b\x2e\x40\xc9\xcc\xaf\xef\x2c\x40\xaa\xb2\x65\xc9\xe9\xf4\x24\x0a\xd8\x7d\x76\xf7\xd9\x2f\x14\x05\xfc\xbf\x33\x0d\xab\x88\xb0\xf6\x17\x15\xa3\x7c\x45\x55\x5a\x84\x16\x95\x46\x0e\x30\xbb\x00\x00\x28\x0a\x58\x61\x45\xac\x81\x6a\x88\xad\x09\xe3\x3d\x90\x83\xd8\x8a\x8e\xc7\x24\xc8\x59\xca\xb8\x88\x0d\x32\x38\x8a\xe0\x7a\x6b\x17\x2f\xa1\x20\x58\x15\x22\xf4\x5e\x8b\xd9\x09\xf0\x3c\xbe\x68\x6c\x1c\xed\xdc\x6b\x86\x7e\xb5\x54\x6d\x9e\xa3\x99\x8c\x96\x75\x93\x64\x99\xc4\x7e\x04\xe5\x47\xbc\x3d\xc6\xdf\xfb\xfb\x8a\xa1\x9b\x1a\x02\xc6\x45\x52\x1f\xc1\x5a\x15\xa0\x44\x74\xa0\xd1\x62\x44\x9d\xb8\x50\x1e\x17\x50\xf6\x11\x1e\x9e\x31\xf1\x00\xca\xe9\x83\xd3\x64\xef\x01\x14\x23\x84\x68\xac\x15\x57\x19\x2d\x6e\x95\xab\x32\x95\x13\xec\x49\x9f\xee\x07\x8f\xb5\x55\x0d\x98\x90\x93\x30\x78\x14\x98\xd1\x3f\x74\x91\x87\xe5\x5e\xb8\x45\xf8\x8e\x4c\xb0\x55\xb6\x47\x51\x51\x7d\xa4\x4e\x45\x53\x29\x6b\x07\xf0\x4c\x1d\x89\xb9\x48\x80\x26\xb6\xc8\x09\x7f\x85\x0d\x50\xfe\xbc\x32\x3c\x81\x69\xf4\xe8\xb4\x71\xcd\x94\x7f\xcf\x18\xd0\x55\xc9\xbc\x82\xc8\xca\x58\xb9\x0d\x56\x85\x56\x58\xbf\x53\x1d\x66\x57\xe2\xde\xe9\x53\x61\x89\xac\xe0\xd4\xc6\x62\x0e\x22\xdd\x38\x39\x8e\xf8\x18\x5f\x20\x42\x71\x83\x31\x4b\x50\x0d\xd6\xb8\x0d\xcc\xb6\xca\x1a\x0d\xf5\xe8\xfc\x47\x39\x1b\xbf\xbf\x0c\x9d\x88\xcc\x73\x01\x18\xb7\x39\x03\xfd\x91\x1a\x21\x28\x3b\x13\xcc\x77\x94\x68\xca\x21\x62\x48\x12\xe3\xc9\x89\x48\x3e\x23\x77\x26\x04\x43\x2e\x25\xbf\x23\x8d\x50\x9a\x98\x55\xd3\xbf\x93\xaa\xeb\x80\x0c\x37\x57\x12\x0e\xed\x1c\x66\xe6\x7b\x73\xa6\x1a\xae\x99\x7a\x7f\xa4\xd2\x9c\x53\x49\x46\x26\xd6\x0e\xcc\x9c\xe1\x23\x5b\x39\xd6\x69\xce\xe8\xdc\xe4\xf6\xfc\x40\xdc\xa9\x28\x95\xd7\xbb\xe0\xb1\x32\xb5\x41\x9d\x3a\xca\xc1\x57\x36\x11\x79\x99\x7e\x7e\xcf\xe5\xcb\xd4\x3b\x1d\xe0\x96\xf4\xbd\xe9\x70\xc2\x8a\x94\xb0\x1c\x2a\xc6\x10\x21\x60\x45\x4e\x27\x76\x4d\xe3\x88\x31\x77\xc2\x65\x55\x61\x08\xa2\x97\xae\xde\xb7\xca\x35\x98\xfe\xd6\x06\xad\x0e\x53\x57\xec\x0b\x88\xa0\x0f\x4f\xd4\x88\x0f\xb4\x16\x90\xfd\x1d\x0e\xe3\x50\x01\x3e\x5f\x7e\x13\xc1\xeb\xbb\xf5\xf2\x19\x52\xe8\xcb\x62\xf4\x8d\x31\x90\xed\xa3\x21\x77\x06\x66\xaf\x7f\x4b\xda\xd4\xa6\x52\x22\x0f\x71\x0a\xbc\x23\x2d\xdf\x90\x46\xdb\x11\xbf\xd9\xed\x24
\x0d\x33\xc6\xbf\x7a\x23\x3c\x8c\x4d\xfc\x8f\x8f\x10\x7a\xef\x89\x63\xae\x7b\x95\x94\xce\xa0\xe6\xf0\xff\x25\x6a\x95\x94\xce\xa0\xde\xaa\x3f\x89\x41\xe3\xd6\x54\x08\xae\xef\x4a\xe4\xe7\xad\xfa\xbe\x55\x3c\xb5\x6a\x9a\xef\xf3\x71\x1c\x6e\xbb\xa4\x7c\xb2\x9c\x6f\x8d\xfb\x0f\xd8\x49\xf9\x74\x2b\x5f\x7e\xcb\xab\x31\xa4\xd1\x09\x9d\xf2\xd2\x00\xc2\x03\x3e\x46\x74\x1a\xf5\x34\x77\xf3\xbc\x3f\xaa\x31\x69\xb6\x42\x63\x6d\x1c\xea\x49\x06\x42\x4b\xbd\xd5\xd0\xaa\x2d\xc2\x06\x87\x30\xad\xb2\x9a\xac\xa5\x9d\x8c\xd0\x9a\xb8\xfb\x79\xc4\xf8\xdf\x1f\xbf\xdd\x5d\x7d\x5a\x2d\x37\x38\xec\xa6\x05\x56\x14\xf0\xb5\x45\x46\xc8\x77\xe2\x5d\xa0\x0e\x53\x8b\x06\xaf\xaa\x34\xb0\x94\xb5\xd0\x7b\x8f\x5c\xa9\x80\x8b\xd4\x13\x23\x06\x74\x6a\x98\x70\x24\xe8\x8a\x5c\x54\xe3\x9a\x7c\xfb\xcb\x5b\x49\x28\xab\x2a\x0a\x93\xb8\x6c\x96\x0b\x78\x73\xfd\xe9\xe3\xe5\xdd\xf5\xd2\x6f\x9a\xe5\x16\x59\x86\xdb\x9b\xf9\x93\x2d\xb3\xc1\x21\x59\xc8\x8b\x66\x8c\xb0\x94\x5a\x70\x05\x76\x3e\x0e\xb0\xbe\xff\x50\xfc\x04\x21\xb2\x71\xcd\x11\x4f\x5f\x4f\xcc\x03\x13\x40\x36\x95\x8c\x0c\x61\x7d\x62\x50\x23\x9b\x2d\x6a\xa8\x99\x3a\xf1\x7a\x82\xa1\x54\xa7\xb9\xdd\x85\x82\x11\x25\xaa\x4d\xda\x56\x15\xea\xbc\xaf\xb6\xb9\x9a\x57\x87\x49\xf3\xea\x71\x82\x7f\x79\xa2\x8d\xbd\x3b\x4d\xb1\x30\xe6\x2c\x1d\x8e\x19\x8c\x8a\xc7\x82\x38\x1a\x36\xf2\x20\x91\x34\x61\x84\x72\x80\x55\x16\xba\x13\x43\x4a\x4a\xab\xc4\x10\x0b\xac\x6b\xe2\x08\x4d\x2f\x9d\xad\xe2\x81\x81\x3d\xd7\x5f\x8c\x04\x20\x17\x19\x02\xac\x29\x91\xd3\x32\x67\x54\x7a\xaa\x03\x72\x45\x45\x9d\xb7\x46\xb9\x98\xd6\x58\xd8\x87\x61\xd2\x34\xf6\x14\x82\x91\x27\xa5\x34\x49\x7a\x2d\x45\x92\x7c\xe5\x20\xd7\x2e\x3d\x57\x9e\x07\x71\x53\x1f\xc6\xfc\x74\xa6\xc3\xee\xd5\x14\xee\xc7\xb9\x48\x9a\x28\x33\x73\x24\xd1\x70\x88\x13\xec\x6c\x2c\x44\xe2\xf1\x2d\xb7\xfe\x72\x7f\xb9\x4a\xf9\x5f\xc8\xec\x99\x4f\x30\x95\xf2\xe9\x51\x4c\x35\xa0\xab\x28\xbd\x4c\x52\x24\xa3\xe1\x59\xc0\x29\x9e\xb1\x56\x27\xc7\x8f\xfa\x3d\x17\x00\x9b\x4e\xf1\x90\x4a\x79\x26\x9d\xb4\xd8\xbf\x13\xe6\x17\xf3\x77\x17\x87\x0f\xf3\x2b\xda\xb9\x0b\xcd\xe4\x9f\x3e\xcc\xdf\xfd\x1d\x00\x00\xff\xff\x7e\x7f\x3e\xa6\xbd\x0b\x00\x00") 29 | 30 | func db_sqlite_migrations_metadata_1637447083_sql() ([]byte, error) { 31 | return bindata_read( 32 | _db_sqlite_migrations_metadata_1637447083_sql, 33 | "../../db/sqlite/migrations/metadata/1637447083.sql", 34 | ) 35 | } 36 | 37 | // Asset loads and returns the asset for the given name. 38 | // It returns an error if the asset could not be found or 39 | // could not be loaded. 40 | func Asset(name string) ([]byte, error) { 41 | cannonicalName := strings.Replace(name, "\\", "/", -1) 42 | if f, ok := _bindata[cannonicalName]; ok { 43 | return f() 44 | } 45 | return nil, fmt.Errorf("Asset %s not found", name) 46 | } 47 | 48 | // AssetNames returns the names of the assets. 49 | func AssetNames() []string { 50 | names := make([]string, 0, len(_bindata)) 51 | for name := range _bindata { 52 | names = append(names, name) 53 | } 54 | return names 55 | } 56 | 57 | // _bindata is a table, holding each asset generator, mapped to its name. 58 | var _bindata = map[string]func() ([]byte, error){ 59 | "../../db/sqlite/migrations/metadata/1637447083.sql": db_sqlite_migrations_metadata_1637447083_sql, 60 | } 61 | // AssetDir returns the file names below a certain 62 | // directory embedded in the file by go-bindata. 63 | // For example if you run go-bindata on data/... 
and data contains the 64 | // following hierarchy: 65 | // data/ 66 | // foo.txt 67 | // img/ 68 | // a.png 69 | // b.png 70 | // then AssetDir("data") would return []string{"foo.txt", "img"} 71 | // AssetDir("data/img") would return []string{"a.png", "b.png"} 72 | // AssetDir("foo.txt") and AssetDir("notexist") would return an error 73 | // AssetDir("") will return []string{"data"}. 74 | func AssetDir(name string) ([]string, error) { 75 | node := _bintree 76 | if len(name) != 0 { 77 | cannonicalName := strings.Replace(name, "\\", "/", -1) 78 | pathList := strings.Split(cannonicalName, "/") 79 | for _, p := range pathList { 80 | node = node.Children[p] 81 | if node == nil { 82 | return nil, fmt.Errorf("Asset %s not found", name) 83 | } 84 | } 85 | } 86 | if node.Func != nil { 87 | return nil, fmt.Errorf("Asset %s not found", name) 88 | } 89 | rv := make([]string, 0, len(node.Children)) 90 | for name := range node.Children { 91 | rv = append(rv, name) 92 | } 93 | return rv, nil 94 | } 95 | 96 | type _bintree_t struct { 97 | Func func() ([]byte, error) 98 | Children map[string]*_bintree_t 99 | } 100 | var _bintree = &_bintree_t{nil, map[string]*_bintree_t{ 101 | "..": &_bintree_t{nil, map[string]*_bintree_t{ 102 | "..": &_bintree_t{nil, map[string]*_bintree_t{ 103 | "db": &_bintree_t{nil, map[string]*_bintree_t{ 104 | "sqlite": &_bintree_t{nil, map[string]*_bintree_t{ 105 | "migrations": &_bintree_t{nil, map[string]*_bintree_t{ 106 | "metadata": &_bintree_t{nil, map[string]*_bintree_t{ 107 | "1637447083.sql": &_bintree_t{db_sqlite_migrations_metadata_1637447083_sql, map[string]*_bintree_t{ 108 | }}, 109 | }}, 110 | }}, 111 | }}, 112 | }}, 113 | }}, 114 | }}, 115 | }} 116 | -------------------------------------------------------------------------------- /pkg/operations/archive.go: -------------------------------------------------------------------------------- 1 | package operations 2 | 3 | import ( 4 | "archive/tar" 5 | "context" 6 | "errors" 7 | "io" 8 | "strconv" 9 | "strings" 10 | 11 | "github.com/pojntfx/stfs/internal/converters" 12 | "github.com/pojntfx/stfs/internal/ioext" 13 | "github.com/pojntfx/stfs/internal/records" 14 | "github.com/pojntfx/stfs/internal/suffix" 15 | "github.com/pojntfx/stfs/internal/tarext" 16 | "github.com/pojntfx/stfs/pkg/compression" 17 | "github.com/pojntfx/stfs/pkg/config" 18 | "github.com/pojntfx/stfs/pkg/encryption" 19 | "github.com/pojntfx/stfs/pkg/recovery" 20 | "github.com/pojntfx/stfs/pkg/signature" 21 | ) 22 | 23 | var ( 24 | errSocketsNotSupported = errors.New("archive/tar: sockets not supported") 25 | ) 26 | 27 | func (o *Operations) Archive( 28 | getSrc func() (config.FileConfig, error), 29 | compressionLevel string, 30 | overwrite bool, 31 | initializing bool, 32 | ) ([]*tar.Header, error) { 33 | o.diskOperationLock.Lock() 34 | defer o.diskOperationLock.Unlock() 35 | 36 | return o.archive(getSrc, compressionLevel, overwrite, initializing) 37 | } 38 | 39 | func (o *Operations) archive( 40 | getSrc func() (config.FileConfig, error), 41 | compressionLevel string, 42 | overwrite bool, 43 | initializing bool, 44 | ) ([]*tar.Header, error) { 45 | 46 | writer, err := o.backend.GetWriter() 47 | if err != nil { 48 | return []*tar.Header{}, err 49 | } 50 | 51 | dirty := false 52 | tw, cleanup, err := tarext.NewTapeWriter(writer.Drive, writer.DriveIsRegular, o.pipes.RecordSize) 53 | if err != nil { 54 | return []*tar.Header{}, err 55 | } 56 | 57 | lastIndexedRecord := int64(0) 58 | lastIndexedBlock := int64(0) 59 | if !overwrite { 60 | lastIndexedRecord, 
lastIndexedBlock, err = o.metadata.Metadata.GetLastIndexedRecordAndBlock(context.Background(), o.pipes.RecordSize) 61 | if err != nil { 62 | return []*tar.Header{}, err 63 | } 64 | } 65 | 66 | hdrs := []*tar.Header{} 67 | for { 68 | file, err := getSrc() 69 | if err == io.EOF { 70 | break 71 | } 72 | 73 | if err != nil { 74 | return []*tar.Header{}, err 75 | } 76 | 77 | hdr, err := tar.FileInfoHeader(file.Info, file.Link) 78 | if err != nil { 79 | // Skip sockets 80 | if strings.Contains(err.Error(), errSocketsNotSupported.Error()) { 81 | continue 82 | } 83 | 84 | return []*tar.Header{}, err 85 | } 86 | 87 | hdr.Name = file.Path 88 | hdr.Format = tar.FormatPAX 89 | 90 | var f io.ReadSeekCloser 91 | if file.Info.Mode().IsRegular() && file.Info.Size() > 0 { 92 | // Get the compressed size for the header 93 | fileSizeCounter := &ioext.CounterWriter{ 94 | Writer: io.Discard, 95 | } 96 | 97 | encryptor, err := encryption.Encrypt(fileSizeCounter, o.pipes.Encryption, o.crypto.Recipient) 98 | if err != nil { 99 | return []*tar.Header{}, err 100 | } 101 | 102 | compressor, err := compression.Compress( 103 | encryptor, 104 | o.pipes.Compression, 105 | compressionLevel, 106 | writer.DriveIsRegular, 107 | o.pipes.RecordSize, 108 | ) 109 | if err != nil { 110 | return []*tar.Header{}, err 111 | } 112 | 113 | f, err = file.GetFile() 114 | if err != nil { 115 | return []*tar.Header{}, err 116 | } 117 | 118 | signer, sign, err := signature.Sign(f, writer.DriveIsRegular, o.pipes.Signature, o.crypto.Identity) 119 | if err != nil { 120 | return []*tar.Header{}, err 121 | } 122 | 123 | if writer.DriveIsRegular { 124 | if _, err := io.Copy(compressor, signer); err != nil { 125 | return []*tar.Header{}, err 126 | } 127 | } else { 128 | buf := make([]byte, config.MagneticTapeBlockSize*o.pipes.RecordSize) 129 | if _, err := io.CopyBuffer(compressor, signer, buf); err != nil { 130 | return []*tar.Header{}, err 131 | } 132 | } 133 | 134 | if err := compressor.Flush(); err != nil { 135 | return []*tar.Header{}, err 136 | } 137 | 138 | if err := compressor.Close(); err != nil { 139 | return []*tar.Header{}, err 140 | } 141 | 142 | if err := encryptor.Close(); err != nil { 143 | return []*tar.Header{}, err 144 | } 145 | 146 | if hdr.PAXRecords == nil { 147 | hdr.PAXRecords = map[string]string{} 148 | } 149 | hdr.PAXRecords[records.STFSRecordUncompressedSize] = strconv.Itoa(int(hdr.Size)) 150 | signature, err := sign() 151 | if err != nil { 152 | return []*tar.Header{}, err 153 | } 154 | 155 | if signature != "" { 156 | hdr.PAXRecords[records.STFSRecordSignature] = signature 157 | } 158 | hdr.Size = int64(fileSizeCounter.BytesRead) 159 | 160 | hdr.Name, err = suffix.AddSuffix(hdr.Name, o.pipes.Compression, o.pipes.Encryption) 161 | if err != nil { 162 | return []*tar.Header{}, err 163 | } 164 | } 165 | 166 | if o.onHeader != nil { 167 | dbhdr, err := converters.TarHeaderToDBHeader(-1, -1, -1, -1, hdr) 168 | if err != nil { 169 | return []*tar.Header{}, err 170 | } 171 | 172 | o.onHeader(&config.HeaderEvent{ 173 | Type: config.HeaderEventTypeArchive, 174 | Indexed: false, 175 | Header: converters.DBHeaderToConfigHeader(dbhdr), 176 | }) 177 | } 178 | 179 | hdrToAppend := *hdr 180 | hdrs = append(hdrs, &hdrToAppend) 181 | 182 | if err := signature.SignHeader(hdr, writer.DriveIsRegular, o.pipes.Signature, o.crypto.Identity); err != nil { 183 | return []*tar.Header{}, err 184 | } 185 | 186 | if err := encryption.EncryptHeader(hdr, o.pipes.Encryption, o.crypto.Recipient); err != nil { 187 | return []*tar.Header{}, err 188 | 
} 189 | 190 | if err := tw.WriteHeader(hdr); err != nil { 191 | return []*tar.Header{}, err 192 | } 193 | 194 | dirty = true 195 | 196 | if !file.Info.Mode().IsRegular() || file.Info.Size() <= 0 { 197 | if f != nil { 198 | if err := f.Close(); err != nil { 199 | return []*tar.Header{}, err 200 | } 201 | } 202 | 203 | continue 204 | } 205 | 206 | // Compress and write the file 207 | encryptor, err := encryption.Encrypt(tw, o.pipes.Encryption, o.crypto.Recipient) 208 | if err != nil { 209 | return []*tar.Header{}, err 210 | } 211 | 212 | compressor, err := compression.Compress( 213 | encryptor, 214 | o.pipes.Compression, 215 | compressionLevel, 216 | writer.DriveIsRegular, 217 | o.pipes.RecordSize, 218 | ) 219 | if err != nil { 220 | return []*tar.Header{}, err 221 | } 222 | 223 | if _, err := f.Seek(0, io.SeekStart); err != nil { 224 | return []*tar.Header{}, err 225 | } 226 | 227 | if writer.DriveIsRegular { 228 | if _, err := io.Copy(compressor, f); err != nil { 229 | return []*tar.Header{}, err 230 | } 231 | } else { 232 | buf := make([]byte, config.MagneticTapeBlockSize*o.pipes.RecordSize) 233 | if _, err := io.CopyBuffer(compressor, f, buf); err != nil { 234 | return []*tar.Header{}, err 235 | } 236 | } 237 | 238 | if err := compressor.Flush(); err != nil { 239 | return []*tar.Header{}, err 240 | } 241 | 242 | if err := compressor.Close(); err != nil { 243 | return []*tar.Header{}, err 244 | } 245 | 246 | if err := encryptor.Close(); err != nil { 247 | return []*tar.Header{}, err 248 | } 249 | 250 | if err := f.Close(); err != nil { 251 | return []*tar.Header{}, err 252 | } 253 | } 254 | 255 | if err := cleanup(&dirty); err != nil { 256 | return []*tar.Header{}, err 257 | } 258 | 259 | index := 1 // Ignore the first header, which is the last header which we already indexed 260 | if overwrite { 261 | index = 0 // If we are starting fresh, index from start 262 | } 263 | 264 | if err := o.backend.CloseWriter(); err != nil { 265 | return []*tar.Header{}, err 266 | } 267 | 268 | reader, err := o.backend.GetReader() 269 | if err != nil { 270 | return []*tar.Header{}, err 271 | } 272 | defer o.backend.CloseReader() 273 | 274 | return hdrs, recovery.Index( 275 | reader, 276 | o.backend.MagneticTapeIO, 277 | o.metadata, 278 | o.pipes, 279 | o.crypto, 280 | 281 | int(lastIndexedRecord), 282 | int(lastIndexedBlock), 283 | overwrite, 284 | initializing, 285 | index, 286 | 287 | func(hdr *tar.Header, i int) error { 288 | if len(hdrs) <= i { 289 | return config.ErrTarHeaderMissing 290 | } 291 | 292 | *hdr = *hdrs[i] 293 | 294 | return nil 295 | }, 296 | func(hdr *tar.Header, isRegular bool) error { 297 | return nil // We sign above, no need to verify 298 | }, 299 | 300 | func(hdr *config.Header) { 301 | o.onHeader(&config.HeaderEvent{ 302 | Type: config.HeaderEventTypeArchive, 303 | Indexed: true, 304 | Header: hdr, 305 | }) 306 | }, 307 | ) 308 | } 309 | --------------------------------------------------------------------------------
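Tying the section together, a condensed sketch of driving Operations.Archive directly. It assumes an ops value constructed with operations.NewOperations exactly as in the archive command earlier, archives a single file, and prints the resulting headers; the file path is a placeholder, error handling is reduced to log.Fatal, and the channel-based generator sketched earlier would work here just as well.

// Assumes: ops := operations.NewOperations(...) wired as in cmd/stfs/cmd/operation_archive.go
info, err := os.Stat("/path/to/file.txt")
if err != nil {
	log.Fatal(err)
}

sent := false
getSrc := func() (config.FileConfig, error) {
	if sent {
		return config.FileConfig{}, io.EOF // No more files to archive
	}
	sent = true

	return config.FileConfig{
		GetFile: func() (io.ReadSeekCloser, error) { return os.Open("/path/to/file.txt") },
		Info:    info,
		Path:    "path/to/file.txt",
	}, nil
}

hdrs, err := ops.Archive(getSrc, config.CompressionLevelBalancedKey, false, false)
if err != nil {
	log.Fatal(err)
}

for _, hdr := range hdrs {
	log.Println("archived:", hdr.Name)
}

After writing, Archive re-reads the tape or tar file through recovery.Index as shown above, so the metadata database stays in sync with what was just written.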