├── .dockerignore ├── .github ├── FUNDING.yml └── workflows │ └── build.yml ├── triggers ├── lidarr │ ├── testdata │ │ ├── invalid.json │ │ ├── test.json │ │ ├── marshmello.json │ │ └── blink-182.json │ ├── lidarr.go │ └── lidarr_test.go ├── radarr │ ├── testdata │ │ ├── invalid.json │ │ ├── test.json │ │ ├── rename.json │ │ ├── movie_delete.json │ │ ├── movie_file_delete.json │ │ └── interstellar.json │ ├── radarr.go │ └── radarr_test.go ├── sonarr │ ├── testdata │ │ ├── invalid.json │ │ ├── test.json │ │ ├── series_delete.json │ │ ├── westworld.json │ │ ├── episode_delete.json │ │ └── rename.json │ ├── sonarr_test.go │ └── sonarr.go ├── readarr │ ├── testdata │ │ ├── invalid.json │ │ ├── test.json │ │ └── sanderson.json │ ├── readarr_test.go │ └── readarr.go ├── a_train │ ├── testdata │ │ ├── modified.json │ │ └── full.json │ ├── a_train.go │ └── a_train_test.go ├── bernard │ ├── limiter.go │ ├── datastore.go │ ├── postprocess.go │ ├── paths.go │ └── bernard.go ├── manual │ ├── manual.go │ ├── manual_test.go │ └── template.html └── inotify │ └── inotify.go ├── docker ├── run └── Dockerfile ├── .gitignore ├── processor ├── migrations │ └── 1_init.sql ├── processor.go ├── datastore.go └── datastore_test.go ├── logging.go ├── util.go ├── cmd └── autoscan │ ├── stats.go │ ├── config.go │ ├── router.go │ └── main.go ├── LICENSE ├── util_test.go ├── .goreleaser.yml ├── targets ├── autoscan │ ├── autoscan.go │ └── api.go ├── emby │ ├── emby.go │ └── api.go ├── jellyfin │ ├── jellyfin.go │ └── api.go └── plex │ ├── plex.go │ └── api.go ├── Taskfile.yml ├── go.mod ├── migrate ├── migrator.go └── util.go ├── autoscan_test.go ├── autoscan.go ├── go.sum └── README.md /.dockerignore: -------------------------------------------------------------------------------- 1 | ** 2 | !docker/ 3 | !dist/ -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 
| github: [l3uddz, m-rots] -------------------------------------------------------------------------------- /triggers/lidarr/testdata/invalid.json: -------------------------------------------------------------------------------- 1 | This is an invalid JSON file -------------------------------------------------------------------------------- /triggers/lidarr/testdata/test.json: -------------------------------------------------------------------------------- 1 | { 2 | "eventType": "Test" 3 | } -------------------------------------------------------------------------------- /triggers/radarr/testdata/invalid.json: -------------------------------------------------------------------------------- 1 | This is an invalid JSON file -------------------------------------------------------------------------------- /triggers/radarr/testdata/test.json: -------------------------------------------------------------------------------- 1 | { 2 | "eventType": "Test" 3 | } -------------------------------------------------------------------------------- /triggers/sonarr/testdata/invalid.json: -------------------------------------------------------------------------------- 1 | This is an invalid JSON file -------------------------------------------------------------------------------- /triggers/sonarr/testdata/test.json: -------------------------------------------------------------------------------- 1 | { 2 | "eventType": "Test" 3 | } -------------------------------------------------------------------------------- /triggers/readarr/testdata/invalid.json: -------------------------------------------------------------------------------- 1 | This is an invalid JSON file 2 | -------------------------------------------------------------------------------- /triggers/readarr/testdata/test.json: -------------------------------------------------------------------------------- 1 | { 2 | "eventType": "Test" 3 | } 4 | -------------------------------------------------------------------------------- 
/docker/run: -------------------------------------------------------------------------------- 1 | #!/usr/bin/with-contenv sh 2 | 3 | exec s6-setuidgid abc /app/autoscan/autoscan 4 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | .idea/ 3 | vendor/ 4 | dist/ 5 | 6 | *.db 7 | *.log 8 | /autoscan 9 | /config.yml 10 | 11 | /.task/ -------------------------------------------------------------------------------- /triggers/sonarr/testdata/series_delete.json: -------------------------------------------------------------------------------- 1 | { 2 | "eventType": "SeriesDelete", 3 | "series": { 4 | "path": "/TV/Westworld" 5 | } 6 | } -------------------------------------------------------------------------------- /triggers/radarr/testdata/rename.json: -------------------------------------------------------------------------------- 1 | { 2 | "eventType": "Rename", 3 | "movie": { 4 | "folderPath": "/Movies/Deadpool (2016)" 5 | } 6 | } -------------------------------------------------------------------------------- /triggers/radarr/testdata/movie_delete.json: -------------------------------------------------------------------------------- 1 | { 2 | "eventType": "MovieDelete", 3 | "movie": { 4 | "folderPath": "/Movies/Wonder Woman 1984 (2020)" 5 | } 6 | } -------------------------------------------------------------------------------- /triggers/a_train/testdata/modified.json: -------------------------------------------------------------------------------- 1 | { 2 | "created": [ 3 | "/TV/Legion/Season 1" 4 | ], 5 | "deleted": [ 6 | "/TV/Legion/Season 1" 7 | ] 8 | } -------------------------------------------------------------------------------- /processor/migrations/1_init.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE IF NOT EXISTS scan ( 2 | "folder" TEXT NOT NULL, 3 | "priority" 
INTEGER NOT NULL, 4 | "time" DATETIME NOT NULL, 5 | PRIMARY KEY(folder) 6 | ) -------------------------------------------------------------------------------- /triggers/sonarr/testdata/westworld.json: -------------------------------------------------------------------------------- 1 | { 2 | "eventType": "Download", 3 | "episodeFile": { 4 | "relativePath": "Season 1/Westworld.S01E01.mkv" 5 | }, 6 | "series": { 7 | "path": "/TV/Westworld" 8 | } 9 | } -------------------------------------------------------------------------------- /triggers/radarr/testdata/movie_file_delete.json: -------------------------------------------------------------------------------- 1 | { 2 | "eventType": "MovieFileDelete", 3 | "movieFile": { 4 | "relativePath": "Tenet.2020.mkv" 5 | }, 6 | "movie": { 7 | "folderPath": "/Movies/Tenet (2020)" 8 | } 9 | } -------------------------------------------------------------------------------- /triggers/sonarr/testdata/episode_delete.json: -------------------------------------------------------------------------------- 1 | { 2 | "eventType": "EpisodeFileDelete", 3 | "episodeFile": { 4 | "relativePath": "Season 2/Westworld.S02E01.mkv" 5 | }, 6 | "series": { 7 | "path": "/TV/Westworld" 8 | } 9 | } -------------------------------------------------------------------------------- /triggers/radarr/testdata/interstellar.json: -------------------------------------------------------------------------------- 1 | { 2 | "eventType": "Download", 3 | "movieFile": { 4 | "relativePath": "Interstellar.2014.UHD.BluRay.2160p.REMUX.mkv" 5 | }, 6 | "movie": { 7 | "folderPath": "/Movies/Interstellar (2014)" 8 | } 9 | } -------------------------------------------------------------------------------- /triggers/a_train/testdata/full.json: -------------------------------------------------------------------------------- 1 | { 2 | "created": [ 3 | "/Movies/Interstellar (2014)", 4 | "/TV/Legion/Season 1" 5 | ], 6 | "deleted": [ 7 | "/Movies/Wonder Woman 1984 (2020)", 8 | 
"/Movies/Mortal Kombat (2021)" 9 | ] 10 | } -------------------------------------------------------------------------------- /triggers/readarr/testdata/sanderson.json: -------------------------------------------------------------------------------- 1 | { 2 | "eventType": "Download", 3 | "isUpgrade": false, 4 | "bookFiles": [ 5 | { 6 | "path": "/Books/Brandon Sanderson/The Way of Kings (2010)/The Way of Kings - Brandon Sanderson.epub" 7 | } 8 | ], 9 | "author": { 10 | "name": "Brandon Sanderson", 11 | "path": "/Books/Brandon Sanderson" 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /logging.go: -------------------------------------------------------------------------------- 1 | package autoscan 2 | 3 | import ( 4 | "github.com/rs/zerolog" 5 | "github.com/rs/zerolog/log" 6 | ) 7 | 8 | func GetLogger(verbosity string) zerolog.Logger { 9 | if verbosity == "" { 10 | return log.Logger 11 | } 12 | 13 | level, err := zerolog.ParseLevel(verbosity) 14 | if err != nil { 15 | return log.Logger 16 | } 17 | 18 | return log.Level(level) 19 | } 20 | -------------------------------------------------------------------------------- /util.go: -------------------------------------------------------------------------------- 1 | package autoscan 2 | 3 | import ( 4 | "fmt" 5 | "net/url" 6 | "path" 7 | "strings" 8 | ) 9 | 10 | func JoinURL(base string, paths ...string) string { 11 | // credits: https://stackoverflow.com/a/57220413 12 | p := path.Join(paths...) 13 | return fmt.Sprintf("%s/%s", strings.TrimRight(base, "/"), strings.TrimLeft(p, "/")) 14 | } 15 | 16 | // DSN creates a data source name for use with sql.Open. 
17 | func DSN(path string, q url.Values) string { 18 | u := url.URL{ 19 | Scheme: "file", 20 | Path: path, 21 | RawQuery: q.Encode(), 22 | } 23 | 24 | return u.String() 25 | } 26 | -------------------------------------------------------------------------------- /triggers/lidarr/testdata/marshmello.json: -------------------------------------------------------------------------------- 1 | { 2 | "eventType": "Download", 3 | "isUpgrade": false, 4 | "trackFiles": [ 5 | { 6 | "path": "/Music/Marshmello/Joytime III (2019)/01 - Down.mp3" 7 | }, 8 | { 9 | "path": "/Music/Marshmello/Joytime III (2019)/02 - Run It Up.mp3" 10 | }, 11 | { 12 | "path": "/Music/Marshmello/Joytime III (2019)/03 - Put Yo Hands Up.mp3" 13 | }, 14 | { 15 | "path": "/Music/Marshmello/Joytime III (2019)/04 - Let’s Get Down.mp3" 16 | } 17 | ], 18 | "artist": { 19 | "name": "Marshmello", 20 | "path": "/Music/Marshmello" 21 | } 22 | } -------------------------------------------------------------------------------- /docker/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM saltydk/alpine-s6overlay:latest 2 | 3 | ARG TARGETOS 4 | ARG TARGETARCH 5 | ARG TARGETVARIANT 6 | 7 | ENV \ 8 | PATH="/app/autoscan:${PATH}" \ 9 | AUTOSCAN_CONFIG="/config/config.yml" \ 10 | AUTOSCAN_DATABASE="/config/autoscan.db" \ 11 | AUTOSCAN_LOG="/config/activity.log" \ 12 | AUTOSCAN_VERBOSITY="0" 13 | 14 | # Binary 15 | COPY ["dist/autoscan_${TARGETOS}_${TARGETARCH}${TARGETVARIANT:+_7}/autoscan", "/app/autoscan/autoscan"] 16 | 17 | # Add root files 18 | COPY ["docker/run", "/etc/services.d/autoscan/run"] 19 | 20 | # Volume 21 | VOLUME ["/config"] 22 | 23 | # Port 24 | EXPOSE 3030 25 | -------------------------------------------------------------------------------- /triggers/lidarr/testdata/blink-182.json: -------------------------------------------------------------------------------- 1 | { 2 | "eventType": "Download", 3 | "isUpgrade": false, 4 | "trackFiles": [ 5 | { 6 | "path": 
"/Music/blink‐182/California (2016)/CD 01/01 - Cynical.mp3" 7 | }, 8 | { 9 | "path": "/Music/blink‐182/California (2016)/CD 01/02 - Bored to Death.mp3" 10 | }, 11 | { 12 | "path": "/Music/blink‐182/California (2016)/CD 02/01 - Parking Lot.mp3" 13 | }, 14 | { 15 | "path": "/Music/blink‐182/California (2016)/CD 02/02 - Misery.mp3" 16 | } 17 | ], 18 | "artist": { 19 | "name": "blink-182", 20 | "path": "/Music/blink-182" 21 | } 22 | } -------------------------------------------------------------------------------- /triggers/sonarr/testdata/rename.json: -------------------------------------------------------------------------------- 1 | { 2 | "eventType": "Rename", 3 | "series": { 4 | "path": "/TV/Westworld [imdb:tt0475784]" 5 | }, 6 | "renamedEpisodeFiles": [ 7 | { 8 | "previousPath": "/TV/Westworld/Season 1/Westworld.S01E01.mkv", 9 | "relativePath": "Season 1/Westworld.S01E01.mkv" 10 | }, 11 | { 12 | "previousPath": "/TV/Westworld/Season 1/Westworld.S01E02.mkv", 13 | "relativePath": "Season 1/Westworld.S01E02.mkv" 14 | }, 15 | { 16 | "previousPath": "/TV/Westworld/Season 2/Westworld.S01E02.mkv", 17 | "relativePath": "Season 2/Westworld.S02E01.mkv" 18 | } 19 | ] 20 | } -------------------------------------------------------------------------------- /cmd/autoscan/stats.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "errors" 5 | "time" 6 | 7 | "github.com/rs/zerolog/log" 8 | 9 | "github.com/saltydk/autoscan" 10 | "github.com/saltydk/autoscan/processor" 11 | ) 12 | 13 | func scanStats(proc *processor.Processor, interval time.Duration) { 14 | st := time.NewTicker(interval) 15 | for { 16 | select { 17 | case _ = <-st.C: 18 | // retrieve amount of scans remaining 19 | sm, err := proc.ScansRemaining() 20 | switch { 21 | case err == nil: 22 | log.Info(). 23 | Int("remaining", sm). 24 | Int64("processed", proc.ScansProcessed()). 
25 | Msg("Scan stats") 26 | case errors.Is(err, autoscan.ErrFatal): 27 | log.Error(). 28 | Err(err). 29 | Msg("Fatal error determining amount of remaining scans, scan stats stopped...") 30 | st.Stop() 31 | return 32 | default: 33 | // ErrNoScans should never occur as COUNT should always at-least return 0 34 | log.Error(). 35 | Err(err). 36 | Msg("Failed determining amount of remaining scans") 37 | } 38 | } 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /cmd/autoscan/config.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "path/filepath" 7 | ) 8 | 9 | func defaultConfigDirectory(app string, filename string) string { 10 | // binary path 11 | bcd := getBinaryPath() 12 | if _, err := os.Stat(filepath.Join(bcd, filename)); err == nil { 13 | // there is a config file in the binary path 14 | // so use this directory as the default 15 | return bcd 16 | } 17 | 18 | // config dir 19 | ucd, err := os.UserConfigDir() 20 | if err != nil { 21 | panic(fmt.Sprintf("userconfigdir: %v", err)) 22 | } 23 | 24 | acd := filepath.Join(ucd, app) 25 | if _, err := os.Stat(acd); os.IsNotExist(err) { 26 | if err := os.MkdirAll(acd, os.ModePerm); err != nil { 27 | panic(fmt.Sprintf("mkdirall: %v", err)) 28 | } 29 | } 30 | 31 | return acd 32 | } 33 | 34 | func getBinaryPath() string { 35 | // get current binary path 36 | dir, err := filepath.Abs(filepath.Dir(os.Args[0])) 37 | if err != nil { 38 | // get current working dir 39 | if dir, err = os.Getwd(); err != nil { 40 | panic(fmt.Sprintf("getwd: %v", err)) 41 | } 42 | } 43 | 44 | return dir 45 | } 46 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 Storm Timmermans and James Bayliss 4 | 5 | Permission is hereby granted, free of 
charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
-------------------------------------------------------------------------------- /util_test.go: -------------------------------------------------------------------------------- 1 | package autoscan 2 | 3 | import ( 4 | "net/url" 5 | "testing" 6 | ) 7 | 8 | func TestDSN(t *testing.T) { 9 | type Test struct { 10 | Name string 11 | Path string 12 | Query url.Values 13 | Want string 14 | } 15 | 16 | var testCases = []Test{ 17 | { 18 | Name: "readonly", 19 | Path: "library.db", 20 | Query: url.Values{ 21 | "mode": []string{"ro"}, 22 | }, 23 | Want: "file://library.db?mode=ro", 24 | }, 25 | { 26 | Name: "No Query", 27 | Path: "library.db", 28 | Want: "file://library.db", 29 | }, 30 | { 31 | Name: "Path from root", 32 | Path: "/mnt/unionfs/library.db", 33 | Query: url.Values{ 34 | "mode": []string{"rw"}, 35 | }, 36 | Want: "file:///mnt/unionfs/library.db?mode=rw", 37 | }, 38 | { 39 | Name: "Query sorted by key", 40 | Path: "library.db", 41 | Query: url.Values{ 42 | "cache": []string{"shared"}, 43 | "mode": []string{"rw"}, 44 | }, 45 | Want: "file://library.db?cache=shared&mode=rw", 46 | }, 47 | } 48 | 49 | for _, tc := range testCases { 50 | t.Run(tc.Name, func(t *testing.T) { 51 | dsn := DSN(tc.Path, tc.Query) 52 | if dsn != tc.Want { 53 | t.Log(dsn) 54 | t.Log(tc.Want) 55 | t.Error("DSNs do not match") 56 | } 57 | }) 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /.goreleaser.yml: -------------------------------------------------------------------------------- 1 | # https://goreleaser.com 2 | project_name: autoscan 3 | 4 | # Build 5 | builds: 6 | - 7 | env: 8 | - CGO_ENABLED=0 9 | goos: 10 | - linux 11 | - darwin 12 | - freebsd 13 | main: ./cmd/autoscan 14 | goarch: 15 | - amd64 16 | - arm64 17 | - arm 18 | goarm: 19 | - 7 20 | ldflags: 21 | - -s -w 22 | - -X "main.Version={{ .Version }}" 23 | - -X "main.GitCommit={{ .ShortCommit }}" 24 | - -X "main.Timestamp={{ .Timestamp }}" 25 | flags: 26 | - -trimpath 27 | ignore: 
28 | - goos: freebsd 29 | goarch: arm64 30 | - goos: freebsd 31 | goarch: arm 32 | 33 | # MacOS Universal Binaries 34 | universal_binaries: 35 | - 36 | replace: true 37 | 38 | # Archive 39 | archives: 40 | - 41 | name_template: "{{ .ProjectName }}_v{{ .Version }}_{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}" 42 | format: "binary" 43 | 44 | # Checksum 45 | checksum: 46 | name_template: "checksums.txt" 47 | algorithm: sha512 48 | 49 | # Snapshot 50 | snapshot: 51 | name_template: "{{ .Major }}.{{ .Minor }}.{{ .Patch }}-dev+{{ .Branch }}" 52 | 53 | # Changelog 54 | changelog: 55 | filters: 56 | exclude: 57 | - "^docs:" 58 | - "^test:" 59 | - "^Merge branch" -------------------------------------------------------------------------------- /triggers/bernard/limiter.go: -------------------------------------------------------------------------------- 1 | package bernard 2 | 3 | import ( 4 | "context" 5 | "sync" 6 | "time" 7 | 8 | "golang.org/x/sync/semaphore" 9 | "golang.org/x/time/rate" 10 | ) 11 | 12 | const ( 13 | // how many requests can be sent per second by all drives using the same account file 14 | requestLimit = 8 15 | // how many drives can run at once (at the trigger level), e.g. 2 triggers, with 5 drives each. 
16 | syncLimit = 5 17 | ) 18 | 19 | type rateLimiter struct { 20 | ctx context.Context 21 | rl *rate.Limiter 22 | sem *semaphore.Weighted 23 | } 24 | 25 | func (r *rateLimiter) Wait() { 26 | _ = r.rl.Wait(r.ctx) 27 | } 28 | 29 | func (r *rateLimiter) Acquire(n int64) error { 30 | return r.sem.Acquire(r.ctx, n) 31 | } 32 | 33 | func (r *rateLimiter) Release(n int64) { 34 | r.sem.Release(n) 35 | } 36 | 37 | func newRateLimiter() *rateLimiter { 38 | return &rateLimiter{ 39 | ctx: context.Background(), 40 | rl: rate.NewLimiter(rate.Every(time.Second/time.Duration(requestLimit)), requestLimit), 41 | sem: semaphore.NewWeighted(int64(syncLimit)), 42 | } 43 | } 44 | 45 | var ( 46 | limiters = make(map[string]*rateLimiter) 47 | lock = &sync.Mutex{} 48 | ) 49 | 50 | func getRateLimiter(account string) (*rateLimiter, error) { 51 | lock.Lock() 52 | defer lock.Unlock() 53 | 54 | // return existing limiter for the account 55 | if limiter, ok := limiters[account]; ok { 56 | return limiter, nil 57 | } 58 | 59 | // add limiter to map 60 | limiter := newRateLimiter() 61 | limiters[account] = limiter 62 | 63 | return limiter, nil 64 | } 65 | -------------------------------------------------------------------------------- /targets/autoscan/autoscan.go: -------------------------------------------------------------------------------- 1 | package autoscan 2 | 3 | import ( 4 | "github.com/rs/zerolog" 5 | 6 | "github.com/saltydk/autoscan" 7 | ) 8 | 9 | type Config struct { 10 | URL string `yaml:"url"` 11 | User string `yaml:"username"` 12 | Pass string `yaml:"password"` 13 | Rewrite []autoscan.Rewrite `yaml:"rewrite"` 14 | Verbosity string `yaml:"verbosity"` 15 | } 16 | 17 | type target struct { 18 | url string 19 | user string 20 | pass string 21 | 22 | log zerolog.Logger 23 | rewrite autoscan.Rewriter 24 | api apiClient 25 | } 26 | 27 | func New(c Config) (autoscan.Target, error) { 28 | l := autoscan.GetLogger(c.Verbosity).With(). 29 | Str("target", "autoscan"). 
30 | Str("url", c.URL).Logger() 31 | 32 | rewriter, err := autoscan.NewRewriter(c.Rewrite) 33 | if err != nil { 34 | return nil, err 35 | } 36 | 37 | return &target{ 38 | url: c.URL, 39 | user: c.User, 40 | pass: c.Pass, 41 | 42 | log: l, 43 | rewrite: rewriter, 44 | api: newAPIClient(c.URL, c.User, c.Pass, l), 45 | }, nil 46 | } 47 | 48 | func (t target) Scan(scan autoscan.Scan) error { 49 | scanFolder := t.rewrite(scan.Folder) 50 | 51 | // send scan request 52 | l := t.log.With(). 53 | Str("path", scanFolder). 54 | Logger() 55 | 56 | l.Trace().Msg("Sending scan request") 57 | 58 | if err := t.api.Scan(scanFolder); err != nil { 59 | return err 60 | } 61 | 62 | l.Info().Msg("Scan moved to target") 63 | return nil 64 | } 65 | 66 | func (t target) Available() error { 67 | return t.api.Available() 68 | } 69 | -------------------------------------------------------------------------------- /Taskfile.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | vars: 4 | APP: autoscan 5 | CGO_ENABLED: 0 6 | GOOS: 7 | sh: go env GOOS 8 | GOARCH: 9 | sh: go env GOARCH 10 | DIST_PATH: dist 11 | BUILD_PATH: "{{.DIST_PATH}}/{{.APP}}_{{.GOOS}}_{{.GOARCH}}" 12 | 13 | env: 14 | GIT_COMMIT: 15 | sh: git rev-parse --short HEAD 16 | TIMESTAMP: '{{now | unixEpoch}}' 17 | VERSION: 0.0.0-dev 18 | 19 | tasks: 20 | test: 21 | desc: Go tests 22 | cmds: 23 | - go test ./... -cover -v -race ${GO_PACKAGES} 24 | 25 | vendor: 26 | desc: Go vendor 27 | sources: 28 | - '**/*.go' 29 | - ./go.sum 30 | cmds: 31 | - go mod vendor 32 | - go mod tidy 33 | 34 | vendor_update: 35 | desc: Go vendor update 36 | cmds: 37 | - go get -u ./... 
38 | - task: vendor 39 | 40 | build: 41 | desc: Generate a development binary 42 | dir: '{{.BUILD_PATH}}' 43 | deps: [ vendor ] 44 | cmds: 45 | - | 46 | CGO_ENABLED={{.CGO_ENABLED}} \ 47 | go build \ 48 | -mod vendor \ 49 | -trimpath \ 50 | -ldflags "-s -w -X main.Version=${VERSION} -X main.GitCommit=${GIT_COMMIT} -X main.Timestamp=${TIMESTAMP}" \ 51 | ../../cmd/{{.APP}} 52 | 53 | release: 54 | desc: Generate a release, but don't publish 55 | cmds: 56 | - goreleaser --skip-validate --skip-publish --rm-dist 57 | 58 | snapshot: 59 | desc: Generate a snapshot release 60 | cmds: 61 | - goreleaser --snapshot --skip-publish --rm-dist 62 | 63 | publish: 64 | desc: Generate a release, and publish 65 | cmds: 66 | - goreleaser --rm-dist -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/saltydk/autoscan 2 | 3 | go 1.19 4 | 5 | require ( 6 | github.com/BurntSushi/toml v0.3.1 // indirect 7 | github.com/alecthomas/kong v0.6.1 8 | github.com/fsnotify/fsnotify v1.5.4 9 | github.com/go-chi/chi/v5 v5.0.7 10 | github.com/l3uddz/bernard v0.5.1 11 | github.com/m-rots/stubbs v1.1.0 12 | github.com/mattn/go-sqlite3 v2.0.3+incompatible // indirect 13 | github.com/natefinch/lumberjack v2.0.0+incompatible 14 | github.com/oriser/regroup v0.0.0-20210730155327-fca8d7531263 15 | github.com/robfig/cron/v3 v3.0.1 16 | github.com/rs/zerolog v1.28.0 17 | golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect 18 | golang.org/x/sync v0.0.0-20220907140024-f12130a52804 19 | golang.org/x/sys v0.0.0-20220915200043-7b5979e65e41 // indirect 20 | golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9 21 | golang.org/x/tools v0.1.12 // indirect 22 | gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect 23 | gopkg.in/yaml.v2 v2.4.0 24 | modernc.org/cc/v3 v3.38.1 // indirect 25 | modernc.org/sqlite v1.18.2 26 | modernc.org/strutil v1.1.3 // indirect 27 | ) 28 
| 29 | require ( 30 | github.com/google/uuid v1.3.0 // indirect 31 | github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect 32 | github.com/mattn/go-colorable v0.1.13 // indirect 33 | github.com/mattn/go-isatty v0.0.16 // indirect 34 | github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect 35 | github.com/rs/xid v1.4.0 // indirect 36 | lukechampine.com/uint128 v1.2.0 // indirect 37 | modernc.org/ccgo/v3 v3.16.9 // indirect 38 | modernc.org/libc v1.19.0 // indirect 39 | modernc.org/mathutil v1.5.0 // indirect 40 | modernc.org/memory v1.4.0 // indirect 41 | modernc.org/opt v0.1.3 // indirect 42 | modernc.org/token v1.0.1 // indirect 43 | ) 44 | -------------------------------------------------------------------------------- /migrate/migrator.go: -------------------------------------------------------------------------------- 1 | package migrate 2 | 3 | import ( 4 | "database/sql" 5 | "embed" 6 | "errors" 7 | "fmt" 8 | 9 | "github.com/oriser/regroup" 10 | "modernc.org/sqlite" 11 | ) 12 | 13 | type Migrator struct { 14 | db *sql.DB 15 | dir string 16 | 17 | re *regroup.ReGroup 18 | } 19 | 20 | /* Credits to https://github.com/Boostport/migration */ 21 | 22 | func New(db *sql.DB, dir string) (*Migrator, error) { 23 | var err error 24 | 25 | m := &Migrator{ 26 | db: db, 27 | dir: dir, 28 | } 29 | 30 | // validate supported driver 31 | if _, ok := db.Driver().(*sqlite.Driver); !ok { 32 | return nil, errors.New("database instance is not using the sqlite driver") 33 | } 34 | 35 | // verify schema 36 | if err = m.verify(); err != nil { 37 | return nil, fmt.Errorf("verify: %w", err) 38 | } 39 | 40 | // compile migration regexp 41 | m.re, err = regroup.Compile(`(?P\d+)\w?(?P.+)?\.sql`) 42 | if err != nil { 43 | return nil, fmt.Errorf("regexp: %w", err) 44 | } 45 | 46 | return m, nil 47 | } 48 | 49 | func (m *Migrator) Migrate(fs *embed.FS, component string) error { 50 | // parse migrations 51 | migrations, err := m.parse(fs) 
52 | if err != nil { 53 | return fmt.Errorf("parse: %w", err) 54 | } 55 | 56 | if len(migrations) == 0 { 57 | return nil 58 | } 59 | 60 | // get current migration versions 61 | versions, err := m.versions(component) 62 | if err != nil { 63 | return fmt.Errorf("versions: %v: %w", component, err) 64 | } 65 | 66 | // migrate 67 | for _, mg := range migrations { 68 | // already have this version? 69 | if _, exists := versions[mg.Version]; exists { 70 | continue 71 | } 72 | 73 | // migrate 74 | if err := m.exec(component, mg); err != nil { 75 | return fmt.Errorf("migrate: %v: %w", mg.Filename, err) 76 | } 77 | } 78 | 79 | return nil 80 | } 81 | -------------------------------------------------------------------------------- /triggers/bernard/datastore.go: -------------------------------------------------------------------------------- 1 | package bernard 2 | 3 | import ( 4 | "database/sql" 5 | "errors" 6 | "fmt" 7 | 8 | "github.com/l3uddz/bernard/datastore" 9 | "github.com/l3uddz/bernard/datastore/sqlite" 10 | ) 11 | 12 | type bds struct { 13 | *sqlite.Datastore 14 | } 15 | 16 | const sqlSelectFile = `SELECT id, name, parent, size, md5, trashed FROM file WHERE drive = $1 AND id = $2 LIMIT 1` 17 | 18 | func (d *bds) GetFile(driveID string, fileID string) (*datastore.File, error) { 19 | f := new(datastore.File) 20 | 21 | row := d.DB.QueryRow(sqlSelectFile, driveID, fileID) 22 | err := row.Scan(&f.ID, &f.Name, &f.Parent, &f.Size, &f.MD5, &f.Trashed) 23 | 24 | switch { 25 | case errors.Is(err, sql.ErrNoRows): 26 | return nil, fmt.Errorf("%v: file not found: %w", fileID, sql.ErrNoRows) 27 | case err != nil: 28 | return nil, err 29 | default: 30 | break 31 | } 32 | 33 | return f, nil 34 | } 35 | 36 | const sqlSelectFolder = `SELECT id, name, trashed, parent FROM folder WHERE drive = $1 AND id = $2 LIMIT 1` 37 | 38 | func (d *bds) GetFolder(driveID string, folderID string) (*datastore.Folder, error) { 39 | f := new(datastore.Folder) 40 | 41 | row := 
d.DB.QueryRow(sqlSelectFolder, driveID, folderID) 42 | err := row.Scan(&f.ID, &f.Name, &f.Trashed, &f.Parent) 43 | 44 | switch { 45 | case errors.Is(err, sql.ErrNoRows): 46 | return nil, fmt.Errorf("%v: folder not found: %w", folderID, sql.ErrNoRows) 47 | case err != nil: 48 | return nil, err 49 | default: 50 | break 51 | } 52 | 53 | return f, nil 54 | } 55 | 56 | const sqlSelectDrive = `SELECT d.id, f.name, d.pageToken FROM drive d JOIN folder f ON f.id = d.id WHERE d.id = $1 LIMIT 1` 57 | 58 | func (d *bds) GetDrive(driveID string) (*datastore.Drive, error) { 59 | drv := new(datastore.Drive) 60 | 61 | row := d.DB.QueryRow(sqlSelectDrive, driveID) 62 | err := row.Scan(&drv.ID, &drv.Name, &drv.PageToken) 63 | 64 | switch { 65 | case errors.Is(err, sql.ErrNoRows): 66 | return nil, fmt.Errorf("%v: drive not found: %w", driveID, sql.ErrNoRows) 67 | case err != nil: 68 | return nil, err 69 | default: 70 | break 71 | } 72 | 73 | return drv, nil 74 | } 75 | -------------------------------------------------------------------------------- /targets/emby/emby.go: -------------------------------------------------------------------------------- 1 | package emby 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | 7 | "github.com/rs/zerolog" 8 | 9 | "github.com/saltydk/autoscan" 10 | ) 11 | 12 | type Config struct { 13 | URL string `yaml:"url"` 14 | Token string `yaml:"token"` 15 | Rewrite []autoscan.Rewrite `yaml:"rewrite"` 16 | Verbosity string `yaml:"verbosity"` 17 | } 18 | 19 | type target struct { 20 | url string 21 | token string 22 | libraries []library 23 | 24 | log zerolog.Logger 25 | rewrite autoscan.Rewriter 26 | api apiClient 27 | } 28 | 29 | func New(c Config) (autoscan.Target, error) { 30 | l := autoscan.GetLogger(c.Verbosity).With(). 31 | Str("target", "emby"). 32 | Str("url", c.URL). 
33 | Logger() 34 | 35 | rewriter, err := autoscan.NewRewriter(c.Rewrite) 36 | if err != nil { 37 | return nil, err 38 | } 39 | 40 | api := newAPIClient(c.URL, c.Token, l) 41 | 42 | libraries, err := api.Libraries() 43 | if err != nil { 44 | return nil, err 45 | } 46 | 47 | l.Debug(). 48 | Interface("libraries", libraries). 49 | Msg("Retrieved libraries") 50 | 51 | return &target{ 52 | url: c.URL, 53 | token: c.Token, 54 | libraries: libraries, 55 | 56 | log: l, 57 | rewrite: rewriter, 58 | api: api, 59 | }, nil 60 | } 61 | 62 | func (t target) Available() error { 63 | return t.api.Available() 64 | } 65 | 66 | func (t target) Scan(scan autoscan.Scan) error { 67 | // determine library for this scan 68 | scanFolder := t.rewrite(scan.Folder) 69 | 70 | lib, err := t.getScanLibrary(scanFolder) 71 | if err != nil { 72 | t.log.Warn(). 73 | Err(err). 74 | Msg("No target libraries found") 75 | 76 | return nil 77 | } 78 | 79 | l := t.log.With(). 80 | Str("path", scanFolder). 81 | Str("library", lib.Name). 
82 | Logger() 83 | 84 | // send scan request 85 | l.Trace().Msg("Sending scan request") 86 | 87 | if err := t.api.Scan(scanFolder); err != nil { 88 | return err 89 | } 90 | 91 | l.Info().Msg("Scan moved to target") 92 | return nil 93 | } 94 | 95 | func (t target) getScanLibrary(folder string) (*library, error) { 96 | for _, l := range t.libraries { 97 | if strings.HasPrefix(folder, l.Path) { 98 | return &l, nil 99 | } 100 | } 101 | 102 | return nil, fmt.Errorf("%v: failed determining library", folder) 103 | } 104 | -------------------------------------------------------------------------------- /targets/jellyfin/jellyfin.go: -------------------------------------------------------------------------------- 1 | package jellyfin 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | 7 | "github.com/rs/zerolog" 8 | 9 | "github.com/saltydk/autoscan" 10 | ) 11 | 12 | type Config struct { 13 | URL string `yaml:"url"` 14 | Token string `yaml:"token"` 15 | Rewrite []autoscan.Rewrite `yaml:"rewrite"` 16 | Verbosity string `yaml:"verbosity"` 17 | } 18 | 19 | type target struct { 20 | url string 21 | token string 22 | libraries []library 23 | 24 | log zerolog.Logger 25 | rewrite autoscan.Rewriter 26 | api apiClient 27 | } 28 | 29 | func New(c Config) (autoscan.Target, error) { 30 | l := autoscan.GetLogger(c.Verbosity).With(). 31 | Str("target", "jellyfin"). 32 | Str("url", c.URL). 33 | Logger() 34 | 35 | rewriter, err := autoscan.NewRewriter(c.Rewrite) 36 | if err != nil { 37 | return nil, err 38 | } 39 | 40 | api := newAPIClient(c.URL, c.Token, l) 41 | 42 | libraries, err := api.Libraries() 43 | if err != nil { 44 | return nil, err 45 | } 46 | 47 | l.Debug(). 48 | Interface("libraries", libraries). 
49 | Msg("Retrieved libraries") 50 | 51 | return &target{ 52 | url: c.URL, 53 | token: c.Token, 54 | libraries: libraries, 55 | 56 | log: l, 57 | rewrite: rewriter, 58 | api: api, 59 | }, nil 60 | } 61 | 62 | func (t target) Available() error { 63 | return t.api.Available() 64 | } 65 | 66 | func (t target) Scan(scan autoscan.Scan) error { 67 | // determine library for this scan 68 | scanFolder := t.rewrite(scan.Folder) 69 | 70 | lib, err := t.getScanLibrary(scanFolder) 71 | if err != nil { 72 | t.log.Warn(). 73 | Err(err). 74 | Msg("No target libraries found") 75 | 76 | return nil 77 | } 78 | 79 | l := t.log.With(). 80 | Str("path", scanFolder). 81 | Str("library", lib.Name). 82 | Logger() 83 | 84 | // send scan request 85 | l.Trace().Msg("Sending scan request") 86 | 87 | if err := t.api.Scan(scanFolder); err != nil { 88 | return err 89 | } 90 | 91 | l.Info().Msg("Scan moved to target") 92 | return nil 93 | } 94 | 95 | func (t target) getScanLibrary(folder string) (*library, error) { 96 | for _, l := range t.libraries { 97 | if strings.HasPrefix(folder, l.Path) { 98 | return &l, nil 99 | } 100 | } 101 | 102 | return nil, fmt.Errorf("%v: failed determining library", folder) 103 | } 104 | -------------------------------------------------------------------------------- /triggers/manual/manual.go: -------------------------------------------------------------------------------- 1 | package manual 2 | 3 | import ( 4 | _ "embed" 5 | "net/http" 6 | "path" 7 | "time" 8 | 9 | "github.com/rs/zerolog/hlog" 10 | 11 | "github.com/saltydk/autoscan" 12 | ) 13 | 14 | type Config struct { 15 | Rewrite []autoscan.Rewrite `yaml:"rewrite"` 16 | Priority int `yaml:"priority"` 17 | Verbosity string `yaml:"verbosity"` 18 | } 19 | 20 | var ( 21 | //go:embed "template.html" 22 | template []byte 23 | ) 24 | 25 | // New creates an autoscan-compatible HTTP Trigger for manual webhooks. 
26 | func New(c Config) (autoscan.HTTPTrigger, error) { 27 | rewriter, err := autoscan.NewRewriter(c.Rewrite) 28 | if err != nil { 29 | return nil, err 30 | } 31 | 32 | trigger := func(callback autoscan.ProcessorFunc) http.Handler { 33 | return handler{ 34 | callback: callback, 35 | priority: c.Priority, 36 | rewrite: rewriter, 37 | } 38 | } 39 | 40 | return trigger, nil 41 | } 42 | 43 | type handler struct { 44 | priority int 45 | rewrite autoscan.Rewriter 46 | callback autoscan.ProcessorFunc 47 | } 48 | 49 | func (h handler) ServeHTTP(rw http.ResponseWriter, r *http.Request) { 50 | var err error 51 | rlog := hlog.FromRequest(r) 52 | 53 | query := r.URL.Query() 54 | directories := query["dir"] 55 | 56 | switch r.Method { 57 | case "GET": 58 | rw.Header().Set("Content-Type", "text/html") 59 | _, _ = rw.Write(template) 60 | return 61 | case "HEAD": 62 | rw.WriteHeader(http.StatusOK) 63 | return 64 | } 65 | 66 | if len(directories) == 0 { 67 | rlog.Error().Msg("Manual webhook should receive at least one directory") 68 | rw.WriteHeader(http.StatusBadRequest) 69 | return 70 | } 71 | 72 | rlog.Trace().Interface("dirs", directories).Msg("Received directories") 73 | 74 | scans := make([]autoscan.Scan, 0) 75 | 76 | for _, dir := range directories { 77 | // Rewrite the path based on the provided rewriter. 78 | folderPath := h.rewrite(path.Clean(dir)) 79 | 80 | scans = append(scans, autoscan.Scan{ 81 | Folder: folderPath, 82 | Priority: h.priority, 83 | Time: now(), 84 | }) 85 | } 86 | 87 | err = h.callback(scans...) 88 | if err != nil { 89 | rlog.Error().Err(err).Msg("Processor could not process scans") 90 | rw.WriteHeader(http.StatusInternalServerError) 91 | return 92 | } 93 | 94 | rw.WriteHeader(http.StatusOK) 95 | for _, scan := range scans { 96 | rlog.Info(). 97 | Str("path", scan.Folder). 
98 | Msg("Scan moved to processor") 99 | } 100 | } 101 | 102 | var now = time.Now 103 | -------------------------------------------------------------------------------- /targets/autoscan/api.go: -------------------------------------------------------------------------------- 1 | package autoscan 2 | 3 | import ( 4 | "fmt" 5 | "net/http" 6 | "net/url" 7 | 8 | "github.com/rs/zerolog" 9 | 10 | "github.com/saltydk/autoscan" 11 | ) 12 | 13 | type apiClient struct { 14 | client *http.Client 15 | log zerolog.Logger 16 | baseURL string 17 | user string 18 | pass string 19 | } 20 | 21 | func newAPIClient(baseURL string, user string, pass string, log zerolog.Logger) apiClient { 22 | return apiClient{ 23 | client: &http.Client{}, 24 | log: log, 25 | baseURL: baseURL, 26 | user: user, 27 | pass: pass, 28 | } 29 | } 30 | 31 | func (c apiClient) do(req *http.Request) (*http.Response, error) { 32 | res, err := c.client.Do(req) 33 | if err != nil { 34 | return nil, fmt.Errorf("%v: %w", err, autoscan.ErrTargetUnavailable) 35 | } 36 | 37 | if res.StatusCode >= 200 && res.StatusCode < 300 { 38 | return res, nil 39 | } 40 | 41 | c.log.Trace(). 42 | Stringer("request_url", res.Request.URL). 43 | Int("response_status", res.StatusCode). 
44 | Msg("Request failed") 45 | 46 | // statusCode not in the 2xx range, close response 47 | res.Body.Close() 48 | 49 | switch res.StatusCode { 50 | case 401: 51 | return nil, fmt.Errorf("invalid basic auth: %s: %w", res.Status, autoscan.ErrFatal) 52 | case 404, 500, 502, 503, 504: 53 | return nil, fmt.Errorf("%s: %w", res.Status, autoscan.ErrTargetUnavailable) 54 | default: 55 | return nil, fmt.Errorf("%s: %w", res.Status, autoscan.ErrFatal) 56 | } 57 | } 58 | 59 | func (c apiClient) Available() error { 60 | // create request 61 | req, err := http.NewRequest("HEAD", autoscan.JoinURL(c.baseURL, "triggers", "manual"), nil) 62 | if err != nil { 63 | return fmt.Errorf("failed creating head request: %v: %w", err, autoscan.ErrFatal) 64 | } 65 | 66 | if c.user != "" && c.pass != "" { 67 | req.SetBasicAuth(c.user, c.pass) 68 | } 69 | 70 | // send request 71 | res, err := c.do(req) 72 | if err != nil { 73 | return fmt.Errorf("availability: %w", err) 74 | } 75 | 76 | defer res.Body.Close() 77 | return nil 78 | } 79 | 80 | func (c apiClient) Scan(path string) error { 81 | // create request 82 | req, err := http.NewRequest("POST", autoscan.JoinURL(c.baseURL, "triggers", "manual"), nil) 83 | if err != nil { 84 | return fmt.Errorf("failed creating scan request: %v: %w", err, autoscan.ErrFatal) 85 | } 86 | 87 | if c.user != "" && c.pass != "" { 88 | req.SetBasicAuth(c.user, c.pass) 89 | } 90 | 91 | q := url.Values{} 92 | q.Add("dir", path) 93 | req.URL.RawQuery = q.Encode() 94 | 95 | // send request 96 | res, err := c.do(req) 97 | if err != nil { 98 | return fmt.Errorf("scan: %w", err) 99 | } 100 | 101 | defer res.Body.Close() 102 | return nil 103 | } 104 | -------------------------------------------------------------------------------- /autoscan_test.go: -------------------------------------------------------------------------------- 1 | package autoscan 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | func TestRewriter(t *testing.T) { 8 | type Test struct { 9 | Name string 10 | 
Rewrites []Rewrite 11 | Input string 12 | Expected string 13 | } 14 | 15 | var testCases = []Test{ 16 | { 17 | Name: "One parameter with wildcard", 18 | Input: "/mnt/unionfs/Media/Movies/Example Movie/movie.mkv", 19 | Expected: "/data/Movies/Example Movie/movie.mkv", 20 | Rewrites: []Rewrite{{ 21 | From: "/mnt/unionfs/Media/", 22 | To: "/data/", 23 | }}, 24 | }, 25 | { 26 | Name: "One parameter with glob thingy", 27 | Input: "/Media/Movies/test.mkv", 28 | Expected: "/data/Movies/test.mkv", 29 | Rewrites: []Rewrite{{ 30 | From: "/Media/(.*)", 31 | To: "/data/$1", 32 | }}, 33 | }, 34 | { 35 | Name: "No wildcard", 36 | Input: "/Media/whatever", 37 | Expected: "/whatever", 38 | Rewrites: []Rewrite{{ 39 | From: "^/Media/", 40 | To: "/", 41 | }}, 42 | }, 43 | { 44 | Name: "Unicode (PAS issue #73)", 45 | Input: "/media/b33f/saitoh183/private/Videos/FrenchTV/L'échappée/Season 03", 46 | Expected: "/Videos/FrenchTV/L'échappée/Season 03", 47 | Rewrites: []Rewrite{{ 48 | From: "/media/b33f/saitoh183/private/", 49 | To: "/", 50 | }}, 51 | }, 52 | { 53 | Name: "Returns input when no rules are given", 54 | Input: "/mnt/unionfs/test/example.mp4", 55 | Expected: "/mnt/unionfs/test/example.mp4", 56 | }, 57 | { 58 | Name: "Returns input when rule does not match", 59 | Input: "/test/example.mp4", 60 | Expected: "/test/example.mp4", 61 | Rewrites: []Rewrite{{ 62 | From: "^/Media/", 63 | To: "/mnt/unionfs/Media/", 64 | }}, 65 | }, 66 | { 67 | Name: "Uses second rule if first one does not match", 68 | Input: "/test/example.mp4", 69 | Expected: "/mnt/unionfs/example.mp4", 70 | Rewrites: []Rewrite{ 71 | {From: "^/Media/", To: "/mnt/unionfs/Media/"}, 72 | {From: "^/test/", To: "/mnt/unionfs/"}, 73 | }, 74 | }, 75 | { 76 | Name: "Hotio", 77 | Input: "/movies4k/example.mp4", 78 | Expected: "/mnt/unionfs/movies4k/example.mp4", 79 | Rewrites: []Rewrite{ 80 | {From: "^/movies/", To: "/mnt/unionfs/movies/"}, 81 | {From: "^/movies4k/", To: "/mnt/unionfs/movies4k/"}, 82 | }, 83 | }, 84 | } 85 | 86 
| for _, tc := range testCases { 87 | t.Run(tc.Name, func(t *testing.T) { 88 | rewriter, err := NewRewriter(tc.Rewrites) 89 | 90 | if err != nil { 91 | t.Fatal(err) 92 | } 93 | 94 | result := rewriter(tc.Input) 95 | if result != tc.Expected { 96 | t.Errorf("%s does not equal %s", result, tc.Expected) 97 | } 98 | }) 99 | } 100 | 101 | } 102 | -------------------------------------------------------------------------------- /triggers/lidarr/lidarr.go: -------------------------------------------------------------------------------- 1 | package lidarr 2 | 3 | import ( 4 | "encoding/json" 5 | "net/http" 6 | "path" 7 | "strings" 8 | "time" 9 | 10 | "github.com/rs/zerolog/hlog" 11 | 12 | "github.com/saltydk/autoscan" 13 | ) 14 | 15 | type Config struct { 16 | Name string `yaml:"name"` 17 | Priority int `yaml:"priority"` 18 | Rewrite []autoscan.Rewrite `yaml:"rewrite"` 19 | Verbosity string `yaml:"verbosity"` 20 | } 21 | 22 | // New creates an autoscan-compatible HTTP Trigger for Lidarr webhooks. 
23 | func New(c Config) (autoscan.HTTPTrigger, error) { 24 | rewriter, err := autoscan.NewRewriter(c.Rewrite) 25 | if err != nil { 26 | return nil, err 27 | } 28 | 29 | trigger := func(callback autoscan.ProcessorFunc) http.Handler { 30 | return handler{ 31 | callback: callback, 32 | priority: c.Priority, 33 | rewrite: rewriter, 34 | } 35 | } 36 | 37 | return trigger, nil 38 | } 39 | 40 | type handler struct { 41 | priority int 42 | rewrite autoscan.Rewriter 43 | callback autoscan.ProcessorFunc 44 | } 45 | 46 | type lidarrEvent struct { 47 | Type string `json:"eventType"` 48 | Upgrade bool `json:"isUpgrade"` 49 | 50 | Files []struct { 51 | Path string 52 | } `json:"trackFiles"` 53 | } 54 | 55 | func (h handler) ServeHTTP(rw http.ResponseWriter, r *http.Request) { 56 | var err error 57 | l := hlog.FromRequest(r) 58 | 59 | event := new(lidarrEvent) 60 | err = json.NewDecoder(r.Body).Decode(event) 61 | if err != nil { 62 | l.Error().Err(err).Msg("Failed decoding request") 63 | rw.WriteHeader(http.StatusBadRequest) 64 | return 65 | } 66 | 67 | l.Trace().Interface("event", event).Msg("Received JSON body") 68 | 69 | if strings.EqualFold(event.Type, "Test") { 70 | l.Info().Msg("Received test event") 71 | rw.WriteHeader(http.StatusOK) 72 | return 73 | } 74 | 75 | if !strings.EqualFold(event.Type, "Download") || len(event.Files) == 0 { 76 | l.Error().Msg("Required fields are missing") 77 | rw.WriteHeader(http.StatusBadRequest) 78 | return 79 | } 80 | 81 | unique := make(map[string]bool) 82 | scans := make([]autoscan.Scan, 0) 83 | 84 | for _, f := range event.Files { 85 | folderPath := path.Dir(h.rewrite(f.Path)) 86 | if _, ok := unique[folderPath]; ok { 87 | continue 88 | } 89 | 90 | // add scan 91 | unique[folderPath] = true 92 | scans = append(scans, autoscan.Scan{ 93 | Folder: folderPath, 94 | Priority: h.priority, 95 | Time: now(), 96 | }) 97 | } 98 | 99 | err = h.callback(scans...) 
100 | if err != nil { 101 | l.Error().Err(err).Msg("Processor could not process scans") 102 | rw.WriteHeader(http.StatusInternalServerError) 103 | return 104 | } 105 | 106 | rw.WriteHeader(http.StatusOK) 107 | l.Info(). 108 | Str("path", scans[0].Folder). 109 | Str("event", event.Type). 110 | Msg("Scan moved to processor") 111 | } 112 | 113 | var now = time.Now 114 | -------------------------------------------------------------------------------- /processor/processor.go: -------------------------------------------------------------------------------- 1 | package processor 2 | 3 | import ( 4 | "database/sql" 5 | "fmt" 6 | "os" 7 | "sync/atomic" 8 | "time" 9 | 10 | "github.com/saltydk/autoscan" 11 | "github.com/saltydk/autoscan/migrate" 12 | 13 | "golang.org/x/sync/errgroup" 14 | ) 15 | 16 | type Config struct { 17 | Anchors []string 18 | MinimumAge time.Duration 19 | 20 | Db *sql.DB 21 | Mg *migrate.Migrator 22 | } 23 | 24 | func New(c Config) (*Processor, error) { 25 | store, err := newDatastore(c.Db, c.Mg) 26 | if err != nil { 27 | return nil, err 28 | } 29 | 30 | proc := &Processor{ 31 | anchors: c.Anchors, 32 | minimumAge: c.MinimumAge, 33 | store: store, 34 | } 35 | return proc, nil 36 | } 37 | 38 | type Processor struct { 39 | anchors []string 40 | minimumAge time.Duration 41 | store *datastore 42 | processed int64 43 | } 44 | 45 | func (p *Processor) Add(scans ...autoscan.Scan) error { 46 | return p.store.Upsert(scans) 47 | } 48 | 49 | // ScansRemaining returns the amount of scans remaining 50 | func (p *Processor) ScansRemaining() (int, error) { 51 | return p.store.GetScansRemaining() 52 | } 53 | 54 | // ScansProcessed returns the amount of scans processed 55 | func (p *Processor) ScansProcessed() int64 { 56 | return atomic.LoadInt64(&p.processed) 57 | } 58 | 59 | // CheckAvailability checks whether all targets are available. 60 | // If one target is not available, the error will return. 
61 | func (p *Processor) CheckAvailability(targets []autoscan.Target) error { 62 | g := new(errgroup.Group) 63 | 64 | for _, target := range targets { 65 | target := target 66 | g.Go(func() error { 67 | return target.Available() 68 | }) 69 | } 70 | 71 | return g.Wait() 72 | } 73 | 74 | func (p *Processor) callTargets(targets []autoscan.Target, scan autoscan.Scan) error { 75 | g := new(errgroup.Group) 76 | 77 | for _, target := range targets { 78 | target := target 79 | g.Go(func() error { 80 | return target.Scan(scan) 81 | }) 82 | } 83 | 84 | return g.Wait() 85 | } 86 | 87 | func (p *Processor) Process(targets []autoscan.Target) error { 88 | scan, err := p.store.GetAvailableScan(p.minimumAge) 89 | if err != nil { 90 | return err 91 | } 92 | 93 | // Check whether all anchors are present 94 | for _, anchor := range p.anchors { 95 | if !fileExists(anchor) { 96 | return fmt.Errorf("%s: %w", anchor, autoscan.ErrAnchorUnavailable) 97 | } 98 | } 99 | 100 | // Fatal or Target Unavailable -> return original error 101 | err = p.callTargets(targets, scan) 102 | if err != nil { 103 | return err 104 | } 105 | 106 | err = p.store.Delete(scan) 107 | if err != nil { 108 | return err 109 | } 110 | 111 | atomic.AddInt64(&p.processed, 1) 112 | return nil 113 | } 114 | 115 | var fileExists = func(fileName string) bool { 116 | info, err := os.Stat(fileName) 117 | if err != nil { 118 | return false 119 | } 120 | 121 | return !info.IsDir() 122 | } 123 | -------------------------------------------------------------------------------- /triggers/readarr/readarr_test.go: -------------------------------------------------------------------------------- 1 | package readarr 2 | 3 | import ( 4 | "errors" 5 | "net/http" 6 | "net/http/httptest" 7 | "os" 8 | "reflect" 9 | "testing" 10 | "time" 11 | 12 | "github.com/saltydk/autoscan" 13 | ) 14 | 15 | func TestHandler(t *testing.T) { 16 | type Given struct { 17 | Config Config 18 | Fixture string 19 | } 20 | 21 | type Expected struct { 22 | Scans 
[]autoscan.Scan 23 | StatusCode int 24 | } 25 | 26 | type Test struct { 27 | Name string 28 | Given Given 29 | Expected Expected 30 | } 31 | 32 | standardConfig := Config{ 33 | Name: "readarr", 34 | Priority: 5, 35 | Rewrite: []autoscan.Rewrite{{ 36 | From: "/Books/*", 37 | To: "/mnt/unionfs/Media/Books/$1", 38 | }}, 39 | } 40 | 41 | currentTime := time.Now() 42 | now = func() time.Time { 43 | return currentTime 44 | } 45 | 46 | var testCases = []Test{ 47 | { 48 | "Scan has all the correct fields", 49 | Given{ 50 | Config: standardConfig, 51 | Fixture: "testdata/sanderson.json", 52 | }, 53 | Expected{ 54 | StatusCode: 200, 55 | Scans: []autoscan.Scan{{ 56 | Folder: "/mnt/unionfs/Media/Books/Brandon Sanderson/The Way of Kings (2010)", 57 | Priority: 5, 58 | Time: currentTime, 59 | }}, 60 | }, 61 | }, 62 | { 63 | "Returns bad request on invalid JSON", 64 | Given{ 65 | Config: standardConfig, 66 | Fixture: "testdata/invalid.json", 67 | }, 68 | Expected{ 69 | StatusCode: 400, 70 | }, 71 | }, 72 | { 73 | "Returns 200 on Test event without emitting a scan", 74 | Given{ 75 | Config: standardConfig, 76 | Fixture: "testdata/test.json", 77 | }, 78 | Expected{ 79 | StatusCode: 200, 80 | }, 81 | }, 82 | } 83 | 84 | for _, tc := range testCases { 85 | t.Run(tc.Name, func(t *testing.T) { 86 | callback := func(scans ...autoscan.Scan) error { 87 | if !reflect.DeepEqual(tc.Expected.Scans, scans) { 88 | t.Logf("want: %v", tc.Expected.Scans) 89 | t.Logf("got: %v", scans) 90 | t.Errorf("Scans do not equal") 91 | return errors.New("Scans do not equal") 92 | } 93 | 94 | return nil 95 | } 96 | 97 | trigger, err := New(tc.Given.Config) 98 | if err != nil { 99 | t.Fatalf("Could not create Readarr Trigger: %v", err) 100 | } 101 | 102 | server := httptest.NewServer(trigger(callback)) 103 | defer server.Close() 104 | 105 | request, err := os.Open(tc.Given.Fixture) 106 | if err != nil { 107 | t.Fatalf("Could not open the fixture: %s", tc.Given.Fixture) 108 | } 109 | 110 | res, err := 
// NewPostProcessBernardDiff returns a bernard sync Hook that reconciles a
// raw sqlite.Difference against the state already stored for driveID, so
// downstream consumers only see changes that are new to us:
//
//   - removals of files/folders we already recorded as trashed are dropped;
//   - "changed" entries whose trashed flag flipped are reclassified as
//     additions (un-trashed) or removals (newly trashed).
//
// The hook mutates diff in place. Each loop uses the delete-and-decrement
// idiom (append(s[:i], s[i+1:]...) then i--) so indices stay valid while
// elements are removed mid-iteration; the exact ordering matters.
func NewPostProcessBernardDiff(driveID string, store *bds, diff *sqlite.Difference) bernard.Hook {
	hook := func(drive datastore.Drive, files []datastore.File, folders []datastore.Folder, removed []string) error {
		// dont include removes for files already known as trashed
		for i := 0; i < len(diff.RemovedFiles); i++ {
			df := diff.RemovedFiles[i]

			// look up our previously stored copy of this file
			ef, err := store.GetFile(driveID, df.ID)
			if err != nil {
				return fmt.Errorf("retrieving file (id: %v): %w", df.ID, err)
			}

			switch {
			case ef.Trashed && df.Trashed:
				// this removed file was already known as trashed (removed to us)
				diff.RemovedFiles = append(diff.RemovedFiles[:i], diff.RemovedFiles[i+1:]...)
				i--
			}
		}

		// dont include removes for folders already known as trashed
		for i := 0; i < len(diff.RemovedFolders); i++ {
			df := diff.RemovedFolders[i]

			// look up our previously stored copy of this folder
			ef, err := store.GetFolder(driveID, df.ID)
			if err != nil {
				return fmt.Errorf("retrieving folder (id: %v): %w", df.ID, err)
			}

			switch {
			case ef.Trashed && df.Trashed:
				// this removed folder was already known as trashed (removed to us)
				diff.RemovedFolders = append(diff.RemovedFolders[:i], diff.RemovedFolders[i+1:]...)
				i--
			}
		}

		// remove changed files that were trashed or un-trashed
		// (df = new state from the diff, ef = existing/old state)
		for i := 0; i < len(diff.ChangedFiles); i++ {
			df := diff.ChangedFiles[i].New
			ef := diff.ChangedFiles[i].Old

			switch {
			case ef.Trashed && !df.Trashed:
				// existing state was trashed, but new state is not
				// -> effectively a new file from our point of view
				diff.AddedFiles = append(diff.AddedFiles, df)
				diff.ChangedFiles = append(diff.ChangedFiles[:i], diff.ChangedFiles[i+1:]...)
				i--
			case !ef.Trashed && df.Trashed:
				// new state is trashed, existing state is not
				// -> effectively a removal from our point of view
				diff.RemovedFiles = append(diff.RemovedFiles, df)
				diff.ChangedFiles = append(diff.ChangedFiles[:i], diff.ChangedFiles[i+1:]...)
				i--
			}
		}

		// same reclassification for changed folders
		for i := 0; i < len(diff.ChangedFolders); i++ {
			df := diff.ChangedFolders[i].New
			ef := diff.ChangedFolders[i].Old

			switch {
			case ef.Trashed && !df.Trashed:
				// existing state was trashed, but new state is not
				diff.AddedFolders = append(diff.AddedFolders, df)
				diff.ChangedFolders = append(diff.ChangedFolders[:i], diff.ChangedFolders[i+1:]...)
				i--
			case !ef.Trashed && df.Trashed:
				// new state is trashed, existing state is not
				diff.RemovedFolders = append(diff.RemovedFolders, df)
				diff.ChangedFolders = append(diff.ChangedFolders[:i], diff.ChangedFolders[i+1:]...)
				i--
			}
		}

		return nil
	}

	return hook
}
return 73 | } 74 | 75 | //Only handle test and download. Everything else is ignored. 76 | if !strings.EqualFold(event.Type, "Download") || len(event.Files) == 0 { 77 | l.Error().Msg("Required fields are missing") 78 | rw.WriteHeader(http.StatusBadRequest) 79 | return 80 | } 81 | 82 | unique := make(map[string]bool) 83 | scans := make([]autoscan.Scan, 0) 84 | 85 | for _, f := range event.Files { 86 | folderPath := path.Dir(h.rewrite(f.Path)) 87 | if _, ok := unique[folderPath]; ok { 88 | continue 89 | } 90 | 91 | // add scan 92 | unique[folderPath] = true 93 | scans = append(scans, autoscan.Scan{ 94 | Folder: folderPath, 95 | Priority: h.priority, 96 | Time: now(), 97 | }) 98 | } 99 | 100 | err = h.callback(scans...) 101 | if err != nil { 102 | l.Error().Err(err).Msg("Processor could not process scans") 103 | rw.WriteHeader(http.StatusInternalServerError) 104 | return 105 | } 106 | 107 | rw.WriteHeader(http.StatusOK) 108 | l.Info(). 109 | Str("path", scans[0].Folder). 110 | Str("event", event.Type). 
111 | Msg("Scan moved to processor") 112 | } 113 | 114 | var now = time.Now 115 | -------------------------------------------------------------------------------- /triggers/manual/manual_test.go: -------------------------------------------------------------------------------- 1 | package manual 2 | 3 | import ( 4 | "errors" 5 | "net/http" 6 | "net/http/httptest" 7 | "net/url" 8 | "reflect" 9 | "testing" 10 | "time" 11 | 12 | "github.com/saltydk/autoscan" 13 | ) 14 | 15 | func TestHandler(t *testing.T) { 16 | type Given struct { 17 | Config Config 18 | Query url.Values 19 | } 20 | 21 | type Expected struct { 22 | Scans []autoscan.Scan 23 | StatusCode int 24 | } 25 | 26 | type Test struct { 27 | Name string 28 | Given Given 29 | Expected Expected 30 | } 31 | 32 | standardConfig := Config{ 33 | Priority: 5, 34 | Rewrite: []autoscan.Rewrite{{ 35 | From: "/Movies/*", 36 | To: "/mnt/unionfs/Media/Movies/$1", 37 | }}, 38 | } 39 | 40 | currentTime := time.Now() 41 | now = func() time.Time { 42 | return currentTime 43 | } 44 | 45 | var testCases = []Test{ 46 | { 47 | "Returns bad request when no directories are given", 48 | Given{ 49 | Config: standardConfig, 50 | Query: url.Values{ 51 | "dir": []string{}, 52 | }, 53 | }, 54 | Expected{ 55 | StatusCode: 400, 56 | }, 57 | }, 58 | { 59 | "Returns 200 when given multiple directories", 60 | Given{ 61 | Config: standardConfig, 62 | Query: url.Values{ 63 | "dir": []string{ 64 | "/Movies/Interstellar (2014)", 65 | "/Movies/Parasite (2019)", 66 | }, 67 | }, 68 | }, 69 | Expected{ 70 | StatusCode: 200, 71 | Scans: []autoscan.Scan{ 72 | { 73 | Folder: "/mnt/unionfs/Media/Movies/Interstellar (2014)", 74 | Priority: 5, 75 | Time: currentTime, 76 | }, 77 | { 78 | Folder: "/mnt/unionfs/Media/Movies/Parasite (2019)", 79 | Priority: 5, 80 | Time: currentTime, 81 | }, 82 | }, 83 | }, 84 | }, 85 | } 86 | 87 | for _, tc := range testCases { 88 | t.Run(tc.Name, func(t *testing.T) { 89 | callback := func(scans ...autoscan.Scan) error { 90 | if 
!reflect.DeepEqual(tc.Expected.Scans, scans) { 91 | t.Logf("want: %v", tc.Expected.Scans) 92 | t.Logf("got: %v", scans) 93 | t.Errorf("Scans do not equal") 94 | return errors.New("Scans do not equal") 95 | } 96 | 97 | return nil 98 | } 99 | 100 | trigger, err := New(tc.Given.Config) 101 | if err != nil { 102 | t.Fatalf("Could not create Manual Trigger: %v", err) 103 | } 104 | 105 | server := httptest.NewServer(trigger(callback)) 106 | defer server.Close() 107 | 108 | req, err := http.NewRequest("POST", server.URL, nil) 109 | if err != nil { 110 | t.Fatalf("Failed creating request: %v", err) 111 | } 112 | 113 | req.URL.RawQuery = tc.Given.Query.Encode() 114 | 115 | res, err := http.DefaultClient.Do(req) 116 | if err != nil { 117 | t.Fatalf("Request failed: %v", err) 118 | } 119 | 120 | defer res.Body.Close() 121 | if res.StatusCode != tc.Expected.StatusCode { 122 | t.Errorf("Status codes do not match: %d vs %d", res.StatusCode, tc.Expected.StatusCode) 123 | } 124 | }) 125 | } 126 | } 127 | -------------------------------------------------------------------------------- /triggers/radarr/radarr.go: -------------------------------------------------------------------------------- 1 | package radarr 2 | 3 | import ( 4 | "encoding/json" 5 | "net/http" 6 | "path" 7 | "strings" 8 | "time" 9 | 10 | "github.com/rs/zerolog/hlog" 11 | 12 | "github.com/saltydk/autoscan" 13 | ) 14 | 15 | type Config struct { 16 | Name string `yaml:"name"` 17 | Priority int `yaml:"priority"` 18 | Rewrite []autoscan.Rewrite `yaml:"rewrite"` 19 | Verbosity string `yaml:"verbosity"` 20 | } 21 | 22 | // New creates an autoscan-compatible HTTP Trigger for Radarr webhooks. 
23 | func New(c Config) (autoscan.HTTPTrigger, error) { 24 | rewriter, err := autoscan.NewRewriter(c.Rewrite) 25 | if err != nil { 26 | return nil, err 27 | } 28 | 29 | trigger := func(callback autoscan.ProcessorFunc) http.Handler { 30 | return handler{ 31 | callback: callback, 32 | priority: c.Priority, 33 | rewrite: rewriter, 34 | } 35 | } 36 | 37 | return trigger, nil 38 | } 39 | 40 | type handler struct { 41 | priority int 42 | rewrite autoscan.Rewriter 43 | callback autoscan.ProcessorFunc 44 | } 45 | 46 | type radarrEvent struct { 47 | Type string `json:"eventType"` 48 | 49 | File struct { 50 | RelativePath string 51 | } `json:"movieFile"` 52 | 53 | Movie struct { 54 | FolderPath string 55 | } `json:"movie"` 56 | } 57 | 58 | func (h handler) ServeHTTP(rw http.ResponseWriter, r *http.Request) { 59 | var err error 60 | rlog := hlog.FromRequest(r) 61 | 62 | event := new(radarrEvent) 63 | err = json.NewDecoder(r.Body).Decode(event) 64 | if err != nil { 65 | rlog.Error().Err(err).Msg("Failed decoding request") 66 | rw.WriteHeader(http.StatusBadRequest) 67 | return 68 | } 69 | 70 | rlog.Trace().Interface("event", event).Msg("Received JSON body") 71 | 72 | if strings.EqualFold(event.Type, "Test") { 73 | rlog.Info().Msg("Received test event") 74 | rw.WriteHeader(http.StatusOK) 75 | return 76 | } 77 | 78 | var folderPath string 79 | 80 | if strings.EqualFold(event.Type, "Download") || strings.EqualFold(event.Type, "MovieFileDelete") { 81 | if event.File.RelativePath == "" || event.Movie.FolderPath == "" { 82 | rlog.Error().Msg("Required fields are missing") 83 | rw.WriteHeader(http.StatusBadRequest) 84 | return 85 | } 86 | 87 | folderPath = path.Dir(path.Join(event.Movie.FolderPath, event.File.RelativePath)) 88 | } 89 | 90 | if strings.EqualFold(event.Type, "MovieDelete") || strings.EqualFold(event.Type, "Rename") { 91 | if event.Movie.FolderPath == "" { 92 | rlog.Error().Msg("Required fields are missing") 93 | rw.WriteHeader(http.StatusBadRequest) 94 | return 95 | } 
96 | 97 | folderPath = event.Movie.FolderPath 98 | } 99 | 100 | scan := autoscan.Scan{ 101 | Folder: h.rewrite(folderPath), 102 | Priority: h.priority, 103 | Time: now(), 104 | } 105 | 106 | err = h.callback(scan) 107 | if err != nil { 108 | rlog.Error().Err(err).Msg("Processor could not process scan") 109 | rw.WriteHeader(http.StatusInternalServerError) 110 | return 111 | } 112 | 113 | rlog.Info(). 114 | Str("path", folderPath). 115 | Str("event", event.Type). 116 | Msg("Scan moved to processor") 117 | 118 | rw.WriteHeader(http.StatusOK) 119 | } 120 | 121 | var now = time.Now 122 | -------------------------------------------------------------------------------- /triggers/a_train/a_train.go: -------------------------------------------------------------------------------- 1 | package a_train 2 | 3 | import ( 4 | "encoding/json" 5 | "net/http" 6 | "time" 7 | 8 | "github.com/go-chi/chi/v5" 9 | "github.com/rs/zerolog/hlog" 10 | 11 | "github.com/saltydk/autoscan" 12 | ) 13 | 14 | type Drive struct { 15 | ID string `yaml:"id"` 16 | Rewrite []autoscan.Rewrite `yaml:"rewrite"` 17 | } 18 | 19 | type Config struct { 20 | Drives []Drive `yaml:"drives"` 21 | Priority int `yaml:"priority"` 22 | Rewrite []autoscan.Rewrite `yaml:"rewrite"` 23 | Verbosity string `yaml:"verbosity"` 24 | } 25 | 26 | type ATrainRewriter = func(drive string, input string) string 27 | 28 | // // New creates an autoscan-compatible HTTP Trigger for A-Train webhooks. 
29 | func New(c Config) (autoscan.HTTPTrigger, error) { 30 | rewrites := make(map[string]autoscan.Rewriter) 31 | for _, drive := range c.Drives { 32 | rewriter, err := autoscan.NewRewriter(append(drive.Rewrite, c.Rewrite...)) 33 | if err != nil { 34 | return nil, err 35 | } 36 | 37 | rewrites[drive.ID] = rewriter 38 | } 39 | 40 | globalRewriter, err := autoscan.NewRewriter(c.Rewrite) 41 | if err != nil { 42 | return nil, err 43 | } 44 | 45 | rewriter := func(drive string, input string) string { 46 | driveRewriter, ok := rewrites[drive] 47 | if !ok { 48 | return globalRewriter(input) 49 | } 50 | 51 | return driveRewriter(input) 52 | } 53 | 54 | trigger := func(callback autoscan.ProcessorFunc) http.Handler { 55 | return handler{ 56 | callback: callback, 57 | priority: c.Priority, 58 | rewrite: rewriter, 59 | } 60 | } 61 | 62 | return trigger, nil 63 | } 64 | 65 | type handler struct { 66 | priority int 67 | rewrite ATrainRewriter 68 | callback autoscan.ProcessorFunc 69 | } 70 | 71 | type atrainEvent struct { 72 | Created []string 73 | Deleted []string 74 | } 75 | 76 | func (h handler) ServeHTTP(rw http.ResponseWriter, r *http.Request) { 77 | var err error 78 | rlog := hlog.FromRequest(r) 79 | 80 | drive := chi.URLParam(r, "drive") 81 | 82 | event := new(atrainEvent) 83 | err = json.NewDecoder(r.Body).Decode(event) 84 | if err != nil { 85 | rlog.Error().Err(err).Msg("Failed decoding request") 86 | rw.WriteHeader(http.StatusBadRequest) 87 | return 88 | } 89 | 90 | rlog.Trace().Interface("event", event).Msg("Received JSON body") 91 | 92 | scans := make([]autoscan.Scan, 0) 93 | 94 | for _, path := range event.Created { 95 | scans = append(scans, autoscan.Scan{ 96 | Folder: h.rewrite(drive, path), 97 | Priority: h.priority, 98 | Time: now(), 99 | }) 100 | } 101 | 102 | for _, path := range event.Deleted { 103 | scans = append(scans, autoscan.Scan{ 104 | Folder: h.rewrite(drive, path), 105 | Priority: h.priority, 106 | Time: now(), 107 | }) 108 | } 109 | 110 | err = 
h.callback(scans...) 111 | if err != nil { 112 | rlog.Error().Err(err).Msg("Processor could not process scans") 113 | rw.WriteHeader(http.StatusInternalServerError) 114 | return 115 | } 116 | 117 | for _, scan := range scans { 118 | rlog.Info().Str("path", scan.Folder).Msg("Scan moved to processor") 119 | } 120 | 121 | rw.WriteHeader(http.StatusOK) 122 | } 123 | 124 | var now = time.Now 125 | -------------------------------------------------------------------------------- /targets/plex/plex.go: -------------------------------------------------------------------------------- 1 | package plex 2 | 3 | import ( 4 | "fmt" 5 | "strconv" 6 | "strings" 7 | 8 | "github.com/rs/zerolog" 9 | 10 | "github.com/saltydk/autoscan" 11 | ) 12 | 13 | type Config struct { 14 | URL string `yaml:"url"` 15 | Token string `yaml:"token"` 16 | Rewrite []autoscan.Rewrite `yaml:"rewrite"` 17 | Verbosity string `yaml:"verbosity"` 18 | } 19 | 20 | type target struct { 21 | url string 22 | token string 23 | libraries []library 24 | 25 | log zerolog.Logger 26 | rewrite autoscan.Rewriter 27 | api *apiClient 28 | } 29 | 30 | func New(c Config) (autoscan.Target, error) { 31 | l := autoscan.GetLogger(c.Verbosity).With(). 32 | Str("target", "plex"). 33 | Str("url", c.URL).Logger() 34 | 35 | rewriter, err := autoscan.NewRewriter(c.Rewrite) 36 | if err != nil { 37 | return nil, err 38 | } 39 | 40 | api := newAPIClient(c.URL, c.Token, l) 41 | 42 | version, err := api.Version() 43 | if err != nil { 44 | return nil, err 45 | } 46 | 47 | l.Debug().Msgf("Plex version: %s", version) 48 | if !isSupportedVersion(version) { 49 | return nil, fmt.Errorf("plex running unsupported version %s: %w", version, autoscan.ErrFatal) 50 | } 51 | 52 | libraries, err := api.Libraries() 53 | if err != nil { 54 | return nil, err 55 | } 56 | 57 | l.Debug(). 58 | Interface("libraries", libraries). 
59 | Msg("Retrieved libraries") 60 | 61 | return &target{ 62 | url: c.URL, 63 | token: c.Token, 64 | libraries: libraries, 65 | 66 | log: l, 67 | rewrite: rewriter, 68 | api: api, 69 | }, nil 70 | } 71 | 72 | func (t target) Available() error { 73 | _, err := t.api.Version() 74 | return err 75 | } 76 | 77 | func (t target) Scan(scan autoscan.Scan) error { 78 | // determine library for this scan 79 | scanFolder := t.rewrite(scan.Folder) 80 | 81 | libs, err := t.getScanLibrary(scanFolder) 82 | if err != nil { 83 | t.log.Warn(). 84 | Err(err). 85 | Msg("No target libraries found") 86 | 87 | return nil 88 | } 89 | 90 | // send scan request 91 | for _, lib := range libs { 92 | l := t.log.With(). 93 | Str("path", scanFolder). 94 | Str("library", lib.Name). 95 | Logger() 96 | 97 | l.Trace().Msg("Sending scan request") 98 | 99 | if err := t.api.Scan(scanFolder, lib.ID); err != nil { 100 | return err 101 | } 102 | 103 | l.Info().Msg("Scan moved to target") 104 | } 105 | 106 | return nil 107 | } 108 | 109 | func (t target) getScanLibrary(folder string) ([]library, error) { 110 | libraries := make([]library, 0) 111 | 112 | for _, l := range t.libraries { 113 | if strings.HasPrefix(folder, l.Path) { 114 | libraries = append(libraries, l) 115 | } 116 | } 117 | 118 | if len(libraries) == 0 { 119 | return nil, fmt.Errorf("%v: failed determining libraries", folder) 120 | } 121 | 122 | return libraries, nil 123 | } 124 | 125 | func isSupportedVersion(version string) bool { 126 | parts := strings.Split(version, ".") 127 | if len(parts) < 2 { 128 | return false 129 | } 130 | 131 | major, _ := strconv.Atoi(parts[0]) 132 | minor, _ := strconv.Atoi(parts[1]) 133 | 134 | if major >= 2 || (major == 1 && minor >= 20) { 135 | return true 136 | } 137 | 138 | return false 139 | } 140 | -------------------------------------------------------------------------------- /migrate/util.go: -------------------------------------------------------------------------------- 1 | package migrate 2 | 3 | 
import ( 4 | "database/sql" 5 | "embed" 6 | "fmt" 7 | "path/filepath" 8 | "sort" 9 | ) 10 | 11 | type migration struct { 12 | Version int `regroup:"Version"` 13 | Name string `regroup:"Name"` 14 | Filename string 15 | Schema string 16 | } 17 | 18 | func (m *Migrator) verify() error { 19 | if _, err := m.db.Exec(sqlSchema); err != nil { 20 | return fmt.Errorf("schema: %w", err) 21 | } 22 | return nil 23 | } 24 | 25 | func (m *Migrator) versions(component string) (map[int]bool, error) { 26 | rows, err := m.db.Query(sqlVersions, component) 27 | if err != nil { 28 | return nil, fmt.Errorf("query: %w", err) 29 | } 30 | defer rows.Close() 31 | 32 | versions := make(map[int]bool, 0) 33 | for rows.Next() { 34 | var version int 35 | if err := rows.Scan(&version); err != nil { 36 | return nil, fmt.Errorf("scan: %w", err) 37 | } 38 | 39 | versions[version] = true 40 | } 41 | 42 | return versions, nil 43 | } 44 | 45 | func (m *Migrator) exec(component string, migration *migration) (err error) { 46 | // begin tx 47 | tx, err := m.db.Begin() 48 | if err != nil { 49 | return fmt.Errorf("begin: %w", err) 50 | } 51 | 52 | // commit - rollback 53 | defer func(tx *sql.Tx) { 54 | // roll back 55 | if err != nil { 56 | if errRb := tx.Rollback(); errRb != nil { 57 | err = fmt.Errorf("rollback: %v: %w", errRb, err) 58 | } 59 | return 60 | } 61 | 62 | // commit 63 | if errCm := tx.Commit(); err != nil { 64 | err = fmt.Errorf("commit: %w", errCm) 65 | } 66 | }(tx) 67 | 68 | // exec migration 69 | if _, err := tx.Exec(migration.Schema); err != nil { 70 | return fmt.Errorf("exec: %w", err) 71 | } 72 | 73 | // insert migration version 74 | if _, err := tx.Exec(sqlInsertVersion, component, migration.Version); err != nil { 75 | return fmt.Errorf("schema_migration: %w", err) 76 | } 77 | 78 | return nil 79 | } 80 | 81 | func (m *Migrator) parse(fs *embed.FS) ([]*migration, error) { 82 | // parse migrations from filesystem 83 | files, err := fs.ReadDir(m.dir) 84 | if err != nil { 85 | return nil, 
fmt.Errorf("read migrations: %w", err) 86 | } 87 | 88 | // parse migrations 89 | migrations := make([]*migration, 0) 90 | for _, f := range files { 91 | // skip dirs 92 | if f.IsDir() { 93 | continue 94 | } 95 | 96 | // parse migration 97 | md := new(migration) 98 | if err := m.re.MatchToTarget(f.Name(), md); err != nil { 99 | return nil, fmt.Errorf("parse migration: %w", err) 100 | } 101 | 102 | b, err := fs.ReadFile(filepath.Join(m.dir, f.Name())) 103 | if err != nil { 104 | return nil, fmt.Errorf("read migration: %w", err) 105 | } 106 | md.Schema = string(b) 107 | md.Filename = f.Name() 108 | 109 | // set migration 110 | migrations = append(migrations, md) 111 | } 112 | 113 | // sort migrations 114 | sort.Slice(migrations, func(i, j int) bool { 115 | return migrations[i].Version < migrations[j].Version 116 | }) 117 | 118 | return migrations, nil 119 | } 120 | 121 | const ( 122 | sqlSchema = `CREATE TABLE IF NOT EXISTS schema_migration (component VARCHAR(255) NOT NULL, version INTEGER NOT NULL, PRIMARY KEY (component, version))` 123 | sqlVersions = `SELECT version FROM schema_migration WHERE component = ?` 124 | sqlInsertVersion = `INSERT INTO schema_migration (component, version) VALUES (?, ?)` 125 | ) 126 | -------------------------------------------------------------------------------- /triggers/lidarr/lidarr_test.go: -------------------------------------------------------------------------------- 1 | package lidarr 2 | 3 | import ( 4 | "errors" 5 | "net/http" 6 | "net/http/httptest" 7 | "os" 8 | "reflect" 9 | "testing" 10 | "time" 11 | 12 | "github.com/saltydk/autoscan" 13 | ) 14 | 15 | func TestHandler(t *testing.T) { 16 | type Given struct { 17 | Config Config 18 | Fixture string 19 | } 20 | 21 | type Expected struct { 22 | Scans []autoscan.Scan 23 | StatusCode int 24 | } 25 | 26 | type Test struct { 27 | Name string 28 | Given Given 29 | Expected Expected 30 | } 31 | 32 | standardConfig := Config{ 33 | Name: "lidarr", 34 | Priority: 5, 35 | Rewrite: 
[]autoscan.Rewrite{{ 36 | From: "/Music/*", 37 | To: "/mnt/unionfs/Media/Music/$1", 38 | }}, 39 | } 40 | 41 | currentTime := time.Now() 42 | now = func() time.Time { 43 | return currentTime 44 | } 45 | 46 | var testCases = []Test{ 47 | { 48 | "Scan has all the correct fields", 49 | Given{ 50 | Config: standardConfig, 51 | Fixture: "testdata/marshmello.json", 52 | }, 53 | Expected{ 54 | StatusCode: 200, 55 | Scans: []autoscan.Scan{{ 56 | Folder: "/mnt/unionfs/Media/Music/Marshmello/Joytime III (2019)", 57 | Priority: 5, 58 | Time: currentTime, 59 | }}, 60 | }, 61 | }, 62 | { 63 | "Multiple folders", 64 | Given{ 65 | Config: standardConfig, 66 | Fixture: "testdata/blink-182.json", 67 | }, 68 | Expected{ 69 | StatusCode: 200, 70 | Scans: []autoscan.Scan{ 71 | { 72 | Folder: "/mnt/unionfs/Media/Music/blink‐182/California (2016)/CD 01", 73 | Priority: 5, 74 | Time: currentTime, 75 | }, 76 | { 77 | Folder: "/mnt/unionfs/Media/Music/blink‐182/California (2016)/CD 02", 78 | Priority: 5, 79 | Time: currentTime, 80 | }}, 81 | }, 82 | }, 83 | { 84 | "Returns bad request on invalid JSON", 85 | Given{ 86 | Config: standardConfig, 87 | Fixture: "testdata/invalid.json", 88 | }, 89 | Expected{ 90 | StatusCode: 400, 91 | }, 92 | }, 93 | { 94 | "Returns 200 on Test event without emitting a scan", 95 | Given{ 96 | Config: standardConfig, 97 | Fixture: "testdata/test.json", 98 | }, 99 | Expected{ 100 | StatusCode: 200, 101 | }, 102 | }, 103 | } 104 | 105 | for _, tc := range testCases { 106 | t.Run(tc.Name, func(t *testing.T) { 107 | callback := func(scans ...autoscan.Scan) error { 108 | if !reflect.DeepEqual(tc.Expected.Scans, scans) { 109 | t.Logf("want: %v", tc.Expected.Scans) 110 | t.Logf("got: %v", scans) 111 | t.Errorf("Scans do not equal") 112 | return errors.New("Scans do not equal") 113 | } 114 | 115 | return nil 116 | } 117 | 118 | trigger, err := New(tc.Given.Config) 119 | if err != nil { 120 | t.Fatalf("Could not create Lidarr Trigger: %v", err) 121 | } 122 | 123 | server 
:= httptest.NewServer(trigger(callback)) 124 | defer server.Close() 125 | 126 | request, err := os.Open(tc.Given.Fixture) 127 | if err != nil { 128 | t.Fatalf("Could not open the fixture: %s", tc.Given.Fixture) 129 | } 130 | 131 | res, err := http.Post(server.URL, "application/json", request) 132 | if err != nil { 133 | t.Fatalf("Request failed: %v", err) 134 | } 135 | 136 | defer res.Body.Close() 137 | if res.StatusCode != tc.Expected.StatusCode { 138 | t.Errorf("Status codes do not match: %d vs %d", res.StatusCode, tc.Expected.StatusCode) 139 | } 140 | }) 141 | } 142 | } 143 | -------------------------------------------------------------------------------- /processor/datastore.go: -------------------------------------------------------------------------------- 1 | package processor 2 | 3 | import ( 4 | "database/sql" 5 | "embed" 6 | "errors" 7 | "fmt" 8 | "time" 9 | 10 | "github.com/saltydk/autoscan" 11 | "github.com/saltydk/autoscan/migrate" 12 | 13 | // sqlite3 driver 14 | _ "modernc.org/sqlite" 15 | ) 16 | 17 | type datastore struct { 18 | *sql.DB 19 | } 20 | 21 | var ( 22 | //go:embed migrations 23 | migrations embed.FS 24 | ) 25 | 26 | func newDatastore(db *sql.DB, mg *migrate.Migrator) (*datastore, error) { 27 | // migrations 28 | if err := mg.Migrate(&migrations, "processor"); err != nil { 29 | return nil, fmt.Errorf("migrate: %w", err) 30 | } 31 | 32 | return &datastore{db}, nil 33 | } 34 | 35 | const sqlUpsert = ` 36 | INSERT INTO scan (folder, priority, time) 37 | VALUES (?, ?, ?) 
38 | ON CONFLICT (folder) DO UPDATE SET 39 | priority = MAX(excluded.priority, scan.priority), 40 | time = excluded.time 41 | ` 42 | 43 | func (store *datastore) upsert(tx *sql.Tx, scan autoscan.Scan) error { 44 | _, err := tx.Exec(sqlUpsert, scan.Folder, scan.Priority, scan.Time) 45 | return err 46 | } 47 | 48 | func (store *datastore) Upsert(scans []autoscan.Scan) error { 49 | tx, err := store.Begin() 50 | if err != nil { 51 | return err 52 | } 53 | 54 | for _, scan := range scans { 55 | if err = store.upsert(tx, scan); err != nil { 56 | if rollbackErr := tx.Rollback(); rollbackErr != nil { 57 | panic(rollbackErr) 58 | } 59 | 60 | return err 61 | } 62 | } 63 | 64 | return tx.Commit() 65 | } 66 | 67 | const sqlGetScansRemaining = `SELECT COUNT(folder) FROM scan` 68 | 69 | func (store *datastore) GetScansRemaining() (int, error) { 70 | row := store.QueryRow(sqlGetScansRemaining) 71 | 72 | remaining := 0 73 | err := row.Scan(&remaining) 74 | switch { 75 | case errors.Is(err, sql.ErrNoRows): 76 | return remaining, nil 77 | case err != nil: 78 | return remaining, fmt.Errorf("get remaining scans: %v: %w", err, autoscan.ErrFatal) 79 | } 80 | 81 | return remaining, nil 82 | } 83 | 84 | const sqlGetAvailableScan = ` 85 | SELECT folder, priority, time FROM scan 86 | WHERE time < ? 
87 | ORDER BY priority DESC, time ASC 88 | LIMIT 1 89 | ` 90 | 91 | func (store *datastore) GetAvailableScan(minAge time.Duration) (autoscan.Scan, error) { 92 | row := store.QueryRow(sqlGetAvailableScan, now().Add(-1*minAge)) 93 | 94 | scan := autoscan.Scan{} 95 | err := row.Scan(&scan.Folder, &scan.Priority, &scan.Time) 96 | switch { 97 | case errors.Is(err, sql.ErrNoRows): 98 | return scan, autoscan.ErrNoScans 99 | case err != nil: 100 | return scan, fmt.Errorf("get matching: %s: %w", err, autoscan.ErrFatal) 101 | } 102 | 103 | return scan, nil 104 | } 105 | 106 | const sqlGetAll = ` 107 | SELECT folder, priority, time FROM scan 108 | ` 109 | 110 | func (store *datastore) GetAll() (scans []autoscan.Scan, err error) { 111 | rows, err := store.Query(sqlGetAll) 112 | if err != nil { 113 | return scans, err 114 | } 115 | 116 | defer rows.Close() 117 | for rows.Next() { 118 | scan := autoscan.Scan{} 119 | err = rows.Scan(&scan.Folder, &scan.Priority, &scan.Time) 120 | if err != nil { 121 | return scans, err 122 | } 123 | 124 | scans = append(scans, scan) 125 | } 126 | 127 | return scans, rows.Err() 128 | } 129 | 130 | const sqlDelete = ` 131 | DELETE FROM scan WHERE folder=? 132 | ` 133 | 134 | func (store *datastore) Delete(scan autoscan.Scan) error { 135 | _, err := store.Exec(sqlDelete, scan.Folder) 136 | if err != nil { 137 | return fmt.Errorf("delete: %s: %w", err, autoscan.ErrFatal) 138 | } 139 | 140 | return nil 141 | } 142 | 143 | var now = time.Now 144 | -------------------------------------------------------------------------------- /triggers/manual/template.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | autoscan 5 | 6 | 7 | 8 | 9 | 10 |
11 |
12 |
13 |

autoscan

14 |

Path to scan

15 |
16 |
17 | 21 |
23 |
24 |
25 | 26 |
27 |
28 |
29 | 30 | 31 | 32 | 79 | 80 | -------------------------------------------------------------------------------- /cmd/autoscan/router.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "net/http" 6 | "time" 7 | 8 | "github.com/rs/zerolog/hlog" 9 | "github.com/rs/zerolog/log" 10 | 11 | "github.com/go-chi/chi/v5" 12 | "github.com/go-chi/chi/v5/middleware" 13 | 14 | "github.com/saltydk/autoscan/processor" 15 | "github.com/saltydk/autoscan/triggers/a_train" 16 | "github.com/saltydk/autoscan/triggers/lidarr" 17 | "github.com/saltydk/autoscan/triggers/manual" 18 | "github.com/saltydk/autoscan/triggers/radarr" 19 | "github.com/saltydk/autoscan/triggers/readarr" 20 | "github.com/saltydk/autoscan/triggers/sonarr" 21 | ) 22 | 23 | func pattern(name string) string { 24 | return fmt.Sprintf("/%s", name) 25 | } 26 | 27 | func createCredentials(c config) map[string]string { 28 | creds := make(map[string]string) 29 | creds[c.Auth.Username] = c.Auth.Password 30 | return creds 31 | } 32 | 33 | func getRouter(c config, proc *processor.Processor) chi.Router { 34 | r := chi.NewRouter() 35 | 36 | // Middleware 37 | r.Use(middleware.Recoverer) 38 | 39 | // Logging-related middleware 40 | r.Use(hlog.NewHandler(log.Logger)) 41 | r.Use(hlog.RequestIDHandler("id", "request-id")) 42 | r.Use(hlog.URLHandler("url")) 43 | r.Use(hlog.MethodHandler("method")) 44 | r.Use(hlog.AccessHandler(func(r *http.Request, status, size int, duration time.Duration) { 45 | hlog.FromRequest(r).Debug(). 46 | Int("status", status). 47 | Dur("duration", duration). 48 | Msg("Request processed") 49 | })) 50 | 51 | // Health check 52 | r.Get("/health", healthHandler) 53 | 54 | // HTTP-Triggers 55 | r.Route("/triggers", func(r chi.Router) { 56 | // Use Basic Auth middleware if username and password are set. 
57 | if c.Auth.Username != "" && c.Auth.Password != "" { 58 | r.Use(middleware.BasicAuth("Autoscan 1.x", createCredentials(c))) 59 | } 60 | 61 | // A-Train HTTP-trigger 62 | r.Route("/a-train", func(r chi.Router) { 63 | trigger, err := a_train.New(c.Triggers.ATrain) 64 | if err != nil { 65 | log.Fatal().Err(err).Str("trigger", "a-train").Msg("Failed initialising trigger") 66 | } 67 | 68 | r.Post("/{drive}", trigger(proc.Add).ServeHTTP) 69 | }) 70 | 71 | // Mixed-style Manual HTTP-trigger 72 | r.Route("/manual", func(r chi.Router) { 73 | trigger, err := manual.New(c.Triggers.Manual) 74 | if err != nil { 75 | log.Fatal().Err(err).Str("trigger", "manual").Msg("Failed initialising trigger") 76 | } 77 | 78 | r.HandleFunc("/", trigger(proc.Add).ServeHTTP) 79 | }) 80 | 81 | // OLD-style HTTP-triggers. Can be converted to the /{trigger}/{id} format in a 2.0 release. 82 | for _, t := range c.Triggers.Lidarr { 83 | trigger, err := lidarr.New(t) 84 | if err != nil { 85 | log.Fatal().Err(err).Str("trigger", t.Name).Msg("Failed initialising trigger") 86 | } 87 | 88 | r.Post(pattern(t.Name), trigger(proc.Add).ServeHTTP) 89 | } 90 | 91 | for _, t := range c.Triggers.Radarr { 92 | trigger, err := radarr.New(t) 93 | if err != nil { 94 | log.Fatal().Err(err).Str("trigger", t.Name).Msg("Failed initialising trigger") 95 | } 96 | 97 | r.Post(pattern(t.Name), trigger(proc.Add).ServeHTTP) 98 | } 99 | 100 | for _, t := range c.Triggers.Readarr { 101 | trigger, err := readarr.New(t) 102 | if err != nil { 103 | log.Fatal().Err(err).Str("trigger", t.Name).Msg("Failed initialising trigger") 104 | } 105 | 106 | r.Post(pattern(t.Name), trigger(proc.Add).ServeHTTP) 107 | } 108 | 109 | for _, t := range c.Triggers.Sonarr { 110 | trigger, err := sonarr.New(t) 111 | if err != nil { 112 | log.Fatal().Err(err).Str("trigger", t.Name).Msg("Failed initialising trigger") 113 | } 114 | 115 | r.Post(pattern(t.Name), trigger(proc.Add).ServeHTTP) 116 | } 117 | }) 118 | 119 | return r 120 | } 121 | 122 | // 
Other Handlers 123 | func healthHandler(rw http.ResponseWriter, r *http.Request) { 124 | rw.WriteHeader(http.StatusOK) 125 | } 126 | -------------------------------------------------------------------------------- /triggers/a_train/a_train_test.go: -------------------------------------------------------------------------------- 1 | package a_train 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "net/http" 7 | "net/http/httptest" 8 | "os" 9 | "reflect" 10 | "testing" 11 | "time" 12 | 13 | "github.com/go-chi/chi/v5" 14 | 15 | "github.com/saltydk/autoscan" 16 | ) 17 | 18 | func TestHandler(t *testing.T) { 19 | type Given struct { 20 | Config Config 21 | ID string 22 | Fixture string 23 | } 24 | 25 | type Expected struct { 26 | Scans []autoscan.Scan 27 | StatusCode int 28 | } 29 | 30 | type Test struct { 31 | Name string 32 | Given Given 33 | Expected Expected 34 | } 35 | 36 | standardConfig := Config{ 37 | Drives: []Drive{{ 38 | ID: "1234VA", 39 | Rewrite: []autoscan.Rewrite{{ 40 | From: "^/TV/*", 41 | To: "/mnt/unionfs/Media/TV/$1", 42 | }}, 43 | }}, 44 | Priority: 5, 45 | Rewrite: []autoscan.Rewrite{{ 46 | From: "^/Movies/*", 47 | To: "/mnt/unionfs/Media/Movies/$1", 48 | }}, 49 | } 50 | 51 | currentTime := time.Now() 52 | now = func() time.Time { 53 | return currentTime 54 | } 55 | 56 | var testCases = []Test{ 57 | { 58 | "Multiple created and deleted", 59 | Given{ 60 | Config: standardConfig, 61 | ID: "1234VA", 62 | Fixture: "testdata/full.json", 63 | }, 64 | Expected{ 65 | StatusCode: 200, 66 | Scans: []autoscan.Scan{ 67 | { 68 | Folder: "/mnt/unionfs/Media/Movies/Interstellar (2014)", 69 | Priority: 5, 70 | Time: currentTime, 71 | }, 72 | { 73 | Folder: "/mnt/unionfs/Media/TV/Legion/Season 1", 74 | Priority: 5, 75 | Time: currentTime, 76 | }, 77 | { 78 | Folder: "/mnt/unionfs/Media/Movies/Wonder Woman 1984 (2020)", 79 | Priority: 5, 80 | Time: currentTime, 81 | }, 82 | { 83 | Folder: "/mnt/unionfs/Media/Movies/Mortal Kombat (2021)", 84 | Priority: 5, 85 | Time: 
currentTime, 86 | }, 87 | }, 88 | }, 89 | }, 90 | { 91 | "Duplicate (parent is given in both create and delete)", 92 | Given{ 93 | Config: standardConfig, 94 | ID: "anotherVA", 95 | Fixture: "testdata/modified.json", 96 | }, 97 | Expected{ 98 | StatusCode: 200, 99 | Scans: []autoscan.Scan{ 100 | { 101 | Folder: "/TV/Legion/Season 1", 102 | Priority: 5, 103 | Time: currentTime, 104 | }, 105 | { 106 | Folder: "/TV/Legion/Season 1", 107 | Priority: 5, 108 | Time: currentTime, 109 | }, 110 | }, 111 | }, 112 | }, 113 | } 114 | 115 | for _, tc := range testCases { 116 | t.Run(tc.Name, func(t *testing.T) { 117 | callback := func(scans ...autoscan.Scan) error { 118 | if !reflect.DeepEqual(tc.Expected.Scans, scans) { 119 | t.Log(scans) 120 | t.Log(tc.Expected.Scans) 121 | t.Errorf("Scans do not equal") 122 | return errors.New("Scans do not equal") 123 | } 124 | 125 | return nil 126 | } 127 | 128 | trigger, err := New(tc.Given.Config) 129 | if err != nil { 130 | t.Fatalf("Could not create A-Train Trigger: %v", err) 131 | } 132 | 133 | r := chi.NewRouter() 134 | r.Post("/triggers/a-train/{drive}", trigger(callback).ServeHTTP) 135 | 136 | server := httptest.NewServer(r) 137 | defer server.Close() 138 | 139 | request, err := os.Open(tc.Given.Fixture) 140 | if err != nil { 141 | t.Fatalf("Could not open the fixture: %s", tc.Given.Fixture) 142 | } 143 | 144 | url := fmt.Sprintf("%s/triggers/a-train/%s", server.URL, tc.Given.ID) 145 | res, err := http.Post(url, "application/json", request) 146 | if err != nil { 147 | t.Fatalf("Request failed: %v", err) 148 | } 149 | 150 | defer res.Body.Close() 151 | if res.StatusCode != tc.Expected.StatusCode { 152 | t.Errorf("Status codes do not match: %d vs %d", res.StatusCode, tc.Expected.StatusCode) 153 | } 154 | }) 155 | } 156 | } 157 | -------------------------------------------------------------------------------- /autoscan.go: -------------------------------------------------------------------------------- 1 | package autoscan 2 | 3 | 
import ( 4 | "errors" 5 | "fmt" 6 | "net/http" 7 | "regexp" 8 | "time" 9 | ) 10 | 11 | // A Scan is at the core of Autoscan. 12 | // It defines which path to scan and with which (trigger-given) priority. 13 | // 14 | // The Scan is used across Triggers, Targets and the Processor. 15 | type Scan struct { 16 | Folder string 17 | Priority int 18 | Time time.Time 19 | } 20 | 21 | type ProcessorFunc func(...Scan) error 22 | 23 | type Trigger func(ProcessorFunc) 24 | 25 | // A HTTPTrigger is a Trigger which does not run in the background, 26 | // and instead returns a http.Handler. 27 | // 28 | // This http.Handler should be added to the autoscan router in cmd/autoscan. 29 | type HTTPTrigger func(ProcessorFunc) http.Handler 30 | 31 | // A Target receives a Scan from the Processor and translates the Scan 32 | // into a format understood by the target. 33 | type Target interface { 34 | Scan(Scan) error 35 | Available() error 36 | } 37 | 38 | var ( 39 | // ErrTargetUnavailable may occur when a Target goes offline 40 | // or suffers from fatal errors. In this case, the processor 41 | // will halt operations until the target is back online. 42 | ErrTargetUnavailable = errors.New("target unavailable") 43 | 44 | // ErrFatal indicates a severe problem related to development. 45 | ErrFatal = errors.New("fatal error") 46 | 47 | // ErrNoScans is not an error. It only indicates whether the CLI 48 | // should sleep longer depending on the processor output. 49 | ErrNoScans = errors.New("no scans currently available") 50 | 51 | // ErrAnchorUnavailable indicates that an Anchor file is 52 | // not available on the file system. Processing should halt 53 | // until all anchors are available. 
54 | ErrAnchorUnavailable = errors.New("anchor file is unavailable") 55 | ) 56 | 57 | type Rewrite struct { 58 | From string `yaml:"from"` 59 | To string `yaml:"to"` 60 | } 61 | 62 | type Rewriter func(string) string 63 | 64 | func NewRewriter(rewriteRules []Rewrite) (Rewriter, error) { 65 | var rewrites []regexp.Regexp 66 | for _, rule := range rewriteRules { 67 | re, err := regexp.Compile(rule.From) 68 | if err != nil { 69 | return nil, err 70 | } 71 | 72 | rewrites = append(rewrites, *re) 73 | } 74 | 75 | rewriter := func(input string) string { 76 | for i, r := range rewrites { 77 | if r.MatchString(input) { 78 | return r.ReplaceAllString(input, rewriteRules[i].To) 79 | } 80 | } 81 | 82 | return input 83 | } 84 | 85 | return rewriter, nil 86 | } 87 | 88 | type Filterer func(string) bool 89 | 90 | func NewFilterer(includes []string, excludes []string) (Filterer, error) { 91 | reIncludes := make([]regexp.Regexp, 0) 92 | reExcludes := make([]regexp.Regexp, 0) 93 | 94 | // compile patterns 95 | for _, pattern := range includes { 96 | re, err := regexp.Compile(pattern) 97 | if err != nil { 98 | return nil, fmt.Errorf("compiling include: %v: %w", pattern, err) 99 | } 100 | reIncludes = append(reIncludes, *re) 101 | } 102 | 103 | for _, pattern := range excludes { 104 | re, err := regexp.Compile(pattern) 105 | if err != nil { 106 | return nil, fmt.Errorf("compiling exclude: %v: %w", pattern, err) 107 | } 108 | reExcludes = append(reExcludes, *re) 109 | } 110 | 111 | incSize := len(reIncludes) 112 | excSize := len(reExcludes) 113 | 114 | // create filterer 115 | var fn Filterer = func(string) bool { return true } 116 | 117 | if incSize > 0 || excSize > 0 { 118 | fn = func(path string) bool { 119 | // check excludes 120 | for _, re := range reExcludes { 121 | if re.MatchString(path) { 122 | return false 123 | } 124 | } 125 | 126 | // no includes (but excludes did not match) 127 | if incSize == 0 { 128 | return true 129 | } 130 | 131 | // check includes 132 | for _, re := 
range reIncludes { 133 | if re.MatchString(path) { 134 | return true 135 | } 136 | } 137 | 138 | // no includes passed 139 | return false 140 | } 141 | } 142 | 143 | return fn, nil 144 | } 145 | -------------------------------------------------------------------------------- /triggers/radarr/radarr_test.go: -------------------------------------------------------------------------------- 1 | package radarr 2 | 3 | import ( 4 | "errors" 5 | "net/http" 6 | "net/http/httptest" 7 | "os" 8 | "reflect" 9 | "testing" 10 | "time" 11 | 12 | "github.com/saltydk/autoscan" 13 | ) 14 | 15 | func TestHandler(t *testing.T) { 16 | type Given struct { 17 | Config Config 18 | Fixture string 19 | } 20 | 21 | type Expected struct { 22 | Scans []autoscan.Scan 23 | StatusCode int 24 | } 25 | 26 | type Test struct { 27 | Name string 28 | Given Given 29 | Expected Expected 30 | } 31 | 32 | standardConfig := Config{ 33 | Name: "radarr", 34 | Priority: 5, 35 | Rewrite: []autoscan.Rewrite{{ 36 | From: "/Movies/*", 37 | To: "/mnt/unionfs/Media/Movies/$1", 38 | }}, 39 | } 40 | 41 | currentTime := time.Now() 42 | now = func() time.Time { 43 | return currentTime 44 | } 45 | 46 | var testCases = []Test{ 47 | { 48 | "Download Event", 49 | Given{ 50 | Config: standardConfig, 51 | Fixture: "testdata/interstellar.json", 52 | }, 53 | Expected{ 54 | StatusCode: 200, 55 | Scans: []autoscan.Scan{ 56 | { 57 | Folder: "/mnt/unionfs/Media/Movies/Interstellar (2014)", 58 | Priority: 5, 59 | Time: currentTime, 60 | }, 61 | }, 62 | }, 63 | }, 64 | { 65 | "MovieFileDelete Event", 66 | Given{ 67 | Config: standardConfig, 68 | Fixture: "testdata/movie_file_delete.json", 69 | }, 70 | Expected{ 71 | StatusCode: 200, 72 | Scans: []autoscan.Scan{ 73 | { 74 | Folder: "/mnt/unionfs/Media/Movies/Tenet (2020)", 75 | Priority: 5, 76 | Time: currentTime, 77 | }, 78 | }, 79 | }, 80 | }, 81 | { 82 | "MovieDelete Event", 83 | Given{ 84 | Config: standardConfig, 85 | Fixture: "testdata/movie_delete.json", 86 | }, 87 | Expected{ 
88 | StatusCode: 200, 89 | Scans: []autoscan.Scan{ 90 | { 91 | Folder: "/mnt/unionfs/Media/Movies/Wonder Woman 1984 (2020)", 92 | Priority: 5, 93 | Time: currentTime, 94 | }, 95 | }, 96 | }, 97 | }, 98 | { 99 | "Rename Event", 100 | Given{ 101 | Config: standardConfig, 102 | Fixture: "testdata/rename.json", 103 | }, 104 | Expected{ 105 | StatusCode: 200, 106 | Scans: []autoscan.Scan{ 107 | { 108 | Folder: "/mnt/unionfs/Media/Movies/Deadpool (2016)", 109 | Priority: 5, 110 | Time: currentTime, 111 | }, 112 | }, 113 | }, 114 | }, 115 | { 116 | "Returns bad request on invalid JSON", 117 | Given{ 118 | Config: standardConfig, 119 | Fixture: "testdata/invalid.json", 120 | }, 121 | Expected{ 122 | StatusCode: 400, 123 | }, 124 | }, 125 | { 126 | "Returns 200 on Test event without emitting a scan", 127 | Given{ 128 | Config: standardConfig, 129 | Fixture: "testdata/test.json", 130 | }, 131 | Expected{ 132 | StatusCode: 200, 133 | }, 134 | }, 135 | } 136 | 137 | for _, tc := range testCases { 138 | t.Run(tc.Name, func(t *testing.T) { 139 | callback := func(scans ...autoscan.Scan) error { 140 | if !reflect.DeepEqual(tc.Expected.Scans, scans) { 141 | t.Log(scans) 142 | t.Log(tc.Expected.Scans) 143 | t.Errorf("Scans do not equal") 144 | return errors.New("Scans do not equal") 145 | } 146 | 147 | return nil 148 | } 149 | 150 | trigger, err := New(tc.Given.Config) 151 | if err != nil { 152 | t.Fatalf("Could not create Radarr Trigger: %v", err) 153 | } 154 | 155 | server := httptest.NewServer(trigger(callback)) 156 | defer server.Close() 157 | 158 | request, err := os.Open(tc.Given.Fixture) 159 | if err != nil { 160 | t.Fatalf("Could not open the fixture: %s", tc.Given.Fixture) 161 | } 162 | 163 | res, err := http.Post(server.URL, "application/json", request) 164 | if err != nil { 165 | t.Fatalf("Request failed: %v", err) 166 | } 167 | 168 | defer res.Body.Close() 169 | if res.StatusCode != tc.Expected.StatusCode { 170 | t.Errorf("Status codes do not match: %d vs %d", 
res.StatusCode, tc.Expected.StatusCode) 171 | } 172 | }) 173 | } 174 | } 175 | -------------------------------------------------------------------------------- /targets/plex/api.go: -------------------------------------------------------------------------------- 1 | package plex 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "net/http" 7 | "net/url" 8 | "strconv" 9 | 10 | "github.com/rs/zerolog" 11 | 12 | "github.com/saltydk/autoscan" 13 | ) 14 | 15 | type apiClient struct { 16 | client *http.Client 17 | log zerolog.Logger 18 | baseURL string 19 | token string 20 | } 21 | 22 | func newAPIClient(baseURL string, token string, log zerolog.Logger) *apiClient { 23 | return &apiClient{ 24 | client: &http.Client{}, 25 | log: log, 26 | baseURL: baseURL, 27 | token: token, 28 | } 29 | } 30 | 31 | func (c apiClient) do(req *http.Request) (*http.Response, error) { 32 | req.Header.Set("X-Plex-Token", c.token) 33 | req.Header.Set("Accept", "application/json") // Force JSON Response. 34 | 35 | res, err := c.client.Do(req) 36 | if err != nil { 37 | return nil, fmt.Errorf("%v: %w", err, autoscan.ErrTargetUnavailable) 38 | } 39 | 40 | if res.StatusCode >= 200 && res.StatusCode < 300 { 41 | return res, nil 42 | } 43 | 44 | c.log.Trace(). 45 | Stringer("request_url", res.Request.URL). 46 | Int("response_status", res.StatusCode). 
47 | Msg("Request failed") 48 | 49 | // statusCode not in the 2xx range, close response 50 | res.Body.Close() 51 | 52 | switch res.StatusCode { 53 | case 401: 54 | return nil, fmt.Errorf("invalid plex token: %s: %w", res.Status, autoscan.ErrFatal) 55 | case 404, 500, 502, 503, 504: 56 | return nil, fmt.Errorf("%s: %w", res.Status, autoscan.ErrTargetUnavailable) 57 | default: 58 | return nil, fmt.Errorf("%s: %w", res.Status, autoscan.ErrFatal) 59 | } 60 | } 61 | 62 | func (c apiClient) Version() (string, error) { 63 | reqURL := autoscan.JoinURL(c.baseURL) 64 | req, err := http.NewRequest("GET", reqURL, nil) 65 | if err != nil { 66 | return "", fmt.Errorf("failed creating version request: %v: %w", err, autoscan.ErrFatal) 67 | } 68 | 69 | res, err := c.do(req) 70 | if err != nil { 71 | return "", fmt.Errorf("version: %w", err) 72 | } 73 | 74 | defer res.Body.Close() 75 | 76 | type Response struct { 77 | MediaContainer struct { 78 | Version string 79 | } 80 | } 81 | 82 | resp := new(Response) 83 | if err := json.NewDecoder(res.Body).Decode(resp); err != nil { 84 | return "", fmt.Errorf("failed decoding version response: %v: %w", err, autoscan.ErrFatal) 85 | } 86 | 87 | return resp.MediaContainer.Version, nil 88 | } 89 | 90 | type library struct { 91 | ID int 92 | Name string 93 | Path string 94 | } 95 | 96 | func (c apiClient) Libraries() ([]library, error) { 97 | reqURL := autoscan.JoinURL(c.baseURL, "library", "sections") 98 | req, err := http.NewRequest("GET", reqURL, nil) 99 | if err != nil { 100 | return nil, fmt.Errorf("failed creating libraries request: %v: %w", err, autoscan.ErrFatal) 101 | } 102 | 103 | res, err := c.do(req) 104 | if err != nil { 105 | return nil, fmt.Errorf("libraries: %w", err) 106 | } 107 | 108 | defer res.Body.Close() 109 | 110 | type Response struct { 111 | MediaContainer struct { 112 | Libraries []struct { 113 | ID int `json:"key,string"` 114 | Name string `json:"title"` 115 | Sections []struct { 116 | Path string `json:"path"` 117 | } 
`json:"Location"` 118 | } `json:"Directory"` 119 | } `json:"MediaContainer"` 120 | } 121 | 122 | resp := new(Response) 123 | if err := json.NewDecoder(res.Body).Decode(resp); err != nil { 124 | return nil, fmt.Errorf("failed decoding libraries response: %v: %w", err, autoscan.ErrFatal) 125 | } 126 | 127 | // process response 128 | libraries := make([]library, 0) 129 | for _, lib := range resp.MediaContainer.Libraries { 130 | for _, folder := range lib.Sections { 131 | libPath := folder.Path 132 | 133 | // Add trailing slash if there is none. 134 | if len(libPath) > 0 && libPath[len(libPath)-1] != '/' { 135 | libPath += "/" 136 | } 137 | 138 | libraries = append(libraries, library{ 139 | Name: lib.Name, 140 | ID: lib.ID, 141 | Path: libPath, 142 | }) 143 | } 144 | } 145 | 146 | return libraries, nil 147 | } 148 | 149 | func (c apiClient) Scan(path string, libraryID int) error { 150 | reqURL := autoscan.JoinURL(c.baseURL, "library", "sections", strconv.Itoa(libraryID), "refresh") 151 | req, err := http.NewRequest("GET", reqURL, nil) 152 | if err != nil { 153 | return fmt.Errorf("failed creating scan request: %v: %w", err, autoscan.ErrFatal) 154 | } 155 | 156 | q := url.Values{} 157 | q.Add("path", path) 158 | req.URL.RawQuery = q.Encode() 159 | 160 | res, err := c.do(req) 161 | if err != nil { 162 | return fmt.Errorf("scan: %w", err) 163 | } 164 | 165 | res.Body.Close() 166 | return nil 167 | } 168 | -------------------------------------------------------------------------------- /triggers/sonarr/sonarr_test.go: -------------------------------------------------------------------------------- 1 | package sonarr 2 | 3 | import ( 4 | "errors" 5 | "net/http" 6 | "net/http/httptest" 7 | "os" 8 | "reflect" 9 | "testing" 10 | "time" 11 | 12 | "github.com/saltydk/autoscan" 13 | ) 14 | 15 | func TestHandler(t *testing.T) { 16 | type Given struct { 17 | Config Config 18 | Fixture string 19 | } 20 | 21 | type Expected struct { 22 | Scans []autoscan.Scan 23 | StatusCode int 24 | 
} 25 | 26 | type Test struct { 27 | Name string 28 | Given Given 29 | Expected Expected 30 | } 31 | 32 | standardConfig := Config{ 33 | Name: "sonarr", 34 | Priority: 5, 35 | Rewrite: []autoscan.Rewrite{{ 36 | From: "/TV/*", 37 | To: "/mnt/unionfs/Media/TV/$1", 38 | }}, 39 | } 40 | 41 | currentTime := time.Now() 42 | now = func() time.Time { 43 | return currentTime 44 | } 45 | 46 | var testCases = []Test{ 47 | { 48 | "Scan has all the correct fields on Download event", 49 | Given{ 50 | Config: standardConfig, 51 | Fixture: "testdata/westworld.json", 52 | }, 53 | Expected{ 54 | StatusCode: 200, 55 | Scans: []autoscan.Scan{ 56 | { 57 | Folder: "/mnt/unionfs/Media/TV/Westworld/Season 1", 58 | Priority: 5, 59 | Time: currentTime, 60 | }, 61 | }, 62 | }, 63 | }, 64 | { 65 | "Scan on EpisodeFileDelete", 66 | Given{ 67 | Config: standardConfig, 68 | Fixture: "testdata/episode_delete.json", 69 | }, 70 | Expected{ 71 | StatusCode: 200, 72 | Scans: []autoscan.Scan{ 73 | { 74 | Folder: "/mnt/unionfs/Media/TV/Westworld/Season 2", 75 | Priority: 5, 76 | Time: currentTime, 77 | }, 78 | }, 79 | }, 80 | }, 81 | { 82 | "Picks up the Rename event without duplicates", 83 | Given{ 84 | Config: standardConfig, 85 | Fixture: "testdata/rename.json", 86 | }, 87 | Expected{ 88 | StatusCode: 200, 89 | Scans: []autoscan.Scan{ 90 | { 91 | Folder: "/mnt/unionfs/Media/TV/Westworld/Season 1", 92 | Priority: 5, 93 | Time: currentTime, 94 | }, 95 | { 96 | Folder: "/mnt/unionfs/Media/TV/Westworld [imdb:tt0475784]/Season 1", 97 | Priority: 5, 98 | Time: currentTime, 99 | }, 100 | { 101 | Folder: "/mnt/unionfs/Media/TV/Westworld/Season 2", 102 | Priority: 5, 103 | Time: currentTime, 104 | }, 105 | { 106 | Folder: "/mnt/unionfs/Media/TV/Westworld [imdb:tt0475784]/Season 2", 107 | Priority: 5, 108 | Time: currentTime, 109 | }, 110 | }, 111 | }, 112 | }, 113 | { 114 | "Scans show folder on SeriesDelete event", 115 | Given{ 116 | Config: standardConfig, 117 | Fixture: "testdata/series_delete.json", 118 | 
}, 119 | Expected{ 120 | StatusCode: 200, 121 | Scans: []autoscan.Scan{ 122 | { 123 | Folder: "/mnt/unionfs/Media/TV/Westworld", 124 | Priority: 5, 125 | Time: currentTime, 126 | }, 127 | }, 128 | }, 129 | }, 130 | { 131 | "Returns bad request on invalid JSON", 132 | Given{ 133 | Config: standardConfig, 134 | Fixture: "testdata/invalid.json", 135 | }, 136 | Expected{ 137 | StatusCode: 400, 138 | }, 139 | }, 140 | { 141 | "Returns 200 on Test event without emitting a scan", 142 | Given{ 143 | Config: standardConfig, 144 | Fixture: "testdata/test.json", 145 | }, 146 | Expected{ 147 | StatusCode: 200, 148 | }, 149 | }, 150 | } 151 | 152 | for _, tc := range testCases { 153 | t.Run(tc.Name, func(t *testing.T) { 154 | callback := func(scans ...autoscan.Scan) error { 155 | if !reflect.DeepEqual(tc.Expected.Scans, scans) { 156 | t.Log(scans) 157 | t.Log(tc.Expected.Scans) 158 | t.Errorf("Scans do not equal") 159 | return errors.New("Scans do not equal") 160 | } 161 | 162 | return nil 163 | } 164 | 165 | trigger, err := New(tc.Given.Config) 166 | if err != nil { 167 | t.Fatalf("Could not create Sonarr Trigger: %v", err) 168 | } 169 | 170 | server := httptest.NewServer(trigger(callback)) 171 | defer server.Close() 172 | 173 | request, err := os.Open(tc.Given.Fixture) 174 | if err != nil { 175 | t.Fatalf("Could not open the fixture: %s", tc.Given.Fixture) 176 | } 177 | 178 | res, err := http.Post(server.URL, "application/json", request) 179 | if err != nil { 180 | t.Fatalf("Request failed: %v", err) 181 | } 182 | 183 | defer res.Body.Close() 184 | if res.StatusCode != tc.Expected.StatusCode { 185 | t.Errorf("Status codes do not match: %d vs %d", res.StatusCode, tc.Expected.StatusCode) 186 | } 187 | }) 188 | } 189 | } 190 | -------------------------------------------------------------------------------- /targets/jellyfin/api.go: -------------------------------------------------------------------------------- 1 | package jellyfin 2 | 3 | import ( 4 | "bytes" 5 | 
"encoding/json" 6 | "fmt" 7 | "net/http" 8 | 9 | "github.com/rs/zerolog" 10 | 11 | "github.com/saltydk/autoscan" 12 | ) 13 | 14 | type apiClient struct { 15 | client *http.Client 16 | log zerolog.Logger 17 | baseURL string 18 | token string 19 | } 20 | 21 | func newAPIClient(baseURL string, token string, log zerolog.Logger) apiClient { 22 | return apiClient{ 23 | client: &http.Client{}, 24 | log: log, 25 | baseURL: baseURL, 26 | token: token, 27 | } 28 | } 29 | 30 | func (c apiClient) do(req *http.Request) (*http.Response, error) { 31 | req.Header.Set("X-Emby-Token", c.token) 32 | req.Header.Set("Accept", "application/json") // Force JSON Response. 33 | 34 | res, err := c.client.Do(req) 35 | if err != nil { 36 | return nil, fmt.Errorf("%v: %w", err, autoscan.ErrTargetUnavailable) 37 | } 38 | 39 | if res.StatusCode >= 200 && res.StatusCode < 300 { 40 | return res, nil 41 | } 42 | 43 | c.log.Trace(). 44 | Stringer("request_url", res.Request.URL). 45 | Int("response_status", res.StatusCode). 
46 | Msg("Request failed") 47 | 48 | // statusCode not in the 2xx range, close response 49 | res.Body.Close() 50 | 51 | switch res.StatusCode { 52 | case 401: 53 | return nil, fmt.Errorf("invalid jellyfin token: %s: %w", res.Status, autoscan.ErrFatal) 54 | case 404, 500, 502, 503, 504: 55 | return nil, fmt.Errorf("%s: %w", res.Status, autoscan.ErrTargetUnavailable) 56 | default: 57 | return nil, fmt.Errorf("%s: %w", res.Status, autoscan.ErrFatal) 58 | } 59 | } 60 | 61 | func (c apiClient) Available() error { 62 | // create request 63 | reqURL := autoscan.JoinURL(c.baseURL, "System", "Info") 64 | req, err := http.NewRequest("GET", reqURL, nil) 65 | if err != nil { 66 | return fmt.Errorf("failed creating availability request: %v: %w", err, autoscan.ErrFatal) 67 | } 68 | 69 | // send request 70 | res, err := c.do(req) 71 | if err != nil { 72 | return fmt.Errorf("availability: %w", err) 73 | } 74 | 75 | defer res.Body.Close() 76 | return nil 77 | } 78 | 79 | type library struct { 80 | Name string 81 | Path string 82 | } 83 | 84 | func (c apiClient) Libraries() ([]library, error) { 85 | // create request 86 | reqURL := autoscan.JoinURL(c.baseURL, "Library", "VirtualFolders") 87 | req, err := http.NewRequest("GET", reqURL, nil) 88 | if err != nil { 89 | return nil, fmt.Errorf("failed creating libraries request: %v: %w", err, autoscan.ErrFatal) 90 | } 91 | 92 | // send request 93 | res, err := c.do(req) 94 | if err != nil { 95 | return nil, fmt.Errorf("libraries: %w", err) 96 | } 97 | 98 | defer res.Body.Close() 99 | 100 | // decode response 101 | type Response struct { 102 | Name string `json:"Name"` 103 | Locations []string `json:"Locations"` 104 | } 105 | 106 | resp := make([]Response, 0) 107 | if err := json.NewDecoder(res.Body).Decode(&resp); err != nil { 108 | return nil, fmt.Errorf("failed decoding libraries request response: %v: %w", err, autoscan.ErrFatal) 109 | } 110 | 111 | // process response 112 | libraries := make([]library, 0) 113 | for _, lib := range resp 
{ 114 | for _, folder := range lib.Locations { 115 | libPath := folder 116 | 117 | // Add trailing slash if there is none. 118 | if len(libPath) > 0 && libPath[len(libPath)-1] != '/' { 119 | libPath += "/" 120 | } 121 | 122 | libraries = append(libraries, library{ 123 | Name: lib.Name, 124 | Path: libPath, 125 | }) 126 | } 127 | } 128 | 129 | return libraries, nil 130 | } 131 | 132 | type scanRequest struct { 133 | Path string `json:"path"` 134 | UpdateType string `json:"updateType"` 135 | } 136 | 137 | func (c apiClient) Scan(path string) error { 138 | // create request payload 139 | type Payload struct { 140 | Updates []scanRequest `json:"Updates"` 141 | } 142 | 143 | payload := &Payload{ 144 | Updates: []scanRequest{ 145 | { 146 | Path: path, 147 | UpdateType: "Modified", 148 | }, 149 | }, 150 | } 151 | 152 | b, err := json.Marshal(payload) 153 | if err != nil { 154 | return fmt.Errorf("failed encoding scan request payload: %v: %w", err, autoscan.ErrFatal) 155 | } 156 | 157 | // create request 158 | reqURL := autoscan.JoinURL(c.baseURL, "Library", "Media", "Updated") 159 | req, err := http.NewRequest("POST", reqURL, bytes.NewBuffer(b)) 160 | if err != nil { 161 | return fmt.Errorf("failed creating scan request: %v: %w", err, autoscan.ErrFatal) 162 | } 163 | 164 | req.Header.Set("Content-Type", "application/json") 165 | 166 | // send request 167 | res, err := c.do(req) 168 | if err != nil { 169 | return fmt.Errorf("scan: %w", err) 170 | } 171 | 172 | defer res.Body.Close() 173 | return nil 174 | } 175 | -------------------------------------------------------------------------------- /targets/emby/api.go: -------------------------------------------------------------------------------- 1 | package emby 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "fmt" 7 | "net/http" 8 | 9 | "github.com/rs/zerolog" 10 | 11 | "github.com/saltydk/autoscan" 12 | ) 13 | 14 | type apiClient struct { 15 | client *http.Client 16 | log zerolog.Logger 17 | baseURL string 18 | token 
string 19 | } 20 | 21 | func newAPIClient(baseURL string, token string, log zerolog.Logger) apiClient { 22 | return apiClient{ 23 | client: &http.Client{}, 24 | log: log, 25 | baseURL: baseURL, 26 | token: token, 27 | } 28 | } 29 | 30 | func (c apiClient) do(req *http.Request) (*http.Response, error) { 31 | req.Header.Set("X-Emby-Token", c.token) 32 | req.Header.Set("Accept", "application/json") // Force JSON Response. 33 | 34 | res, err := c.client.Do(req) 35 | if err != nil { 36 | return nil, fmt.Errorf("%v: %w", err, autoscan.ErrTargetUnavailable) 37 | } 38 | 39 | if res.StatusCode >= 200 && res.StatusCode < 300 { 40 | return res, nil 41 | } 42 | 43 | c.log.Trace(). 44 | Stringer("request_url", res.Request.URL). 45 | Int("response_status", res.StatusCode). 46 | Msg("Request failed") 47 | 48 | // statusCode not in the 2xx range, close response 49 | res.Body.Close() 50 | 51 | switch res.StatusCode { 52 | case 401: 53 | return nil, fmt.Errorf("invalid emby token: %s: %w", res.Status, autoscan.ErrFatal) 54 | case 404, 500, 502, 503, 504: 55 | return nil, fmt.Errorf("%s: %w", res.Status, autoscan.ErrTargetUnavailable) 56 | default: 57 | return nil, fmt.Errorf("%s: %w", res.Status, autoscan.ErrFatal) 58 | } 59 | } 60 | 61 | func (c apiClient) Available() error { 62 | // create request 63 | reqURL := autoscan.JoinURL(c.baseURL, "emby", "System", "Info") 64 | req, err := http.NewRequest("GET", reqURL, nil) 65 | if err != nil { 66 | return fmt.Errorf("failed creating availability request: %v: %w", err, autoscan.ErrFatal) 67 | } 68 | 69 | // send request 70 | res, err := c.do(req) 71 | if err != nil { 72 | return fmt.Errorf("availability: %w", err) 73 | } 74 | 75 | defer res.Body.Close() 76 | return nil 77 | } 78 | 79 | type library struct { 80 | Name string 81 | Path string 82 | } 83 | 84 | func (c apiClient) Libraries() ([]library, error) { 85 | // create request 86 | reqURL := autoscan.JoinURL(c.baseURL, "emby", "Library", "SelectableMediaFolders") 87 | req, err := 
http.NewRequest("GET", reqURL, nil) 88 | if err != nil { 89 | return nil, fmt.Errorf("failed creating libraries request: %v: %w", err, autoscan.ErrFatal) 90 | } 91 | 92 | // send request 93 | res, err := c.do(req) 94 | if err != nil { 95 | return nil, fmt.Errorf("libraries: %w", err) 96 | } 97 | 98 | defer res.Body.Close() 99 | 100 | // decode response 101 | type Response struct { 102 | Name string `json:"Name"` 103 | Folders []struct { 104 | Path string `json:"Path"` 105 | } `json:"SubFolders"` 106 | } 107 | 108 | resp := make([]Response, 0) 109 | if err := json.NewDecoder(res.Body).Decode(&resp); err != nil { 110 | return nil, fmt.Errorf("failed decoding libraries request response: %v: %w", err, autoscan.ErrFatal) 111 | } 112 | 113 | // process response 114 | libraries := make([]library, 0) 115 | for _, lib := range resp { 116 | for _, folder := range lib.Folders { 117 | libPath := folder.Path 118 | 119 | // Add trailing slash if there is none. 120 | if len(libPath) > 0 && libPath[len(libPath)-1] != '/' { 121 | libPath += "/" 122 | } 123 | 124 | libraries = append(libraries, library{ 125 | Name: lib.Name, 126 | Path: libPath, 127 | }) 128 | } 129 | } 130 | 131 | return libraries, nil 132 | } 133 | 134 | type scanRequest struct { 135 | Path string `json:"path"` 136 | UpdateType string `json:"updateType"` 137 | } 138 | 139 | func (c apiClient) Scan(path string) error { 140 | // create request payload 141 | type Payload struct { 142 | Updates []scanRequest `json:"Updates"` 143 | } 144 | 145 | payload := &Payload{ 146 | Updates: []scanRequest{ 147 | { 148 | Path: path, 149 | UpdateType: "Created", 150 | }, 151 | }, 152 | } 153 | 154 | b, err := json.Marshal(payload) 155 | if err != nil { 156 | return fmt.Errorf("failed encoding scan request payload: %v: %w", err, autoscan.ErrFatal) 157 | } 158 | 159 | // create request 160 | reqURL := autoscan.JoinURL(c.baseURL, "Library", "Media", "Updated") 161 | req, err := http.NewRequest("POST", reqURL, bytes.NewBuffer(b)) 162 | 
if err != nil { 163 | return fmt.Errorf("failed creating scan request: %v: %w", err, autoscan.ErrFatal) 164 | } 165 | 166 | req.Header.Set("Content-Type", "application/json") 167 | 168 | // send request 169 | res, err := c.do(req) 170 | if err != nil { 171 | return fmt.Errorf("scan: %w", err) 172 | } 173 | 174 | defer res.Body.Close() 175 | return nil 176 | } 177 | -------------------------------------------------------------------------------- /triggers/sonarr/sonarr.go: -------------------------------------------------------------------------------- 1 | package sonarr 2 | 3 | import ( 4 | "encoding/json" 5 | "net/http" 6 | "path" 7 | "strings" 8 | "time" 9 | 10 | "github.com/rs/zerolog/hlog" 11 | 12 | "github.com/saltydk/autoscan" 13 | ) 14 | 15 | type Config struct { 16 | Name string `yaml:"name"` 17 | Priority int `yaml:"priority"` 18 | Rewrite []autoscan.Rewrite `yaml:"rewrite"` 19 | Verbosity string `yaml:"verbosity"` 20 | } 21 | 22 | // New creates an autoscan-compatible HTTP Trigger for Sonarr webhooks. 23 | func New(c Config) (autoscan.HTTPTrigger, error) { 24 | rewriter, err := autoscan.NewRewriter(c.Rewrite) 25 | if err != nil { 26 | return nil, err 27 | } 28 | 29 | trigger := func(callback autoscan.ProcessorFunc) http.Handler { 30 | return handler{ 31 | callback: callback, 32 | priority: c.Priority, 33 | rewrite: rewriter, 34 | } 35 | } 36 | 37 | return trigger, nil 38 | } 39 | 40 | type handler struct { 41 | priority int 42 | rewrite autoscan.Rewriter 43 | callback autoscan.ProcessorFunc 44 | } 45 | 46 | type sonarrEvent struct { 47 | Type string `json:"eventType"` 48 | 49 | File struct { 50 | RelativePath string 51 | } `json:"episodeFile"` 52 | 53 | Series struct { 54 | Path string 55 | } `json:"series"` 56 | 57 | RenamedFiles []struct { 58 | // use PreviousPath as the Series.Path might have changed. 
59 | PreviousPath string 60 | RelativePath string 61 | } `json:"renamedEpisodeFiles"` 62 | } 63 | 64 | func (h handler) ServeHTTP(rw http.ResponseWriter, r *http.Request) { 65 | var err error 66 | rlog := hlog.FromRequest(r) 67 | 68 | event := new(sonarrEvent) 69 | err = json.NewDecoder(r.Body).Decode(event) 70 | if err != nil { 71 | rlog.Error().Err(err).Msg("Failed decoding request") 72 | rw.WriteHeader(http.StatusBadRequest) 73 | return 74 | } 75 | 76 | rlog.Trace().Interface("event", event).Msg("Received JSON body") 77 | 78 | if strings.EqualFold(event.Type, "Test") { 79 | rlog.Info().Msg("Received test event") 80 | rw.WriteHeader(http.StatusOK) 81 | return 82 | } 83 | 84 | var paths []string 85 | 86 | // a Download event is either an upgrade or a new file. 87 | // the EpisodeFileDelete event shares the same request format as Download. 88 | if strings.EqualFold(event.Type, "Download") || strings.EqualFold(event.Type, "EpisodeFileDelete") { 89 | if event.File.RelativePath == "" || event.Series.Path == "" { 90 | rlog.Error().Msg("Required fields are missing") 91 | rw.WriteHeader(http.StatusBadRequest) 92 | return 93 | } 94 | 95 | // Use path.Dir to get the directory in which the file is located 96 | folderPath := path.Dir(path.Join(event.Series.Path, event.File.RelativePath)) 97 | paths = append(paths, folderPath) 98 | } 99 | 100 | // An entire show has been deleted 101 | if strings.EqualFold(event.Type, "SeriesDelete") { 102 | if event.Series.Path == "" { 103 | rlog.Error().Msg("Required fields are missing") 104 | rw.WriteHeader(http.StatusBadRequest) 105 | return 106 | } 107 | 108 | // Scan the folder of the show 109 | paths = append(paths, event.Series.Path) 110 | } 111 | 112 | if strings.EqualFold(event.Type, "Rename") { 113 | if event.Series.Path == "" { 114 | rlog.Error().Msg("Required fields are missing") 115 | rw.WriteHeader(http.StatusBadRequest) 116 | return 117 | } 118 | 119 | // Keep track of which paths we have already added to paths. 
120 | encountered := make(map[string]bool) 121 | 122 | for _, renamedFile := range event.RenamedFiles { 123 | previousPath := path.Dir(renamedFile.PreviousPath) 124 | currentPath := path.Dir(path.Join(event.Series.Path, renamedFile.RelativePath)) 125 | 126 | // if previousPath not in paths, then add it. 127 | if _, ok := encountered[previousPath]; !ok { 128 | encountered[previousPath] = true 129 | paths = append(paths, previousPath) 130 | } 131 | 132 | // if currentPath not in paths, then add it. 133 | if _, ok := encountered[currentPath]; !ok { 134 | encountered[currentPath] = true 135 | paths = append(paths, currentPath) 136 | } 137 | } 138 | } 139 | 140 | var scans []autoscan.Scan 141 | 142 | for _, folderPath := range paths { 143 | folderPath := h.rewrite(folderPath) 144 | 145 | scan := autoscan.Scan{ 146 | Folder: folderPath, 147 | Priority: h.priority, 148 | Time: now(), 149 | } 150 | 151 | scans = append(scans, scan) 152 | } 153 | 154 | err = h.callback(scans...) 155 | if err != nil { 156 | rlog.Error().Err(err).Msg("Processor could not process scans") 157 | rw.WriteHeader(http.StatusInternalServerError) 158 | return 159 | } 160 | 161 | for _, scan := range scans { 162 | rlog.Info(). 163 | Str("path", scan.Folder). 164 | Str("event", event.Type). 
165 | Msg("Scan moved to processor") 166 | } 167 | 168 | rw.WriteHeader(http.StatusOK) 169 | } 170 | 171 | var now = time.Now 172 | -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | name: Build 2 | 3 | on: 4 | push: 5 | branches: 6 | - '*' 7 | tags: 8 | - 'v*' 9 | pull_request: 10 | branches: 11 | - master 12 | 13 | jobs: 14 | build: 15 | runs-on: ubuntu-latest 16 | steps: 17 | # dependencies 18 | - name: goreleaser 19 | uses: goreleaser/goreleaser-action@v6 20 | with: 21 | install-only: true 22 | version: 1.7.0 23 | 24 | - name: goreleaser info 25 | run: goreleaser -v 26 | 27 | - name: task 28 | uses: arduino/setup-task@v2 29 | 30 | - name: task info 31 | run: task --version 32 | 33 | - name: qemu 34 | if: github.event.pull_request.head.repo.fork == false 35 | uses: docker/setup-qemu-action@v3 36 | 37 | - name: buildx 38 | if: github.event.pull_request.head.repo.fork == false 39 | uses: docker/setup-buildx-action@v3 40 | 41 | # checkout 42 | - name: checkout 43 | uses: actions/checkout@v4 44 | with: 45 | fetch-depth: 0 46 | 47 | # setup go 48 | - name: go 49 | uses: actions/setup-go@v5 50 | with: 51 | go-version: 1.19 52 | 53 | - name: go info 54 | run: | 55 | go version 56 | go env 57 | 58 | # cache 59 | - name: cache-go 60 | uses: actions/cache@v4 61 | with: 62 | path: | 63 | ~/.cache/go-build 64 | ~/go/pkg/mod 65 | key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} 66 | restore-keys: | 67 | ${{ runner.os }}-go-cache-mod 68 | 69 | - name: cache-task 70 | uses: actions/cache@v4 71 | with: 72 | path: .task/**/* 73 | key: ${{ runner.os }}-go-task 74 | 75 | # vendor 76 | - name: vendor 77 | run: | 78 | task vendor 79 | 80 | # test 81 | - name: tests 82 | run: | 83 | task test 84 | 85 | # git status 86 | - name: git status 87 | run: git status 88 | 89 | # build 90 | - name: build 91 | if: startsWith(github.ref, 
'refs/tags/') == false 92 | run: | 93 | task snapshot 94 | 95 | # publish 96 | - name: publish 97 | if: startsWith(github.ref, 'refs/tags/') 98 | env: 99 | GITHUB_TOKEN: ${{ secrets.GH_TOKEN }} 100 | GITHUB_REF: ${{ github.ref }} 101 | run: | 102 | task publish 103 | 104 | # artifacts 105 | - name: artifact_linux 106 | uses: actions/upload-artifact@v4 107 | with: 108 | name: build_linux 109 | path: dist/*linux* 110 | 111 | - name: artifact_darwin 112 | uses: actions/upload-artifact@v4 113 | with: 114 | name: build_darwin 115 | path: dist/*darwin* 116 | 117 | # docker login 118 | - name: docker login 119 | if: github.event.pull_request.head.repo.fork == false 120 | env: 121 | DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} 122 | DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} 123 | run: | 124 | echo "${DOCKER_PASSWORD}" | docker login --username "${DOCKER_USERNAME}" --password-stdin 125 | 126 | # docker build (latest & tag) 127 | - name: release tag 128 | if: startsWith(github.ref, 'refs/tags/') == true && github.event.pull_request.head.repo.fork == false 129 | uses: little-core-labs/get-git-tag@v3.0.2 130 | id: releasetag 131 | with: 132 | tagRegex: "v?(.+)" 133 | 134 | - name: docker - build release 135 | if: startsWith(github.ref, 'refs/tags/') == true && github.event.pull_request.head.repo.fork == false 136 | uses: docker/build-push-action@v6 137 | with: 138 | context: . 
139 | file: ./docker/Dockerfile 140 | platforms: linux/amd64,linux/arm64,linux/arm/v7 141 | pull: true 142 | push: true 143 | cache-from: type=gha 144 | cache-to: type=gha,mode=max 145 | tags: | 146 | saltydk/autoscan:${{ steps.releasetag.outputs.tag }} 147 | saltydk/autoscan:latest 148 | 149 | # docker build (branch) 150 | - name: branch name 151 | if: startsWith(github.ref, 'refs/tags/') == false && github.event.pull_request.head.repo.fork == false 152 | id: branch-name 153 | uses: tj-actions/branch-names@v8 154 | 155 | - name: docker tag 156 | if: startsWith(github.ref, 'refs/tags/') == false && github.event.pull_request.head.repo.fork == false 157 | uses: frabert/replace-string-action@v2.5 158 | id: dockertag 159 | with: 160 | pattern: '[:\.\/]+' 161 | string: "${{ steps.branch-name.outputs.current_branch }}" 162 | replace-with: '-' 163 | flags: 'g' 164 | 165 | - name: docker - build branch 166 | if: startsWith(github.ref, 'refs/tags/') == false && github.event.pull_request.head.repo.fork == false 167 | uses: docker/build-push-action@v6 168 | with: 169 | context: . 
170 | file: ./docker/Dockerfile 171 | platforms: linux/amd64,linux/arm64,linux/arm/v7 172 | pull: true 173 | push: true 174 | cache-from: type=gha 175 | cache-to: type=gha,mode=max 176 | tags: | 177 | saltydk/autoscan:${{ steps.dockertag.outputs.replaced }} 178 | 179 | # cleanup 180 | - name: cleanup 181 | if: github.event.pull_request.head.repo.fork == false 182 | run: | 183 | rm -f ${HOME}/.docker/config.json 184 | -------------------------------------------------------------------------------- /processor/datastore_test.go: -------------------------------------------------------------------------------- 1 | package processor 2 | 3 | import ( 4 | "database/sql" 5 | "errors" 6 | "reflect" 7 | "testing" 8 | "time" 9 | 10 | "github.com/saltydk/autoscan" 11 | "github.com/saltydk/autoscan/migrate" 12 | 13 | // sqlite3 driver 14 | _ "modernc.org/sqlite" 15 | ) 16 | 17 | const sqlGetScan = ` 18 | SELECT folder, priority, time FROM scan 19 | WHERE folder = ? 20 | ` 21 | 22 | func (store *datastore) GetScan(folder string) (autoscan.Scan, error) { 23 | row := store.QueryRow(sqlGetScan, folder) 24 | 25 | scan := autoscan.Scan{} 26 | err := row.Scan(&scan.Folder, &scan.Priority, &scan.Time) 27 | 28 | return scan, err 29 | } 30 | 31 | func getDatastore(t *testing.T) *datastore { 32 | db, err := sql.Open("sqlite", ":memory:") 33 | if err != nil { 34 | t.Fatal(err) 35 | } 36 | 37 | mg, err := migrate.New(db, "migrations") 38 | if err != nil { 39 | t.Fatal(err) 40 | } 41 | 42 | ds, err := newDatastore(db, mg) 43 | if err != nil { 44 | t.Fatal(err) 45 | } 46 | 47 | return ds 48 | } 49 | 50 | func TestUpsert(t *testing.T) { 51 | type Test struct { 52 | Name string 53 | Scans []autoscan.Scan 54 | WantScan autoscan.Scan 55 | } 56 | 57 | var testCases = []Test{ 58 | { 59 | Name: "All fields", 60 | Scans: []autoscan.Scan{ 61 | { 62 | Folder: "testfolder/test", 63 | Priority: 5, 64 | Time: time.Time{}.Add(1), 65 | }, 66 | }, 67 | WantScan: autoscan.Scan{ 68 | Folder: "testfolder/test", 
69 | Priority: 5, 70 | Time: time.Time{}.Add(1), 71 | }, 72 | }, 73 | { 74 | Name: "Priority shall increase but not decrease", 75 | Scans: []autoscan.Scan{ 76 | { 77 | Priority: 2, 78 | Time: time.Time{}.Add(1), 79 | }, 80 | { 81 | Priority: 5, 82 | Time: time.Time{}.Add(2), 83 | }, 84 | { 85 | Priority: 3, 86 | Time: time.Time{}.Add(3), 87 | }, 88 | }, 89 | WantScan: autoscan.Scan{ 90 | Priority: 5, 91 | Time: time.Time{}.Add(3), 92 | }, 93 | }, 94 | } 95 | 96 | for _, tc := range testCases { 97 | t.Run(tc.Name, func(t *testing.T) { 98 | store := getDatastore(t) 99 | err := store.Upsert(tc.Scans) 100 | if err != nil { 101 | t.Fatal(err) 102 | } 103 | 104 | scan, err := store.GetScan(tc.WantScan.Folder) 105 | if err != nil { 106 | t.Fatal(err) 107 | } 108 | 109 | if !reflect.DeepEqual(tc.WantScan, scan) { 110 | t.Log(scan) 111 | t.Errorf("Scans do not equal") 112 | } 113 | }) 114 | } 115 | } 116 | 117 | func TestGetAvailableScan(t *testing.T) { 118 | type Test struct { 119 | Name string 120 | Now time.Time 121 | MinAge time.Duration 122 | GiveScans []autoscan.Scan 123 | WantErr error 124 | WantScan autoscan.Scan 125 | } 126 | 127 | testTime := time.Now().UTC() 128 | 129 | var testCases = []Test{ 130 | { 131 | Name: "Retrieves no folders if all folders are too young", 132 | Now: testTime, 133 | MinAge: 2 * time.Minute, 134 | GiveScans: []autoscan.Scan{ 135 | {Folder: "1", Time: testTime.Add(-1 * time.Minute)}, 136 | {Folder: "2", Time: testTime.Add(-1 * time.Minute)}, 137 | }, 138 | WantErr: autoscan.ErrNoScans, 139 | }, 140 | { 141 | Name: "Retrieves folder if older than minimum age", 142 | Now: testTime, 143 | MinAge: 5 * time.Minute, 144 | GiveScans: []autoscan.Scan{ 145 | {Folder: "1", Time: testTime.Add(-6 * time.Minute)}, 146 | }, 147 | WantScan: autoscan.Scan{ 148 | Folder: "1", Time: testTime.Add(-6 * time.Minute), 149 | }, 150 | }, 151 | { 152 | Name: "Returns all fields", 153 | Now: testTime, 154 | MinAge: 5 * time.Minute, 155 | GiveScans: []autoscan.Scan{ 
156 | { 157 | Folder: "Amazing folder", 158 | Priority: 69, 159 | Time: testTime.Add(-6 * time.Minute), 160 | }, 161 | }, 162 | WantScan: autoscan.Scan{ 163 | Folder: "Amazing folder", 164 | Priority: 69, 165 | Time: testTime.Add(-6 * time.Minute), 166 | }, 167 | }, 168 | } 169 | 170 | for _, tc := range testCases { 171 | t.Run(tc.Name, func(t *testing.T) { 172 | store := getDatastore(t) 173 | err := store.Upsert(tc.GiveScans) 174 | if err != nil { 175 | t.Fatal(err) 176 | } 177 | 178 | now = func() time.Time { 179 | return tc.Now 180 | } 181 | 182 | scan, err := store.GetAvailableScan(tc.MinAge) 183 | if !errors.Is(err, tc.WantErr) { 184 | t.Fatal(err) 185 | } 186 | 187 | if !reflect.DeepEqual(scan, tc.WantScan) { 188 | t.Log(scan) 189 | t.Log(tc.WantScan) 190 | t.Errorf("Scan does not match") 191 | } 192 | }) 193 | } 194 | } 195 | 196 | func TestDelete(t *testing.T) { 197 | type Test struct { 198 | Name string 199 | GiveScans []autoscan.Scan 200 | GiveDelete autoscan.Scan 201 | WantScans []autoscan.Scan 202 | } 203 | 204 | var testCases = []Test{ 205 | { 206 | Name: "Only deletes specific folder, not other folders", 207 | GiveScans: []autoscan.Scan{ 208 | {Folder: "1"}, 209 | {Folder: "2"}, 210 | }, 211 | GiveDelete: autoscan.Scan{ 212 | Folder: "1", 213 | }, 214 | WantScans: []autoscan.Scan{ 215 | {Folder: "2"}, 216 | }, 217 | }, 218 | } 219 | 220 | for _, tc := range testCases { 221 | t.Run(tc.Name, func(t *testing.T) { 222 | store := getDatastore(t) 223 | err := store.Upsert(tc.GiveScans) 224 | if err != nil { 225 | t.Fatal(err) 226 | } 227 | 228 | err = store.Delete(tc.GiveDelete) 229 | if err != nil { 230 | t.Fatal(err) 231 | } 232 | 233 | scans, err := store.GetAll() 234 | if err != nil { 235 | t.Fatal(err) 236 | } 237 | 238 | if !reflect.DeepEqual(scans, tc.WantScans) { 239 | t.Log(scans) 240 | t.Errorf("Scans do not match") 241 | } 242 | }) 243 | } 244 | } 245 | -------------------------------------------------------------------------------- 
/triggers/bernard/paths.go: -------------------------------------------------------------------------------- 1 | package bernard 2 | 3 | import ( 4 | "fmt" 5 | "path/filepath" 6 | 7 | "github.com/l3uddz/bernard" 8 | "github.com/l3uddz/bernard/datastore" 9 | "github.com/l3uddz/bernard/datastore/sqlite" 10 | ) 11 | 12 | type Paths struct { 13 | NewFolders []string 14 | OldFolders []string 15 | } 16 | 17 | func NewPathsHook(driveID string, store *bds, diff *sqlite.Difference) (bernard.Hook, *Paths) { 18 | var paths Paths 19 | 20 | hook := func(drive datastore.Drive, files []datastore.File, folders []datastore.Folder, removed []string) error { 21 | // get folders from diff (that we are interested in) 22 | parents, err := getDiffFolders(store, driveID, diff) 23 | if err != nil { 24 | return fmt.Errorf("getting parents: %w", err) 25 | } 26 | 27 | // get roots from folders 28 | rootNewFolders, _ := datastore.RootFolders(parents.New) 29 | rootOldFolders, _ := datastore.RootFolders(parents.Old) 30 | 31 | // get new/changed paths 32 | for _, folder := range rootNewFolders { 33 | p, err := getFolderPath(store, driveID, folder.ID, parents.FolderMaps.Current) 34 | if err != nil { 35 | return fmt.Errorf("building folder path: %v: %w", folder.ID, err) 36 | } 37 | 38 | paths.NewFolders = append(paths.NewFolders, p) 39 | } 40 | 41 | // get removed paths 42 | for _, folder := range rootOldFolders { 43 | p, err := getFolderPath(store, driveID, folder.ID, parents.FolderMaps.Old) 44 | if err != nil { 45 | return fmt.Errorf("building old folder path: %v: %w", folder.ID, err) 46 | } 47 | 48 | paths.OldFolders = append(paths.OldFolders, p) 49 | } 50 | 51 | return nil 52 | } 53 | 54 | return hook, &paths 55 | } 56 | 57 | type diffFolderMaps struct { 58 | Current map[string]datastore.Folder 59 | Old map[string]datastore.Folder 60 | } 61 | 62 | func getDiffFolderMaps(diff *sqlite.Difference) *diffFolderMaps { 63 | currentFolders := make(map[string]datastore.Folder) 64 | oldFolders := 
make(map[string]datastore.Folder) 65 | 66 | for i, f := range diff.AddedFolders { 67 | currentFolders[f.ID] = diff.AddedFolders[i] 68 | oldFolders[f.ID] = diff.AddedFolders[i] 69 | } 70 | 71 | for i, f := range diff.ChangedFolders { 72 | currentFolders[f.New.ID] = diff.ChangedFolders[i].New 73 | oldFolders[f.Old.ID] = diff.ChangedFolders[i].Old 74 | } 75 | 76 | for i, f := range diff.RemovedFolders { 77 | oldFolders[f.ID] = diff.RemovedFolders[i] 78 | } 79 | 80 | return &diffFolderMaps{ 81 | Current: currentFolders, 82 | Old: oldFolders, 83 | } 84 | } 85 | 86 | type Parents struct { 87 | New []datastore.Folder 88 | Old []datastore.Folder 89 | FolderMaps *diffFolderMaps 90 | } 91 | 92 | func getDiffFolders(store *bds, driveId string, diff *sqlite.Difference) (*Parents, error) { 93 | folderMaps := getDiffFolderMaps(diff) 94 | 95 | newParents := make(map[string]datastore.Folder) 96 | oldParents := make(map[string]datastore.Folder) 97 | 98 | // changed folders 99 | for _, folder := range diff.ChangedFolders { 100 | newParents[folder.New.ID] = folder.New 101 | oldParents[folder.Old.ID] = folder.Old 102 | } 103 | 104 | // removed folders 105 | for _, folder := range diff.RemovedFolders { 106 | oldParents[folder.ID] = folder 107 | } 108 | 109 | // added files 110 | for _, file := range diff.AddedFiles { 111 | folder, err := getFolder(store, driveId, file.Parent, folderMaps.Current) 112 | if err != nil { 113 | return nil, fmt.Errorf("added file: %w", err) 114 | } 115 | 116 | newParents[folder.ID] = *folder 117 | } 118 | 119 | // changed files 120 | for _, file := range diff.ChangedFiles { 121 | // current 122 | currentFolder, err := getFolder(store, driveId, file.New.Parent, folderMaps.Current) 123 | if err != nil { 124 | return nil, fmt.Errorf("changed new file: %w", err) 125 | } 126 | 127 | newParents[currentFolder.ID] = *currentFolder 128 | 129 | // old 130 | oldFolder, err := getFolder(store, driveId, file.Old.Parent, folderMaps.Old) 131 | if err != nil { 132 | return 
nil, fmt.Errorf("changed old file: %w", err) 133 | } 134 | 135 | oldParents[oldFolder.ID] = *oldFolder 136 | } 137 | 138 | // removed files 139 | for _, file := range diff.RemovedFiles { 140 | oldFolder, err := getFolder(store, driveId, file.Parent, folderMaps.Old) 141 | if err != nil { 142 | return nil, fmt.Errorf("removed file: %w", err) 143 | } 144 | 145 | oldParents[oldFolder.ID] = *oldFolder 146 | } 147 | 148 | // create Parents object 149 | p := &Parents{ 150 | New: make([]datastore.Folder, 0), 151 | Old: make([]datastore.Folder, 0), 152 | FolderMaps: folderMaps, 153 | } 154 | 155 | for _, folder := range newParents { 156 | p.New = append(p.New, folder) 157 | } 158 | 159 | for _, folder := range oldParents { 160 | p.Old = append(p.Old, folder) 161 | } 162 | 163 | return p, nil 164 | } 165 | 166 | func getFolder(store *bds, driveId string, folderId string, folderMap map[string]datastore.Folder) (*datastore.Folder, error) { 167 | // find folder in map 168 | if folder, ok := folderMap[folderId]; ok { 169 | return &folder, nil 170 | } 171 | 172 | if folderId == driveId { 173 | folder := datastore.Folder{ 174 | ID: driveId, 175 | Name: "", 176 | Parent: "", 177 | Trashed: false, 178 | } 179 | 180 | folderMap[driveId] = folder 181 | return &folder, nil 182 | } 183 | 184 | // search datastore 185 | folder, err := store.GetFolder(driveId, folderId) 186 | if err != nil { 187 | return nil, fmt.Errorf("could not get folder: %v: %w", folderId, err) 188 | } 189 | 190 | // add folder to map 191 | folderMap[folder.ID] = *folder 192 | 193 | return folder, nil 194 | } 195 | 196 | func getFolderPath(store *bds, driveId string, folderId string, folderMap map[string]datastore.Folder) (string, error) { 197 | path := "" 198 | 199 | // folderId == driveId 200 | if folderId == driveId { 201 | return "/", nil 202 | } 203 | 204 | // get top folder 205 | topFolder, ok := folderMap[folderId] 206 | if !ok { 207 | f, err := store.GetFolder(driveId, folderId) 208 | if err != nil { 209 | 
return filepath.Join("/", path), fmt.Errorf("could not get folder %v: %w", folderId, err) 210 | } 211 | 212 | topFolder = *f 213 | } 214 | 215 | // set logic variables 216 | path = topFolder.Name 217 | nextFolderId := topFolder.Parent 218 | 219 | // get folder paths 220 | for nextFolderId != "" && nextFolderId != driveId { 221 | f, ok := folderMap[nextFolderId] 222 | if !ok { 223 | df, err := store.GetFolder(driveId, nextFolderId) 224 | if err != nil { 225 | return filepath.Join("/", path), fmt.Errorf("could not get folder %v: %w", nextFolderId, err) 226 | } 227 | 228 | f = *df 229 | folderMap[f.ID] = f 230 | } 231 | 232 | path = filepath.Join(f.Name, path) 233 | nextFolderId = f.Parent 234 | } 235 | 236 | return filepath.Join("/", path), nil 237 | } 238 | -------------------------------------------------------------------------------- /triggers/inotify/inotify.go: -------------------------------------------------------------------------------- 1 | package inotify 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "path/filepath" 7 | "strings" 8 | "sync" 9 | "time" 10 | 11 | "github.com/fsnotify/fsnotify" 12 | "github.com/rs/zerolog" 13 | 14 | "github.com/saltydk/autoscan" 15 | ) 16 | 17 | type Config struct { 18 | Priority int `yaml:"priority"` 19 | Verbosity string `yaml:"verbosity"` 20 | Rewrite []autoscan.Rewrite `yaml:"rewrite"` 21 | Include []string `yaml:"include"` 22 | Exclude []string `yaml:"exclude"` 23 | Paths []struct { 24 | Path string `yaml:"path"` 25 | Rewrite []autoscan.Rewrite `yaml:"rewrite"` 26 | Include []string `yaml:"include"` 27 | Exclude []string `yaml:"exclude"` 28 | } `yaml:"paths"` 29 | } 30 | 31 | type daemon struct { 32 | callback autoscan.ProcessorFunc 33 | paths []path 34 | watcher *fsnotify.Watcher 35 | queue *queue 36 | log zerolog.Logger 37 | } 38 | 39 | type path struct { 40 | Path string 41 | Rewriter autoscan.Rewriter 42 | Allowed autoscan.Filterer 43 | } 44 | 45 | func New(c Config) (autoscan.Trigger, error) { 46 | l := 
autoscan.GetLogger(c.Verbosity).With(). 47 | Str("trigger", "inotify"). 48 | Logger() 49 | 50 | var paths []path 51 | for _, p := range c.Paths { 52 | p := p 53 | 54 | rewriter, err := autoscan.NewRewriter(append(p.Rewrite, c.Rewrite...)) 55 | if err != nil { 56 | return nil, err 57 | } 58 | 59 | filterer, err := autoscan.NewFilterer(append(p.Include, c.Include...), append(p.Exclude, c.Exclude...)) 60 | if err != nil { 61 | return nil, err 62 | } 63 | 64 | paths = append(paths, path{ 65 | Path: p.Path, 66 | Rewriter: rewriter, 67 | Allowed: filterer, 68 | }) 69 | } 70 | 71 | trigger := func(callback autoscan.ProcessorFunc) { 72 | d := daemon{ 73 | log: l, 74 | callback: callback, 75 | paths: paths, 76 | queue: newQueue(callback, l, c.Priority), 77 | } 78 | 79 | // start job(s) 80 | if err := d.startMonitoring(); err != nil { 81 | l.Error(). 82 | Err(err). 83 | Msg("Failed initialising jobs") 84 | return 85 | } 86 | } 87 | 88 | return trigger, nil 89 | } 90 | 91 | func (d *daemon) startMonitoring() error { 92 | // create watcher 93 | watcher, err := fsnotify.NewWatcher() 94 | if err != nil { 95 | return fmt.Errorf("create watcher: %w", err) 96 | } 97 | d.watcher = watcher 98 | 99 | // setup watcher 100 | for _, p := range d.paths { 101 | if err := filepath.Walk(p.Path, d.walkFunc); err != nil { 102 | _ = d.watcher.Close() 103 | return err 104 | } 105 | } 106 | 107 | // start worker 108 | go d.worker() 109 | 110 | return nil 111 | } 112 | 113 | func (d *daemon) walkFunc(path string, fi os.FileInfo, err error) error { 114 | // handle error 115 | if err != nil { 116 | return fmt.Errorf("walk func: %v: %w", path, err) 117 | } 118 | 119 | // ignore non-directory 120 | if !fi.Mode().IsDir() { 121 | return nil 122 | } 123 | 124 | if err := d.watcher.Add(path); err != nil { 125 | return fmt.Errorf("watch directory: %v: %w", path, err) 126 | } 127 | 128 | d.log.Trace(). 129 | Str("path", path). 
130 | Msg("Watching directory") 131 | 132 | return nil 133 | } 134 | 135 | func (d *daemon) getPathObject(path string) (*path, error) { 136 | for _, p := range d.paths { 137 | if strings.HasPrefix(path, p.Path) { 138 | return &p, nil 139 | } 140 | } 141 | 142 | return nil, fmt.Errorf("path object not found: %v", path) 143 | } 144 | 145 | func (d *daemon) worker() { 146 | // close watcher 147 | defer d.watcher.Close() 148 | 149 | // process events 150 | for { 151 | select { 152 | case event := <-d.watcher.Events: 153 | // new filesystem event 154 | d.log.Trace(). 155 | Interface("event", event). 156 | Msg("Filesystem event") 157 | 158 | switch { 159 | case event.Op&fsnotify.Create == fsnotify.Create: 160 | // create 161 | fi, err := os.Stat(event.Name) 162 | if err != nil { 163 | d.log.Error(). 164 | Err(err). 165 | Str("path", event.Name). 166 | Msg("Failed retrieving filesystem info") 167 | continue 168 | } 169 | 170 | // watch new directories 171 | if fi.IsDir() { 172 | if err := filepath.Walk(event.Name, d.walkFunc); err != nil { 173 | d.log.Error(). 174 | Err(err). 175 | Str("path", event.Name). 176 | Msg("Failed watching new directory") 177 | } 178 | 179 | continue 180 | } 181 | 182 | case event.Op&fsnotify.Rename == fsnotify.Rename, event.Op&fsnotify.Remove == fsnotify.Remove: 183 | // renamed / removed 184 | default: 185 | // ignore this event 186 | continue 187 | } 188 | 189 | // get path object 190 | p, err := d.getPathObject(event.Name) 191 | if err != nil { 192 | d.log.Error(). 193 | Err(err). 194 | Str("path", event.Name). 
195 | Msg("Failed determining path object") 196 | continue 197 | } 198 | 199 | // rewrite 200 | rewritten := p.Rewriter(event.Name) 201 | 202 | // filter 203 | if !p.Allowed(rewritten) { 204 | continue 205 | } 206 | 207 | // get directory where path has an extension 208 | if filepath.Ext(rewritten) != "" { 209 | // there was most likely a file extension, use the directory 210 | rewritten = filepath.Dir(rewritten) 211 | } 212 | 213 | // move to queue 214 | d.queue.inputs <- rewritten 215 | 216 | case err := <-d.watcher.Errors: 217 | d.log.Error(). 218 | Err(err). 219 | Msg("Failed receiving filesystem events") 220 | } 221 | } 222 | } 223 | 224 | type queue struct { 225 | callback autoscan.ProcessorFunc 226 | log zerolog.Logger 227 | priority int 228 | inputs chan string 229 | scans map[string]time.Time 230 | lock *sync.Mutex 231 | } 232 | 233 | func newQueue(cb autoscan.ProcessorFunc, log zerolog.Logger, priority int) *queue { 234 | q := &queue{ 235 | callback: cb, 236 | log: log, 237 | priority: priority, 238 | inputs: make(chan string), 239 | scans: make(map[string]time.Time), 240 | lock: &sync.Mutex{}, 241 | } 242 | 243 | go q.worker() 244 | 245 | return q 246 | } 247 | 248 | func (q *queue) add(path string) { 249 | // acquire lock 250 | q.lock.Lock() 251 | defer q.lock.Unlock() 252 | 253 | // queue scan task 254 | q.scans[path] = time.Now().Add(10 * time.Second) 255 | } 256 | 257 | func (q *queue) worker() { 258 | for { 259 | select { 260 | case path, ok := <-q.inputs: 261 | if !ok { 262 | // channel closed 263 | return 264 | } 265 | 266 | // add path to queue 267 | q.add(path) 268 | 269 | default: 270 | // process queue 271 | q.process() 272 | } 273 | } 274 | } 275 | 276 | func (q *queue) process() { 277 | // acquire lock 278 | q.lock.Lock() 279 | defer q.lock.Unlock() 280 | 281 | // sleep if no scans queued 282 | if len(q.scans) == 0 { 283 | time.Sleep(100 * time.Millisecond) 284 | return 285 | } 286 | 287 | // move scans to processor 288 | for p, t := range 
q.scans { 289 | // time has not elapsed 290 | if time.Now().Before(t) { 291 | continue 292 | } 293 | 294 | // move to processor 295 | err := q.callback(autoscan.Scan{ 296 | Folder: filepath.Clean(p), 297 | Priority: q.priority, 298 | Time: time.Now(), 299 | }) 300 | 301 | if err != nil { 302 | q.log.Error(). 303 | Err(err). 304 | Str("path", p). 305 | Msg("Failed moving scan to processor") 306 | } else { 307 | q.log.Info(). 308 | Str("path", p). 309 | Msg("Scan moved to processor") 310 | } 311 | 312 | // remove queued scan 313 | delete(q.scans, p) 314 | } 315 | } 316 | -------------------------------------------------------------------------------- /triggers/bernard/bernard.go: -------------------------------------------------------------------------------- 1 | package bernard 2 | 3 | import ( 4 | "database/sql" 5 | "errors" 6 | "fmt" 7 | "path/filepath" 8 | "time" 9 | 10 | lowe "github.com/l3uddz/bernard" 11 | ds "github.com/l3uddz/bernard/datastore" 12 | "github.com/l3uddz/bernard/datastore/sqlite" 13 | "github.com/m-rots/stubbs" 14 | "github.com/robfig/cron/v3" 15 | "github.com/rs/zerolog" 16 | 17 | "github.com/saltydk/autoscan" 18 | ) 19 | 20 | const ( 21 | maxSyncRetries = 5 22 | ) 23 | 24 | type Config struct { 25 | AccountPath string `yaml:"account"` 26 | CronSchedule string `yaml:"cron"` 27 | Priority int `yaml:"priority"` 28 | TimeOffset time.Duration `yaml:"time-offset"` 29 | Verbosity string `yaml:"verbosity"` 30 | Rewrite []autoscan.Rewrite `yaml:"rewrite"` 31 | Include []string `yaml:"include"` 32 | Exclude []string `yaml:"exclude"` 33 | Drives []struct { 34 | ID string `yaml:"id"` 35 | TimeOffset time.Duration `yaml:"time-offset"` 36 | Rewrite []autoscan.Rewrite `yaml:"rewrite"` 37 | Include []string `yaml:"include"` 38 | Exclude []string `yaml:"exclude"` 39 | } `yaml:"drives"` 40 | } 41 | 42 | func New(c Config, db *sql.DB) (autoscan.Trigger, error) { 43 | l := autoscan.GetLogger(c.Verbosity).With(). 44 | Str("trigger", "bernard"). 
45 | Logger() 46 | 47 | const scope = "https://www.googleapis.com/auth/drive.readonly" 48 | auth, err := stubbs.FromFile(c.AccountPath, []string{scope}) 49 | if err != nil { 50 | return nil, fmt.Errorf("%v: %w", err, autoscan.ErrFatal) 51 | } 52 | 53 | store, err := sqlite.FromDB(db) 54 | if err != nil { 55 | return nil, fmt.Errorf("%v: %w", err, autoscan.ErrFatal) 56 | } 57 | 58 | limiter, err := getRateLimiter(auth.Email()) 59 | if err != nil { 60 | return nil, fmt.Errorf("%v: %w", err, autoscan.ErrFatal) 61 | } 62 | 63 | bernard := lowe.New(auth, store, 64 | lowe.WithPreRequestHook(limiter.Wait), 65 | lowe.WithSafeSleep(120*time.Second)) 66 | 67 | var drives []drive 68 | for _, d := range c.Drives { 69 | d := d 70 | 71 | rewriter, err := autoscan.NewRewriter(append(d.Rewrite, c.Rewrite...)) 72 | if err != nil { 73 | return nil, err 74 | } 75 | 76 | filterer, err := autoscan.NewFilterer(append(d.Include, c.Include...), append(d.Exclude, c.Exclude...)) 77 | if err != nil { 78 | return nil, err 79 | } 80 | 81 | scanTime := func() time.Time { 82 | if d.TimeOffset.Seconds() > 0 { 83 | return time.Now().Add(d.TimeOffset) 84 | } 85 | return time.Now().Add(c.TimeOffset) 86 | } 87 | 88 | drives = append(drives, drive{ 89 | ID: d.ID, 90 | Rewriter: rewriter, 91 | Allowed: filterer, 92 | ScanTime: scanTime, 93 | }) 94 | } 95 | 96 | trigger := func(callback autoscan.ProcessorFunc) { 97 | d := daemon{ 98 | log: l, 99 | callback: callback, 100 | cronSchedule: c.CronSchedule, 101 | priority: c.Priority, 102 | drives: drives, 103 | bernard: bernard, 104 | store: &bds{store}, 105 | limiter: limiter, 106 | } 107 | 108 | // start job(s) 109 | if err := d.startAutoSync(); err != nil { 110 | l.Error(). 111 | Err(err). 
112 | Msg("Failed initialising cron jobs") 113 | return 114 | } 115 | } 116 | 117 | return trigger, nil 118 | } 119 | 120 | type drive struct { 121 | ID string 122 | Rewriter autoscan.Rewriter 123 | Allowed autoscan.Filterer 124 | ScanTime func() time.Time 125 | } 126 | 127 | type daemon struct { 128 | callback autoscan.ProcessorFunc 129 | cronSchedule string 130 | priority int 131 | drives []drive 132 | bernard *lowe.Bernard 133 | store *bds 134 | log zerolog.Logger 135 | limiter *rateLimiter 136 | } 137 | 138 | type syncJob struct { 139 | log zerolog.Logger 140 | attempts int 141 | errors []error 142 | 143 | cron *cron.Cron 144 | jobID cron.EntryID 145 | fn func() error 146 | } 147 | 148 | func (s *syncJob) Run() { 149 | // increase attempt counter 150 | s.attempts++ 151 | 152 | // run job 153 | err := s.fn() 154 | 155 | // handle job response 156 | switch { 157 | case err == nil: 158 | // job completed successfully 159 | s.attempts = 0 160 | s.errors = s.errors[:0] 161 | return 162 | 163 | case errors.Is(err, lowe.ErrInvalidCredentials), errors.Is(err, ds.ErrDataAnomaly), errors.Is(err, lowe.ErrNetwork): 164 | //retryable error occurred 165 | s.log.Trace(). 166 | Err(err). 167 | Int("attempts", s.attempts). 168 | Msg("Retryable error occurred while syncing drive") 169 | s.errors = append(s.errors, err) 170 | 171 | case errors.Is(err, autoscan.ErrFatal): 172 | // fatal error occurred, we cannot recover from this safely 173 | s.log.Error(). 174 | Err(err). 175 | Msg("Fatal error occurred while syncing drive, drive has been stopped...") 176 | 177 | s.cron.Remove(s.jobID) 178 | return 179 | 180 | case err != nil: 181 | // an un-expected/un-handled error occurred, this should be retryable with the same retry logic 182 | s.log.Warn(). 183 | Err(err). 184 | Int("attempts", s.attempts). 
185 | Msg("Unexpected error occurred while syncing drive") 186 | s.errors = append(s.errors, err) 187 | } 188 | 189 | // abort if max retries reached 190 | if s.attempts >= maxSyncRetries { 191 | s.log.Error(). 192 | Errs("error", s.errors). 193 | Int("attempts", s.attempts). 194 | Msg("Consecutive errors occurred while syncing drive, drive has been stopped...") 195 | 196 | s.cron.Remove(s.jobID) 197 | } 198 | } 199 | 200 | func newSyncJob(c *cron.Cron, log zerolog.Logger, job func() error) *syncJob { 201 | return &syncJob{ 202 | log: log, 203 | attempts: 0, 204 | errors: make([]error, 0), 205 | cron: c, 206 | fn: job, 207 | } 208 | } 209 | 210 | func (d daemon) startAutoSync() error { 211 | c := cron.New() 212 | 213 | for _, drive := range d.drives { 214 | drive := drive 215 | fullSync := false 216 | l := d.withDriveLog(drive.ID) 217 | 218 | // full sync required? 219 | _, err := d.store.PageToken(drive.ID) 220 | switch { 221 | case errors.Is(err, ds.ErrFullSync): 222 | fullSync = true 223 | case err != nil: 224 | return fmt.Errorf("%v: determining if full sync required: %v: %w", 225 | drive.ID, err, autoscan.ErrFatal) 226 | } 227 | 228 | // create job 229 | job := newSyncJob(c, l, func() error { 230 | // acquire lock 231 | if err := d.limiter.Acquire(1); err != nil { 232 | return fmt.Errorf("%v: acquiring sync semaphore: %v: %w", 233 | drive.ID, err, autoscan.ErrFatal) 234 | } 235 | defer d.limiter.Release(1) 236 | 237 | // full sync 238 | if fullSync { 239 | l.Info().Msg("Starting full sync") 240 | start := time.Now() 241 | 242 | if err := d.bernard.FullSync(drive.ID); err != nil { 243 | return fmt.Errorf("%v: performing full sync: %w", drive.ID, err) 244 | } 245 | 246 | l.Info().Msgf("Finished full sync in %s", time.Since(start)) 247 | fullSync = false 248 | return nil 249 | } 250 | 251 | // create partial sync 252 | dh, diff := d.store.NewDifferencesHook() 253 | ph := NewPostProcessBernardDiff(drive.ID, d.store, diff) 254 | ch, paths := NewPathsHook(drive.ID, 
d.store, diff) 255 | 256 | l.Trace().Msg("Running partial sync") 257 | start := time.Now() 258 | 259 | // do partial sync 260 | err := d.bernard.PartialSync(drive.ID, dh, ph, ch) 261 | if err != nil { 262 | return fmt.Errorf("%v: performing partial sync: %w", drive.ID, err) 263 | } 264 | 265 | l.Trace(). 266 | Int("new", len(paths.NewFolders)). 267 | Int("old", len(paths.OldFolders)). 268 | Msgf("Partial sync finished in %s", time.Since(start)) 269 | 270 | // translate paths to scan task 271 | task := d.getScanTask(&(drive), paths) 272 | 273 | // move scans to processor 274 | if len(task.scans) > 0 { 275 | l.Trace(). 276 | Interface("scans", task.scans). 277 | Msg("Scans moving to processor") 278 | 279 | err := d.callback(task.scans...) 280 | if err != nil { 281 | return fmt.Errorf("%v: moving scans to processor: %v: %w", 282 | drive.ID, err, autoscan.ErrFatal) 283 | } 284 | 285 | l.Info(). 286 | Int("added", task.added). 287 | Int("removed", task.removed). 288 | Msg("Scan moved to processor") 289 | } 290 | 291 | return nil 292 | }) 293 | 294 | id, err := c.AddJob(d.cronSchedule, cron.NewChain(cron.SkipIfStillRunning(cron.DiscardLogger)).Then(job)) 295 | if err != nil { 296 | return fmt.Errorf("%v: creating auto sync job for drive: %w", drive.ID, err) 297 | } 298 | 299 | job.jobID = id 300 | } 301 | 302 | c.Start() 303 | return nil 304 | } 305 | 306 | type scanTask struct { 307 | scans []autoscan.Scan 308 | added int 309 | removed int 310 | } 311 | 312 | func (d daemon) getScanTask(drive *drive, paths *Paths) *scanTask { 313 | pathMap := make(map[string]int) 314 | task := &scanTask{ 315 | scans: make([]autoscan.Scan, 0), 316 | added: 0, 317 | removed: 0, 318 | } 319 | 320 | for _, p := range paths.NewFolders { 321 | // rewrite path 322 | rewritten := drive.Rewriter(p) 323 | 324 | // check if path already seen 325 | if _, ok := pathMap[rewritten]; ok { 326 | // already a scan task present 327 | continue 328 | } else { 329 | pathMap[rewritten] = 1 330 | } 331 | 332 | 
// is this path allowed? 333 | if !drive.Allowed(rewritten) { 334 | continue 335 | } 336 | 337 | // add scan task 338 | task.scans = append(task.scans, autoscan.Scan{ 339 | Folder: filepath.Clean(rewritten), 340 | Priority: d.priority, 341 | Time: drive.ScanTime(), 342 | }) 343 | 344 | task.added++ 345 | } 346 | 347 | for _, p := range paths.OldFolders { 348 | // rewrite path 349 | rewritten := drive.Rewriter(p) 350 | 351 | // check if path already seen 352 | if _, ok := pathMap[rewritten]; ok { 353 | // already a scan task present 354 | continue 355 | } else { 356 | pathMap[rewritten] = 1 357 | } 358 | 359 | // is this path allowed? 360 | if !drive.Allowed(rewritten) { 361 | continue 362 | } 363 | 364 | // add scan task 365 | task.scans = append(task.scans, autoscan.Scan{ 366 | Folder: filepath.Clean(rewritten), 367 | Priority: d.priority, 368 | Time: drive.ScanTime(), 369 | }) 370 | 371 | task.removed++ 372 | } 373 | 374 | return task 375 | } 376 | 377 | func (d daemon) withDriveLog(driveID string) zerolog.Logger { 378 | drive, err := d.store.GetDrive(driveID) 379 | if err != nil { 380 | return d.log.With().Str("drive_id", driveID).Logger() 381 | } 382 | 383 | return d.log.With().Str("drive_id", driveID).Str("drive_name", drive.Name).Logger() 384 | } 385 | -------------------------------------------------------------------------------- /cmd/autoscan/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "database/sql" 5 | "errors" 6 | "fmt" 7 | "io" 8 | "net/http" 9 | "os" 10 | "path/filepath" 11 | "strings" 12 | "time" 13 | 14 | "github.com/alecthomas/kong" 15 | "github.com/natefinch/lumberjack" 16 | "github.com/rs/zerolog" 17 | "github.com/rs/zerolog/log" 18 | "gopkg.in/yaml.v2" 19 | 20 | "github.com/saltydk/autoscan" 21 | "github.com/saltydk/autoscan/migrate" 22 | "github.com/saltydk/autoscan/processor" 23 | ast "github.com/saltydk/autoscan/targets/autoscan" 24 | 
"github.com/saltydk/autoscan/targets/emby" 25 | "github.com/saltydk/autoscan/targets/jellyfin" 26 | "github.com/saltydk/autoscan/targets/plex" 27 | "github.com/saltydk/autoscan/triggers/a_train" 28 | "github.com/saltydk/autoscan/triggers/bernard" 29 | "github.com/saltydk/autoscan/triggers/inotify" 30 | "github.com/saltydk/autoscan/triggers/lidarr" 31 | "github.com/saltydk/autoscan/triggers/manual" 32 | "github.com/saltydk/autoscan/triggers/radarr" 33 | "github.com/saltydk/autoscan/triggers/readarr" 34 | "github.com/saltydk/autoscan/triggers/sonarr" 35 | 36 | // sqlite3 driver 37 | _ "modernc.org/sqlite" 38 | ) 39 | 40 | type config struct { 41 | // General configuration 42 | Host []string `yaml:"host"` 43 | Port int `yaml:"port"` 44 | MinimumAge time.Duration `yaml:"minimum-age"` 45 | ScanDelay time.Duration `yaml:"scan-delay"` 46 | ScanStats time.Duration `yaml:"scan-stats"` 47 | Anchors []string `yaml:"anchors"` 48 | 49 | // Authentication for autoscan.HTTPTrigger 50 | Auth struct { 51 | Username string `yaml:"username"` 52 | Password string `yaml:"password"` 53 | } `yaml:"authentication"` 54 | 55 | // autoscan.HTTPTrigger 56 | Triggers struct { 57 | Manual manual.Config `yaml:"manual"` 58 | ATrain a_train.Config `yaml:"a-train"` 59 | Bernard []bernard.Config `yaml:"bernard"` 60 | Inotify []inotify.Config `yaml:"inotify"` 61 | Lidarr []lidarr.Config `yaml:"lidarr"` 62 | Radarr []radarr.Config `yaml:"radarr"` 63 | Readarr []readarr.Config `yaml:"readarr"` 64 | Sonarr []sonarr.Config `yaml:"sonarr"` 65 | } `yaml:"triggers"` 66 | 67 | // autoscan.Target 68 | Targets struct { 69 | Autoscan []ast.Config `yaml:"autoscan"` 70 | Emby []emby.Config `yaml:"emby"` 71 | Jellyfin []jellyfin.Config `yaml:"jellyfin"` 72 | Plex []plex.Config `yaml:"plex"` 73 | } `yaml:"targets"` 74 | } 75 | 76 | var ( 77 | // release variables 78 | Version string 79 | Timestamp string 80 | GitCommit string 81 | 82 | // CLI 83 | cli struct { 84 | globals 85 | 86 | // flags 87 | Config string 
`type:"path" default:"${config_file}" env:"AUTOSCAN_CONFIG" help:"Config file path"` 88 | Database string `type:"path" default:"${database_file}" env:"AUTOSCAN_DATABASE" help:"Database file path"` 89 | Log string `type:"path" default:"${log_file}" env:"AUTOSCAN_LOG" help:"Log file path"` 90 | Verbosity int `type:"counter" default:"0" short:"v" env:"AUTOSCAN_VERBOSITY" help:"Log level verbosity"` 91 | } 92 | ) 93 | 94 | type globals struct { 95 | Version versionFlag `name:"version" help:"Print version information and quit"` 96 | } 97 | 98 | type versionFlag string 99 | 100 | func (v versionFlag) Decode(ctx *kong.DecodeContext) error { return nil } 101 | func (v versionFlag) IsBool() bool { return true } 102 | func (v versionFlag) BeforeApply(app *kong.Kong, vars kong.Vars) error { 103 | fmt.Println(vars["version"]) 104 | app.Exit(0) 105 | return nil 106 | } 107 | 108 | func main() { 109 | // parse cli 110 | ctx := kong.Parse(&cli, 111 | kong.Name("autoscan"), 112 | kong.Description("Scan media into target media servers"), 113 | kong.UsageOnError(), 114 | kong.ConfigureHelp(kong.HelpOptions{ 115 | Summary: true, 116 | Compact: true, 117 | }), 118 | kong.Vars{ 119 | "version": fmt.Sprintf("%s (%s@%s)", Version, GitCommit, Timestamp), 120 | "config_file": filepath.Join(defaultConfigDirectory("autoscan", "config.yml"), "config.yml"), 121 | "log_file": filepath.Join(defaultConfigDirectory("autoscan", "config.yml"), "activity.log"), 122 | "database_file": filepath.Join(defaultConfigDirectory("autoscan", "config.yml"), "autoscan.db"), 123 | }, 124 | ) 125 | 126 | if err := ctx.Validate(); err != nil { 127 | fmt.Println("Failed parsing cli:", err) 128 | os.Exit(1) 129 | } 130 | 131 | // logger 132 | logger := log.Output(io.MultiWriter(zerolog.ConsoleWriter{ 133 | TimeFormat: time.Stamp, 134 | Out: os.Stderr, 135 | }, zerolog.ConsoleWriter{ 136 | TimeFormat: time.Stamp, 137 | Out: &lumberjack.Logger{ 138 | Filename: cli.Log, 139 | MaxSize: 5, 140 | MaxAge: 14, 141 | 
MaxBackups: 5, 142 | }, 143 | NoColor: true, 144 | })) 145 | 146 | switch { 147 | case cli.Verbosity == 1: 148 | log.Logger = logger.Level(zerolog.DebugLevel) 149 | case cli.Verbosity > 1: 150 | log.Logger = logger.Level(zerolog.TraceLevel) 151 | default: 152 | log.Logger = logger.Level(zerolog.InfoLevel) 153 | } 154 | 155 | // datastore 156 | db, err := sql.Open("sqlite", cli.Database) 157 | if err != nil { 158 | log.Fatal(). 159 | Err(err). 160 | Msg("Failed opening datastore") 161 | } 162 | db.SetMaxOpenConns(1) 163 | 164 | // config 165 | file, err := os.Open(cli.Config) 166 | if err != nil { 167 | log.Fatal(). 168 | Err(err). 169 | Msg("Failed opening config") 170 | } 171 | defer file.Close() 172 | 173 | // set default values 174 | c := config{ 175 | MinimumAge: 10 * time.Minute, 176 | ScanDelay: 5 * time.Second, 177 | ScanStats: 1 * time.Hour, 178 | Host: []string{""}, 179 | Port: 3030, 180 | } 181 | 182 | decoder := yaml.NewDecoder(file) 183 | decoder.SetStrict(true) 184 | err = decoder.Decode(&c) 185 | if err != nil { 186 | log.Fatal(). 187 | Err(err). 188 | Msg("Failed decoding config") 189 | } 190 | 191 | // migrator 192 | mg, err := migrate.New(db, "migrations") 193 | if err != nil { 194 | log.Fatal(). 195 | Err(err). 196 | Msg("Failed initialising migrator") 197 | } 198 | 199 | // processor 200 | proc, err := processor.New(processor.Config{ 201 | Anchors: c.Anchors, 202 | MinimumAge: c.MinimumAge, 203 | Db: db, 204 | Mg: mg, 205 | }) 206 | 207 | if err != nil { 208 | log.Fatal(). 209 | Err(err). 210 | Msg("Failed initialising processor") 211 | } 212 | 213 | log.Info(). 214 | Stringer("min_age", c.MinimumAge). 215 | Strs("anchors", c.Anchors). 216 | Msg("Initialised processor") 217 | 218 | // Check authentication. If no auth -> warn user. 
219 | if c.Auth.Username == "" || c.Auth.Password == "" { 220 | log.Warn().Msg("Webhooks running without authentication") 221 | } 222 | 223 | // daemon triggers 224 | for _, t := range c.Triggers.Bernard { 225 | trigger, err := bernard.New(t, db) 226 | if err != nil { 227 | log.Fatal(). 228 | Err(err). 229 | Str("trigger", "bernard"). 230 | Msg("Failed initialising trigger") 231 | } 232 | 233 | go trigger(proc.Add) 234 | } 235 | 236 | for _, t := range c.Triggers.Inotify { 237 | trigger, err := inotify.New(t) 238 | if err != nil { 239 | log.Fatal(). 240 | Err(err). 241 | Str("trigger", "inotify"). 242 | Msg("Failed initialising trigger") 243 | } 244 | 245 | go trigger(proc.Add) 246 | } 247 | 248 | // http triggers 249 | router := getRouter(c, proc) 250 | 251 | for _, h := range c.Host { 252 | go func(host string) { 253 | addr := host 254 | if !strings.Contains(addr, ":") { 255 | addr = fmt.Sprintf("%s:%d", host, c.Port) 256 | } 257 | 258 | log.Info().Msgf("Starting server on %s", addr) 259 | if err := http.ListenAndServe(addr, router); err != nil { 260 | log.Fatal(). 261 | Str("addr", addr). 262 | Err(err). 263 | Msg("Failed starting web server") 264 | } 265 | }(h) 266 | } 267 | 268 | log.Info(). 269 | Int("manual", 1). 270 | Int("bernard", len(c.Triggers.Bernard)). 271 | Int("inotify", len(c.Triggers.Inotify)). 272 | Int("lidarr", len(c.Triggers.Lidarr)). 273 | Int("radarr", len(c.Triggers.Radarr)). 274 | Int("readarr", len(c.Triggers.Readarr)). 275 | Int("sonarr", len(c.Triggers.Sonarr)). 276 | Msg("Initialised triggers") 277 | 278 | // targets 279 | targets := make([]autoscan.Target, 0) 280 | 281 | for _, t := range c.Targets.Autoscan { 282 | tp, err := ast.New(t) 283 | if err != nil { 284 | log.Fatal(). 285 | Err(err). 286 | Str("target", "autoscan"). 287 | Str("target_url", t.URL). 
288 | Msg("Failed initialising target") 289 | } 290 | 291 | targets = append(targets, tp) 292 | } 293 | 294 | for _, t := range c.Targets.Plex { 295 | tp, err := plex.New(t) 296 | if err != nil { 297 | log.Fatal(). 298 | Err(err). 299 | Str("target", "plex"). 300 | Str("target_url", t.URL). 301 | Msg("Failed initialising target") 302 | } 303 | 304 | targets = append(targets, tp) 305 | } 306 | 307 | for _, t := range c.Targets.Emby { 308 | tp, err := emby.New(t) 309 | if err != nil { 310 | log.Fatal(). 311 | Err(err). 312 | Str("target", "emby"). 313 | Str("target_url", t.URL). 314 | Msg("Failed initialising target") 315 | } 316 | 317 | targets = append(targets, tp) 318 | } 319 | 320 | for _, t := range c.Targets.Jellyfin { 321 | tp, err := jellyfin.New(t) 322 | if err != nil { 323 | log.Fatal(). 324 | Err(err). 325 | Str("target", "jellyfin"). 326 | Str("target_url", t.URL). 327 | Msg("Failed initialising target") 328 | } 329 | 330 | targets = append(targets, tp) 331 | } 332 | 333 | log.Info(). 334 | Int("autoscan", len(c.Targets.Autoscan)). 335 | Int("plex", len(c.Targets.Plex)). 336 | Int("emby", len(c.Targets.Emby)). 337 | Int("jellyfin", len(c.Targets.Jellyfin)). 338 | Msg("Initialised targets") 339 | 340 | // scan stats 341 | if c.ScanStats.Seconds() > 0 { 342 | go scanStats(proc, c.ScanStats) 343 | } 344 | 345 | // display initialised banner 346 | log.Info(). 347 | Str("version", fmt.Sprintf("%s (%s@%s)", Version, GitCommit, Timestamp)). 
348 | Msg("Initialised") 349 | 350 | // processor 351 | log.Info().Msg("Processor started") 352 | 353 | targetsAvailable := false 354 | targetsSize := len(targets) 355 | for { 356 | // sleep indefinitely when no targets setup 357 | if targetsSize == 0 { 358 | log.Warn().Msg("No targets initialised, processor stopped, triggers will continue...") 359 | select {} 360 | } 361 | 362 | // target availability checker 363 | if !targetsAvailable { 364 | err = proc.CheckAvailability(targets) 365 | switch { 366 | case err == nil: 367 | targetsAvailable = true 368 | case errors.Is(err, autoscan.ErrFatal): 369 | log.Error(). 370 | Err(err). 371 | Msg("Fatal error occurred while checking target availability, processor stopped, triggers will continue...") 372 | 373 | // sleep indefinitely 374 | select {} 375 | default: 376 | log.Error(). 377 | Err(err). 378 | Msg("Not all targets are available, retrying in 15 seconds...") 379 | 380 | time.Sleep(15 * time.Second) 381 | continue 382 | } 383 | } 384 | 385 | // process scans 386 | err = proc.Process(targets) 387 | switch { 388 | case err == nil: 389 | // Sleep scan-delay between successful requests to reduce the load on targets. 390 | time.Sleep(c.ScanDelay) 391 | 392 | case errors.Is(err, autoscan.ErrNoScans): 393 | // No scans currently available, let's wait a couple of seconds 394 | log.Trace(). 395 | Msg("No scans are available, retrying in 15 seconds...") 396 | 397 | time.Sleep(15 * time.Second) 398 | 399 | case errors.Is(err, autoscan.ErrAnchorUnavailable): 400 | log.Error(). 401 | Err(err). 402 | Msg("Not all anchor files are available, retrying in 15 seconds...") 403 | 404 | time.Sleep(15 * time.Second) 405 | 406 | case errors.Is(err, autoscan.ErrTargetUnavailable): 407 | targetsAvailable = false 408 | log.Error(). 409 | Err(err). 
410 | Msg("Not all targets are available, retrying in 15 seconds...") 411 | 412 | time.Sleep(15 * time.Second) 413 | 414 | case errors.Is(err, autoscan.ErrFatal): 415 | // fatal error occurred, processor must stop (however, triggers must not) 416 | log.Error(). 417 | Err(err). 418 | Msg("Fatal error occurred while processing targets, processor stopped, triggers will continue...") 419 | 420 | // sleep indefinitely 421 | select {} 422 | 423 | default: 424 | // unexpected error 425 | log.Fatal(). 426 | Err(err). 427 | Msg("Failed processing targets") 428 | } 429 | } 430 | } 431 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= 2 | github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= 3 | github.com/alecthomas/kong v0.2.9/go.mod h1:kQOmtJgV+Lb4aj+I2LEn40cbtawdWJ9Y8QLq+lElKxE= 4 | github.com/alecthomas/kong v0.2.11/go.mod h1:kQOmtJgV+Lb4aj+I2LEn40cbtawdWJ9Y8QLq+lElKxE= 5 | github.com/alecthomas/kong v0.6.1 h1:1kNhcFepkR+HmasQpbiKDLylIL8yh5B5y1zPp5bJimA= 6 | github.com/alecthomas/kong v0.6.1/go.mod h1:JfHWDzLmbh/puW6I3V7uWenoh56YNVONW+w8eKeUr9I= 7 | github.com/alecthomas/repr v0.0.0-20210801044451-80ca428c5142 h1:8Uy0oSf5co/NZXje7U1z8Mpep++QJOldL2hs/sBQf48= 8 | github.com/alecthomas/repr v0.0.0-20210801044451-80ca428c5142/go.mod h1:2kn6fqh/zIyPLmm3ugklbEi5hg5wS435eygvNfaDQL8= 9 | github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= 10 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 11 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 12 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 13 | github.com/dustin/go-humanize v1.0.0 
h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= 14 | github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= 15 | github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= 16 | github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= 17 | github.com/go-chi/chi/v5 v5.0.7 h1:rDTPXLDHGATaeHvVlLcR4Qe0zftYethFucbjVQ1PxU8= 18 | github.com/go-chi/chi/v5 v5.0.7/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= 19 | github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= 20 | github.com/google/go-cmp v0.5.3 h1:x95R7cp+rSeeqAMI2knLtQ0DKlaBhv2NrtrOvafPHRo= 21 | github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 22 | github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= 23 | github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 24 | github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= 25 | github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= 26 | github.com/l3uddz/bernard v0.5.1 h1:PdkmJn44q4dmix1riBkrvnpb2LVvhyRLwIRhWoc05bo= 27 | github.com/l3uddz/bernard v0.5.1/go.mod h1:J2ad7LeQl+6Nxc8sGJ8HtQIsv9c9bk2jHxsBt9OIHpo= 28 | github.com/m-rots/stubbs v1.0.0/go.mod h1:iDS6z2oonw2UMo2l0S1WTPJ9git7FWU4YEo6fq7F2WU= 29 | github.com/m-rots/stubbs v1.1.0 h1:QR1LHxFYPasju/sEO0KLmI5/RADF70CW3ZtisCs7XrQ= 30 | github.com/m-rots/stubbs v1.1.0/go.mod h1:Ive+DY/P1EikQ644M3tuyvsO/7ohPLnmEru2L+6hbVw= 31 | github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= 32 | github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= 33 | github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= 34 | github.com/mattn/go-isatty v0.0.12/go.mod 
h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= 35 | github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= 36 | github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= 37 | github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= 38 | github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= 39 | github.com/mattn/go-sqlite3 v2.0.3+incompatible h1:gXHsfypPkaMZrKbD5209QV9jbUTJKjyR5WD3HYQSd+U= 40 | github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= 41 | github.com/natefinch/lumberjack v2.0.0+incompatible h1:4QJd3OLAMgj7ph+yZTuX13Ld4UpgHp07nNdFX7mqFfM= 42 | github.com/natefinch/lumberjack v2.0.0+incompatible/go.mod h1:Wi9p2TTF5DG5oU+6YfsmYQpsTIOm0B1VNzQg9Mw6nPk= 43 | github.com/oriser/regroup v0.0.0-20201024192559-010c434ff8f3/go.mod h1:odkMeLkWS8G6+WP2z3Pn2vkzhPSvBtFhAUYTKXAtZMQ= 44 | github.com/oriser/regroup v0.0.0-20210730155327-fca8d7531263 h1:Qd1Ml+uEhpesT8Og0ysEhu5+DGhbhW+qxjapH8t1Kvs= 45 | github.com/oriser/regroup v0.0.0-20210730155327-fca8d7531263/go.mod h1:odkMeLkWS8G6+WP2z3Pn2vkzhPSvBtFhAUYTKXAtZMQ= 46 | github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 47 | github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 48 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 49 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 50 | github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk= 51 | github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= 52 | github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= 53 | github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= 54 
| github.com/rs/xid v1.4.0 h1:qd7wPTDkN6KQx2VmMBLrpHkiyQwgFXRnkOLacUiaSNY= 55 | github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= 56 | github.com/rs/zerolog v1.28.0 h1:MirSo27VyNi7RJYP3078AA1+Cyzd2GB66qy3aUHvsWY= 57 | github.com/rs/zerolog v1.28.0/go.mod h1:NILgTygv/Uej1ra5XxGf82ZFSLk58MFGAUS2o6usyD0= 58 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 59 | github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= 60 | github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 61 | github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s= 62 | github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= 63 | github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 64 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 65 | golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 66 | golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= 67 | golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 68 | golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= 69 | golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= 70 | golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 71 | golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 72 | golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= 73 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 74 
| golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 75 | golang.org/x/sync v0.0.0-20220907140024-f12130a52804 h1:0SH2R3f1b1VmIMG7BXbEZCBUu2dKmHschSmjqGUrW8A= 76 | golang.org/x/sync v0.0.0-20220907140024-f12130a52804/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 77 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 78 | golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 79 | golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 80 | golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 81 | golang.org/x/sys v0.0.0-20201126233918-771906719818/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 82 | golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 83 | golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 84 | golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 85 | golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 86 | golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 87 | golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 88 | golang.org/x/sys v0.0.0-20220915200043-7b5979e65e41 h1:ohgcoMbSofXygzo6AD2I1kz3BFmW1QArPYTtwEM3UXc= 89 | golang.org/x/sys v0.0.0-20220915200043-7b5979e65e41/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 90 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 91 | golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 92 | golang.org/x/time 
v0.0.0-20220722155302-e5dcc9cfc0b9 h1:ftMN5LMiBFjbzleLqtoBZk7KdJwhuybIU+FckUHgoyQ= 93 | golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 94 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 95 | golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 96 | golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= 97 | golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU= 98 | golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= 99 | golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 100 | golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 101 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 102 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 103 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= 104 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 105 | gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= 106 | gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= 107 | gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= 108 | gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= 109 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 110 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 111 | gopkg.in/yaml.v3 v3.0.1/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 112 | lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= 113 | lukechampine.com/uint128 v1.2.0 h1:mBi/5l91vocEN8otkC5bDLhi2KdCticRiwbdB0O+rjI= 114 | lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= 115 | modernc.org/cc/v3 v3.31.5-0.20210308123301-7a3e9dab9009/go.mod h1:0R6jl1aZlIl2avnYfbfHBS1QB6/f+16mihBObaBC878= 116 | modernc.org/cc/v3 v3.36.2/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= 117 | modernc.org/cc/v3 v3.38.1 h1:Yu2IiiRpustRFUgMDZKwVn2RvyJzpfYSOw7zHeKtSi4= 118 | modernc.org/cc/v3 v3.38.1/go.mod h1:vtL+3mdHx/wcj3iEGz84rQa8vEqR6XM84v5Lcvfph20= 119 | modernc.org/ccgo/v3 v3.9.0/go.mod h1:nQbgkn8mwzPdp4mm6BT6+p85ugQ7FrGgIcYaE7nSrpY= 120 | modernc.org/ccgo/v3 v3.16.9 h1:AXquSwg7GuMk11pIdw7fmO1Y/ybgazVkMhsZWCV0mHM= 121 | modernc.org/ccgo/v3 v3.16.9/go.mod h1:zNMzC9A9xeNUepy6KuZBbugn3c0Mc9TeiJO4lgvkJDo= 122 | modernc.org/ccorpus v1.11.6 h1:J16RXiiqiCgua6+ZvQot4yUuUy8zxgqbqEEUuGPlISk= 123 | modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= 124 | modernc.org/httpfs v1.0.6 h1:AAgIpFZRXuYnkjftxTAZwMIiwEqAfk8aVB2/oA6nAeM= 125 | modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= 126 | modernc.org/libc v1.7.13-0.20210308123627-12f642a52bb8/go.mod h1:U1eq8YWr/Kc1RWCMFUWEdkTg8OTcfLw2kY8EDwl039w= 127 | modernc.org/libc v1.8.0/go.mod h1:U1eq8YWr/Kc1RWCMFUWEdkTg8OTcfLw2kY8EDwl039w= 128 | modernc.org/libc v1.17.0/go.mod h1:XsgLldpP4aWlPlsjqKRdHPqCxCjISdHfM/yeWC5GyW0= 129 | modernc.org/libc v1.19.0 h1:bXyVhGQg6KIClTr8FMVIDPl7jtbcs7aS5WP7vLDaxPs= 130 | modernc.org/libc v1.19.0/go.mod h1:ZRfIaEkgrYgZDl6pa4W39HgN5G/yDW+NRmNKZBDFrk0= 131 | modernc.org/mathutil v1.1.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= 132 | modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= 133 | modernc.org/mathutil v1.4.1/go.mod 
h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= 134 | modernc.org/mathutil v1.5.0 h1:rV0Ko/6SfM+8G+yKiyI830l3Wuz1zRutdslNoQ0kfiQ= 135 | modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= 136 | modernc.org/memory v1.0.4/go.mod h1:nV2OApxradM3/OVbs2/0OsP6nPfakXpi50C7dcoHXlc= 137 | modernc.org/memory v1.2.0/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= 138 | modernc.org/memory v1.4.0 h1:crykUfNSnMAXaOJnnxcSzbUGMqkLWjklJKkBK2nwZwk= 139 | modernc.org/memory v1.4.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= 140 | modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= 141 | modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4= 142 | modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= 143 | modernc.org/sqlite v1.10.0/go.mod h1:PGzq6qlhyYjL6uVbSgS6WoF7ZopTW/sI7+7p+mb4ZVU= 144 | modernc.org/sqlite v1.18.2 h1:S2uFiaNPd/vTAP/4EmyY8Qe2Quzu26A2L1e25xRNTio= 145 | modernc.org/sqlite v1.18.2/go.mod h1:kvrTLEWgxUcHa2GfHBQtanR1H9ht3hTJNtKpzH9k1u0= 146 | modernc.org/strutil v1.1.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= 147 | modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw= 148 | modernc.org/strutil v1.1.3 h1:fNMm+oJklMGYfU9Ylcywl0CO5O6nTfaowNsh2wpPjzY= 149 | modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= 150 | modernc.org/tcl v1.5.0/go.mod h1:gb57hj4pO8fRrK54zveIfFXBaMHK3SKJNWcmRw1cRzc= 151 | modernc.org/tcl v1.13.2 h1:5PQgL/29XkQ9wsEmmNPjzKs+7iPCaYqUJAhzPvQbjDA= 152 | modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= 153 | modernc.org/token v1.0.1 h1:A3qvTqOwexpfZZeyI0FeGPDlSWX5pjZu9hF4lU+EKWg= 154 | modernc.org/token v1.0.1/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= 155 | modernc.org/z v1.0.1-0.20210308123920-1f282aa71362/go.mod h1:8/SRk5C/HgiQWCgXdfpb+1RvhORdkz5sw72d3jjtyqA= 156 | modernc.org/z v1.0.1/go.mod 
h1:8/SRk5C/HgiQWCgXdfpb+1RvhORdkz5sw72d3jjtyqA= 157 | modernc.org/z v1.5.1 h1:RTNHdsrOpeoSeOF4FbzTo8gBYByaJ5xT7NgZ9ZqRiJM= 158 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Autoscan 2 | 3 | Autoscan replaces the default Plex and Emby behaviour for picking up file changes on the file system. 4 | Autoscan integrates with Sonarr, Radarr, Readarr, Lidarr and Google Drive to fetch changes in near real-time without relying on the file system. 5 | 6 | Wait, what happened to [Plex Autoscan](https://github.com/l3uddz/plex_autoscan)? 7 | Well, Autoscan is a rewrite of the original Plex Autoscan written in the Go language. 8 | In addition, this rewrite introduces a more modular approach and should be easy to extend in the future. 9 | 10 | ## Comparison to Plex Autoscan 11 | 12 | - [A-Train](https://github.com/m-rots/a-train/pkgs/container/a-train), Autoscan's Google Drive integration, only supports Shared Drives and requires Service Account authentication. 13 | - A-Train does not support RClone Crypt remotes. 14 | - Autoscan does not rely on manual trash deletion when connected to Plex. Therefore, you should re-enable the `Empty trash automatically after every scan` setting in Plex. 15 | 16 | Autoscan also improves upon [Plex Autoscan](https://github.com/l3uddz/plex_autoscan) by adding the following features: 17 | 18 | - Autoscan supports Plex music libraries. 19 | - Autoscan adds additional support for Emby and Jellyfin. 20 | - Autoscan can send _scans_ to multiple Plex, Emby and Jellyfin servers. 21 | 22 | ## Installing autoscan 23 | 24 | Autoscan offers [pre-compiled binaries](https://github.com/saltydk/autoscan/releases/latest) for both Linux and MacOS for each official release. In addition, we also offer a [Docker image](#docker)! 25 | 26 | Alternatively, you can build the Autoscan binary yourself. 
27 | To build the autoscan CLI on your system, make sure: 28 | 29 | 1. Your machine runs Linux, macOS or WSL2 30 | 2. You have [Go](https://golang.org/doc/install) installed (1.16 or later) 31 | 3. Clone this repository and cd into it from the terminal 32 | 4. Run `go build -o autoscan ./cmd/autoscan` from the terminal 33 | 34 | You should now have a binary with the name `autoscan` in the root directory of the project. 35 | To start autoscan, simply run `./autoscan`. If you want autoscan to be globally available, move it to `/bin` or `/usr/local/bin`. 36 | 37 | If you need to debug certain Autoscan behaviour, either add the `-v` flag for debug mode or the `-vv` flag for trace mode to get even more details about internal behaviour. 38 | 39 | ## Overview 40 | 41 | Autoscan is split into three distinct modules: 42 | 43 | - Triggers 44 | - Processor 45 | - Targets 46 | 47 | ### Rewriting paths 48 | 49 | Triggers, targets and the processor all live in different contexts. Some are used in Docker containers, others run on host OS, it can be a big mess! 50 | 51 | That's where rewrite rules come into play. They allow you to translate paths between a trigger's / target's perspective and the processor's perspective. 52 | 53 | **Before you begin, make sure you understand how regular expressions work!** \ 54 | Make sure you know how capture groups work, as these are used for the `to` field. 55 | 56 | Triggers can receive paths from any source: A remote server, a Docker container and the local file system. The `rewrite` field can be defined for each individual trigger. This field can contain multiple rewriting rules. Therefore, each rule should have a `-` to indicate the next rule on the list. The `from` should be a regexp pattern describing the path from the trigger's perspective. The `to` should then convert this path into a path which is local to Autoscan. 57 | 58 | Targets work the other way around. 
They have to convert the path local to Autoscan to a path understood by the target, which can be a Docker container, remote server, etc. The `from` should be a regexp pattern describing the path from Autoscan's perspective. The `to` should then convert this path into a path which is local to the target. 59 | 60 | It is important that all three modules can have access to a file. When a trigger receives a scan, then the file should be available from both the processor and all targets. 61 | 62 | #### Simple example 63 | 64 | - Sonarr running in a Docker container (same example works for Lidarr, Radarr and Readarr) 65 | - Autoscan running on the host OS (not in a container) 66 | - Plex running in a Docker container 67 | 68 | The following config only defines rewrite paths, this should not be used directly! 69 | 70 | ```yaml 71 | triggers: 72 | sonarr: 73 | - rewrite: 74 | # /tv contains folders with tv shows 75 | # This path is used within the Sonarr Docker container 76 | - from: /tv/ 77 | 78 | # /mnt/unionfs/Media/TV links to the same folder, though from the host OS 79 | # This folder is accessed by Autoscan 80 | to: /mnt/unionfs/Media/TV/ 81 | 82 | targets: 83 | plex: 84 | - rewrite: 85 | # Same folder as above, accessible by Autoscan. 86 | # Note how we strip the "TV" part, 87 | # as we want both Movies and TV. 88 | - from: /mnt/unionfs/Media/ 89 | 90 | # This path is used within the Plex Docker container 91 | to: /data/ 92 | ``` 93 | 94 | Let's take a look at the journey of the path `/tv/Westworld/Season 1/s01e01.mkv` coming from Sonarr. 95 | 96 | 1. Sonarr's path is translated to a path local to Autoscan. \ 97 | `/mnt/unionfs/Media/TV/Westworld/Season 1/s01e01.mkv` 98 | 2. The path is accessed by Autoscan to check whether it exists and adds it to the datastore. 99 | 3. Autoscan's path is translated to a path local to Plex. \ 100 | `/data/TV/Westworld/Season 1/s01e01.mkv` 101 | 102 | This should be all that's needed to get you going. Good luck! 
103 | 104 | ## Triggers 105 | 106 | Triggers are the 'input' of Autoscan. 107 | They translate incoming data into a common data format called the Scan. 108 | 109 | Autoscan currently supports the following triggers: 110 | 111 | - [A-Train](https://github.com/m-rots/a-train/pkgs/container/a-train): The official Google Drive trigger for Autoscan. \ 112 | _A-Train is [available separately](https://github.com/m-rots/a-train/pkgs/container/a-train)._ 113 | 114 | - Inotify: Listens for changes on the file system. \ 115 | **This should not be used on top of RClone mounts.** \ 116 | *Bugs may still exist.* 117 | 118 | - Manual: When you want to scan a path manually. 119 | 120 | - The -arrs: Lidarr, Sonarr, Radarr and Readarr. \ 121 | Webhook support for Lidarr, Sonarr, Radarr and Readarr. 122 | 123 | All triggers support: 124 | 125 | - Trigger-wide priority: higher priorities are processed sooner. \ 126 | *Defaults to 0.* 127 | 128 | - RegExp-based rewriting rules: translate a path given by the trigger to a path on the local file system. \ 129 | *If the paths are identical between the trigger and the local file system, then the `rewrite` field should be ignored.* 130 | 131 | ### A-Train 132 | 133 | Autoscan can monitor Google Drive through [A-Train](https://github.com/m-rots/a-train/pkgs/container/a-train). A-Train is a stand-alone tool created by the Autoscan developers and is officially part of the Autoscan project. 134 | 135 | The A-Train trigger configuration is not required, as Autoscan automatically listens for A-Train requests. 
However, to configure global and drive-specific rewrite rules, you could add A-Train to your config: 136 | 137 | ```yaml 138 | triggers: 139 | a-train: 140 | priority: 5 141 | rewrite: # Global rewrites 142 | - from: ^/Media/ 143 | to: /mnt/unionfs/Media/ 144 | # Drives only need to be given when Drive-specific rewrites are used 145 | drives: 146 | - id: 0A1xxxxxxxxxUk9PVA # The ID of Shared Drive #1 147 | rewrite: # Drive-specific rewrite (has priority over global rewrite) 148 | - from: ^/TV/ 149 | to: /mnt/unionfs/TV/ 150 | - id: 0A2xxxxxxxxxUk9PVA # The ID of Shared Drive #2 151 | rewrite: # Drive-specific rewrite (has priority over global rewrite) 152 | - from: ^/Movies/ 153 | to: /mnt/unionfs/Movies/ 154 | ``` 155 | 156 | ### Manual 157 | 158 | **Note: You can visit `/triggers/manual` within a browser to manually submit requests** 159 | 160 | Autoscan also supports a `manual` webhook for custom scripts or for software which is not supported by Autoscan directly. The manual endpoint is available at `/triggers/manual`. 161 | 162 | The manual endpoint accepts one or multiple directory paths as input and should be given one or multiple `dir` query parameters. Just like the other webhooks, the manual webhook is protected with basic authentication if the `auth` option is set in the config file of the user. 163 | 164 | URL template: `POST /triggers/manual?dir=$path1&dir=$path2` 165 | 166 | The following curl command sends a request to Autoscan to scan the directories `/test/one` and `/test/two`: 167 | 168 | ```bash 169 | curl --request POST \ 170 | --url 'http://localhost:3030/triggers/manual?dir=%2Ftest%2Fone&dir=%2Ftest%2Ftwo' \ 171 | --header 'Authorization: Basic aGVsbG8gdGhlcmU6Z2VuZXJhbCBrZW5vYmk=' 172 | ``` 173 | 174 | ### The -arrs 175 | 176 | If one wants to configure a HTTPTrigger with multiple distinct configurations, then these configurations MUST provide a field called `Name` which uniquely identifies the trigger. 
177 | The name field is then used to create the route: `/triggers/:name`. 178 | 179 | The following -arrs are currently provided by Autoscan: 180 | 181 | - Lidarr 182 | - Radarr 183 | - Readarr 184 | - Sonarr 185 | 186 | #### Connecting the -arrs 187 | 188 | To add your webhook to Sonarr, Radarr, Readarr or Lidarr, do: 189 | 190 | 1. Open the `settings` page in Sonarr/Radarr/Readarr/Lidarr 191 | 2. Select the tab `connect` 192 | 3. Click on the big plus sign 193 | 4. Select `webhook` 194 | 5. Use `Autoscan` as name (or whatever you prefer) 195 | 6. Select `On Import` and `On Upgrade` 196 | 7. Set the URL to Autoscan's URL and add `/triggers/:name` where name is the name set in the trigger's config. 197 | 8. Optional: set username and password. 198 | 199 | #### The latest events 200 | 201 | Autoscan also supports the following events in the latest versions of Radarr and Sonarr: 202 | - `Rename` 203 | - `On Movie Delete` and `On Series Delete` 204 | - `On Movie File Delete` and `On Episode File Delete` 205 | 206 | We are not 100% sure whether these three events cover all the possible file system interactions. 207 | So for now, please do keep using Bernard or the Inotify trigger to fetch all scans. 208 | 209 | ### Configuration 210 | 211 | A snippet of the `config.yml` file showcasing what is possible. 212 | You can mix and match exactly the way you like: 213 | 214 | ```yaml 215 | # Optionally, protect your webhooks with authentication 216 | authentication: 217 | username: hello there 218 | password: general kenobi 219 | 220 | # port for Autoscan webhooks to listen on 221 | port: 3030 222 | 223 | triggers: 224 | # The manual trigger is always enabled, the config only adjusts its priority and the rewrite rules. 
225 | manual: 226 | priority: 5 227 | rewrite: 228 | - from: ^/Media/ 229 | to: /mnt/unionfs/Media/ 230 | 231 | a-train: 232 | priority: 5 233 | rewrite: # Global rewrites 234 | - from: ^/Media/ 235 | to: /mnt/unionfs/Media/ 236 | # Drives only need to be given when Drive-specific rewrites are used 237 | drives: 238 | - id: 0A1xxxxxxxxxUk9PVA # The ID of Shared Drive #1 239 | rewrite: # Drive-specific rewrite (has priority over global rewrite) 240 | - from: ^/TV/ 241 | to: /mnt/unionfs/TV/ 242 | 243 | inotify: 244 | - priority: 0 245 | 246 | # filter with regular expressions 247 | include: 248 | - ^/mnt/unionfs/Media/ 249 | exclude: 250 | - '\.(srt|pdf)$' 251 | 252 | # rewrite inotify path to unified filesystem 253 | rewrite: 254 | - from: ^/mnt/local/Media/ 255 | to: /mnt/unionfs/Media/ 256 | 257 | # local filesystem paths to monitor 258 | paths: 259 | - path: /mnt/local/Media 260 | 261 | lidarr: 262 | - name: lidarr # /triggers/lidarr 263 | priority: 1 264 | 265 | radarr: 266 | - name: radarr # /triggers/radarr 267 | priority: 2 268 | - name: radarr4k # /triggers/radarr4k 269 | priority: 5 270 | 271 | readarr: 272 | - name: readarr # /triggers/readarr 273 | priority: 1 274 | 275 | sonarr: 276 | - name: sonarr-docker # /triggers/sonarr-docker 277 | priority: 2 278 | 279 | # Rewrite the path from within the container 280 | # to your local filesystem. 281 | rewrite: 282 | - from: /tv/ 283 | to: /mnt/unionfs/Media/TV/ 284 | ``` 285 | 286 | ## Processor 287 | 288 | Triggers pass the Scans they receive to the processor. 289 | The processor then saves the Scans to its datastore. 290 | 291 | *The processor uses SQLite as its datastore, feel free to hack around!* 292 | 293 | In a separate process, the processor selects Scans from the datastore. 294 | It will always group files belonging to the same folder together and it waits until all the files in that folder are older than the `minimum-age`, which defaults to 10 minutes. 
295 | 296 | When all files are older than the minimum age, then the processor will call all the configured targets in parallel to request a folder scan. 297 | 298 | ### Anchor files 299 | 300 | To prevent the processor from calling targets when a remote mount is offline, you can define a list of so called `anchor files`. 301 | These anchor files do not have any special properties and often have no content. 302 | However, they can be used to check whether a file exists on the file system. 303 | If the file does not exist and you have not made any changes to the file, then it is certain that the remote mount must be offline or the software is having problems. 304 | 305 | When an anchor file is unavailable, the processor will halt its operations until the file is back online. 306 | 307 | We suggest using different anchor file names if you merge multiple remote mounts together with a tool such as [UnionFS](https://unionfs.filesystems.org) or [MergerFS](https://github.com/trapexit/mergerfs). 308 | Each remote mount MUST have its own anchor file and its own name for that anchor file. 309 | In addition, make sure to define the 'merged' path to the file and not the remote mount path. 310 | This helps check whether the union-software is working correctly as well. 311 | 312 | ### Minimum age 313 | 314 | Autoscan does not check whether scan requests received by triggers exist on the file system. 315 | Therefore, to make sure a file exists before it reaches the targets, you should set a minimum age. 316 | The minimum age delays the scan from being sent to the targets after it has been added to the queue by a trigger. 317 | The default minimum age is set at 10 minutes to prevent common synchronisation issues. 318 | 319 | ### Customising the processor 320 | 321 | The processor allows you to set the minimum age of a Scan. 322 | In addition, you can also define a list of anchor files. 
323 | 324 | A snippet of the `config.yml` file: 325 | 326 | ```yaml 327 | # override the minimum age to 30 minutes: 328 | minimum-age: 30m 329 | 330 | # override the delay between processed scans: 331 | # defaults to 5 seconds 332 | scan-delay: 15s 333 | 334 | # override the interval scan stats are displayed: 335 | # defaults to 1 hour / 0s to disable 336 | scan-stats: 1m 337 | 338 | # set multiple anchor files 339 | anchors: 340 | - /mnt/unionfs/drive1.anchor 341 | - /mnt/unionfs/drive2.anchor 342 | ``` 343 | 344 | The `minimum-age`, `scan-delay` and `scan-stats` fields should be given a string in the following format: 345 | 346 | - `1s` if the min-age should be set at 1 second. 347 | - `5m` if the min-age should be set at 5 minutes. 348 | - `1m30s` if the min-age should be set at 1 minute and 30 seconds. 349 | - `1h` if the min-age should be set at 1 hour. 350 | 351 | *Please do not forget the `s`, `m` or `h` suffix, otherwise the time unit defaults to nanoseconds.* 352 | 353 | Scan stats will print the following information at a configured interval: 354 | 355 | - Scans processed 356 | - Scans remaining 357 | 358 | ## Targets 359 | 360 | While collecting Scans is fun and all, they need to have a final destination. 361 | Targets are these final destinations and are given Scans from the processor, one batch at a time. 362 | 363 | Autoscan currently supports the following targets: 364 | 365 | - Plex 366 | - Emby 367 | - Jellyfin 368 | - Autoscan 369 | 370 | ### Plex 371 | 372 | Autoscan replaces Plex's default behaviour of updating the Plex library automatically. 373 | Therefore, it is advised to turn off Plex's `Update my library automatically` feature. 
374 | 375 | You can setup one or multiple Plex targets in the config: 376 | 377 | ```yaml 378 | targets: 379 | plex: 380 | - url: https://plex.domain.tld # URL of your Plex server 381 | token: XXXX # Plex API Token 382 | rewrite: 383 | - from: /mnt/unionfs/Media/ # local file system 384 | to: /data/ # path accessible by the Plex docker container (if applicable) 385 | ``` 386 | 387 | There are a couple of things to take note of in the config: 388 | 389 | - URL. The URL can link to the docker container directly, the localhost or a reverse proxy sitting in front of Plex. 390 | - Token. We need a Plex API Token to make requests on your behalf. [This article](https://support.plex.tv/articles/204059436-finding-an-authentication-token-x-plex-token/) should help you out. 391 | - Rewrite. If Plex is not running on the host OS, but in a Docker container (or Autoscan is running in a Docker container), then you need to rewrite paths accordingly. Check out our [rewriting section](#rewriting-paths) for more info. 392 | 393 | ### Emby 394 | 395 | While Emby provides much better behaviour out of the box than Plex, it still might be useful to use Autoscan for even better performance. 396 | 397 | You can setup one or multiple Emby targets in the config: 398 | 399 | ```yaml 400 | targets: 401 | emby: 402 | - url: https://emby.domain.tld # URL of your Emby server 403 | token: XXXX # Emby API Token 404 | rewrite: 405 | - from: /mnt/unionfs/Media/ # local file system 406 | to: /data/ # path accessible by the Emby docker container (if applicable) 407 | ``` 408 | 409 | - URL. The URL can link to the docker container directly, the localhost or a reverse proxy sitting in front of Emby. 410 | - Token. We need an Emby API Token to make requests on your behalf. [This article](https://github.com/MediaBrowser/Emby/wiki/Api-Key-Authentication) should help you out. \ 411 | *It's a bit out of date, but I'm sure you will manage!* 412 | - Rewrite. 
If Emby is not running on the host OS, but in a Docker container (or Autoscan is running in a Docker container), then you need to rewrite paths accordingly. Check out our [rewriting section](#rewriting-paths) for more info. 413 | 414 | ### Jellyfin 415 | 416 | While Jellyfin provides much better behaviour out of the box than Plex, it still might be useful to use Autoscan for even better performance. 417 | 418 | You can setup one or multiple Jellyfin targets in the config: 419 | 420 | ```yaml 421 | targets: 422 | jellyfin: 423 | - url: https://jellyfin.domain.tld # URL of your Jellyfin server 424 | token: XXXX # Jellyfin API Token 425 | rewrite: 426 | - from: /mnt/unionfs/Media/ # local file system 427 | to: /data/ # path accessible by the Jellyfin docker container (if applicable) 428 | ``` 429 | 430 | - URL. The URL can link to the docker container directly, the localhost or a reverse proxy sitting in front of Jellyfin. 431 | - Token. We need a Jellyfin API Token to make requests on your behalf. [This article](https://github.com/MediaBrowser/Emby/wiki/Api-Key-Authentication) should help you out. \ 432 | *It's a bit out of date, but I'm sure you will manage!* 433 | - Rewrite. If Jellyfin is not running on the host OS, but in a Docker container (or Autoscan is running in a Docker container), then you need to rewrite paths accordingly. Check out our [rewriting section](#rewriting-paths) for more info. 434 | 435 | ### Autoscan 436 | 437 | You can also send scan requests to other instances of autoscan! 
438 | 439 | ```yaml 440 | targets: 441 | autoscan: 442 | - url: https://autoscan.domain.tld # URL of Autoscan 443 | username: XXXX # Username for remote autoscan instance 444 | password: XXXX # Password for remote autoscan instance 445 | rewrite: 446 | - from: /mnt/unionfs/Media/ # local file system 447 | to: /mnt/nfs/Media/ # path accessible by the remote autoscan instance (if applicable) 448 | ``` 449 | 450 | ## Full config file 451 | 452 | With the examples given in the [triggers](#triggers), [processor](#processor) and [targets](#targets) sections, here is what your full config file *could* look like: 453 | 454 | ```yaml 455 | # <- processor -> 456 | 457 | # override the minimum age to 30 minutes: 458 | minimum-age: 30m 459 | 460 | # set multiple anchor files 461 | anchors: 462 | - /mnt/unionfs/drive1.anchor 463 | - /mnt/unionfs/drive2.anchor 464 | 465 | # <- triggers -> 466 | 467 | # Protect your webhooks with authentication 468 | authentication: 469 | username: hello there 470 | password: general kenobi 471 | 472 | # port for Autoscan webhooks to listen on 473 | port: 3030 474 | 475 | triggers: 476 | lidarr: 477 | - name: lidarr # /triggers/lidarr 478 | priority: 1 479 | 480 | radarr: 481 | - name: radarr # /triggers/radarr 482 | priority: 2 483 | - name: radarr4k # /triggers/radarr4k 484 | priority: 5 485 | 486 | readarr: 487 | - name: readarr # /triggers/readarr 488 | priority: 1 489 | 490 | sonarr: 491 | - name: sonarr-docker # /triggers/sonarr-docker 492 | priority: 2 493 | 494 | # Rewrite the path from within the container 495 | # to your local filesystem. 
496 | rewrite: 497 | - from: /tv/ 498 | to: /mnt/unionfs/Media/TV/ 499 | 500 | # <- targets -> 501 | 502 | targets: 503 | plex: 504 | - url: https://plex.domain.tld # URL of your Plex server 505 | token: XXXX # Plex API Token 506 | rewrite: 507 | - from: /mnt/unionfs/Media/ # local file system 508 | to: /data/ # path accessible by the Plex docker container (if applicable) 509 | 510 | emby: 511 | - url: https://emby.domain.tld # URL of your Emby server 512 | token: XXXX # Emby API Token 513 | rewrite: 514 | - from: /mnt/unionfs/Media/ # local file system 515 | to: /data/ # path accessible by the Emby docker container (if applicable) 516 | ``` 517 | 518 | ## Other configuration options 519 | 520 | ```yaml 521 | # Specify the port to listen on (3030 is the default when not specified) 522 | port: 3030 523 | ``` 524 | 525 | ```yaml 526 | # Specify the host interface(s) to listen on (0.0.0.0 is the default when not specified) 527 | host: 528 | - 127.0.0.1 529 | - 172.19.185.13 530 | - 192.168.0.1:5959 531 | ``` 532 | 533 | - If no port is specified, it will use the default port configured. 534 | - This configuration option is only needed if you have a requirement to listen to multiple interfaces. 535 | 536 | ## Other installation options 537 | 538 | ### Docker 539 | 540 | Autoscan has an accompanying docker image which can be found on [Docker Hub](https://hub.docker.com/r/cloudb0x/autoscan). 541 | 542 | Autoscan requires access to all files being passed between the triggers and the targets. \ 543 | *Just mount the source directory, for many people this is `/mnt/unionfs`.* 544 | 545 | Make sure these files are available within the Autoscan container. 546 | Remember that you MUST use [rewriting rules](#rewriting-paths) if paths are not identical between triggers, autoscan and targets. These rules can be set from the config for each trigger and target individually. 
547 | 548 | #### Version Tags 549 | 550 | Autoscan's Docker image provides various versions that are available via tags. The `latest` tag usually provides the latest stable version. Others are considered under development and caution must be exercised when using them. 551 | 552 | | Tag | Description | 553 | | :----: | --- | 554 | | latest | Latest stable version from a tagged GitHub release | 555 | | master | Most recent GitHub master commit | 556 | 557 | #### Usage 558 | 559 | ```bash 560 | docker run \ 561 | --name=autoscan \ 562 | -e "PUID=1000" \ 563 | -e "PGID=1001" \ 564 | -p 3030:3030 \ 565 | -v "/opt/autoscan:/config" \ 566 | -v "/mnt/unionfs:/mnt/unionfs:ro" \ 567 | --restart=unless-stopped \ 568 | -d cloudb0x/autoscan 569 | ``` 570 | 571 | #### Parameters 572 | 573 | Autoscan's Docker image supports the following parameters. 574 | 575 | | Parameter | Function | 576 | | :----: | --- | 577 | | `-p 3030:3030` | The port used by Autoscan's webhook triggers | 578 | | `-e PUID=1000` | The UserID to run the Autoscan binary as | 579 | | `-e PGID=1000` | The GroupID to run the Autoscan binary as | 580 | | `-e AUTOSCAN_VERBOSITY=0` | The Autoscan logging verbosity level to use. (0 = info, 1 = debug, 2 = trace) | 581 | | `-v /config` | Autoscan's config and database file | 582 | 583 | Any other volumes can be referenced within Autoscan's config file `config.yml`, assuming it has been specified as a volume. 584 | --------------------------------------------------------------------------------