├── web ├── .node-version ├── .gitignore ├── public │ ├── 814.814.js.LICENSE.txt │ ├── favicon.png │ ├── index.html │ └── 814.814.js ├── src │ ├── main.js │ ├── Utilities │ │ └── web_root.js │ ├── App.svelte │ ├── components │ │ └── APITable.svelte │ └── pages │ │ ├── Info.svelte │ │ └── Config.svelte ├── README.md ├── package.json └── webpack.config.js ├── .github ├── FUNDING.yml └── workflows │ ├── codeql-analysis.yml │ ├── build.yml │ └── pr_artifact_comment.yml ├── pkg ├── clouddownloader │ ├── types.go │ └── interfaces.go ├── stringqueue │ ├── types.go │ └── stringqueue.go └── premiumizeme │ ├── types.go │ └── premiumizeme.go ├── docker ├── Dockerfile.amd64 ├── Dockerfile.arm64 ├── root │ └── etc │ │ ├── services.d │ │ └── premiumizearr │ │ │ └── run │ │ └── cont-init.d │ │ └── 30-config └── Dockerfile.common ├── internal ├── service │ ├── service.go │ ├── web_service_config_routes.go │ ├── arrs_manager_service.go │ ├── web_service_handlers.go │ ├── web_service.go │ ├── directory_watcher_service.go │ └── transfer_manager_service.go ├── config │ ├── config_update.go │ ├── types.go │ └── config.go ├── directory_watcher │ ├── types.go │ └── directory_watcher.go ├── arr │ ├── types.go │ ├── sonarr.go │ ├── lidarr.go │ └── radarr.go ├── progress_downloader │ └── progress_downloader.go └── utils │ └── utils.go ├── .gitignore ├── init └── premiumizearrd.service ├── renovate.json ├── go.mod ├── scripts └── postinstall.sh ├── config.yaml ├── Makefile ├── go.sum ├── cmd └── premiumizearrd │ ├── main.go │ └── app.go ├── .prerelease.goreleaser.yaml ├── .goreleaser.yaml ├── README.md └── LICENSE /web/.node-version: -------------------------------------------------------------------------------- 1 | v16 -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | ko_fi: ensingerphilipp 2 | -------------------------------------------------------------------------------- /web/.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | node_modules 3 | dist 4 | -------------------------------------------------------------------------------- /web/public/814.814.js.LICENSE.txt: -------------------------------------------------------------------------------- 1 | /*! 
nouislider - 14.6.1 - 8/17/2020 */ 2 | -------------------------------------------------------------------------------- /pkg/clouddownloader/types.go: -------------------------------------------------------------------------------- 1 | package clouddownloader 2 | 3 | type Transfer struct { 4 | } 5 | -------------------------------------------------------------------------------- /web/public/favicon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ensingerphilipp/Premiumizearr-Nova/HEAD/web/public/favicon.png -------------------------------------------------------------------------------- /docker/Dockerfile.amd64: -------------------------------------------------------------------------------- 1 | # syntax=edrevo/dockerfile-plus 2 | 3 | FROM ghcr.io/linuxserver/baseimage-alpine:3.21 4 | 5 | INCLUDE+ docker/Dockerfile.common 6 | -------------------------------------------------------------------------------- /docker/Dockerfile.arm64: -------------------------------------------------------------------------------- 1 | # syntax=edrevo/dockerfile-plus 2 | 3 | FROM ghcr.io/linuxserver/baseimage-alpine:3.21 4 | 5 | INCLUDE+ docker/Dockerfile.common 6 | -------------------------------------------------------------------------------- /pkg/stringqueue/types.go: -------------------------------------------------------------------------------- 1 | package stringqueue 2 | 3 | import "sync" 4 | 5 | type StringQueue struct { 6 | queue []string 7 | mutex *sync.Mutex 8 | } 9 | -------------------------------------------------------------------------------- /docker/root/etc/services.d/premiumizearr/run: -------------------------------------------------------------------------------- 1 | #!/usr/bin/with-contenv bash 2 | 3 | cd /opt/app/ || exit 4 | 5 | exec \ 6 | s6-setuidgid abc /opt/app/premiumizearrd -------------------------------------------------------------------------------- /web/src/main.js: -------------------------------------------------------------------------------- 1 | import App from './App.svelte'; 2 | 3 | const app = new App({ 4 | target: document.body, 5 | props: {} 6 | }); 7 | 8 | export default app; 9 | -------------------------------------------------------------------------------- /pkg/clouddownloader/interfaces.go: -------------------------------------------------------------------------------- 1 | package clouddownloader 2 | 3 | // Interface for the CloudDownloader interface 4 | type CloudDownloaderInterface interface { 5 | GetTransfers() []Transfer 6 | } 7 | -------------------------------------------------------------------------------- /internal/service/service.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "github.com/ensingerphilipp/premiumizearr-nova/internal/config" 5 | ) 6 | 7 | //Service interface 8 | type Service interface { 9 | New() (*config.Config, error) 10 | Start() error 11 | Stop() error 12 | } 13 | -------------------------------------------------------------------------------- /internal/config/config_update.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | func (c *Config) UpdateConfig(_newConfig Config) { 4 | oldConfig := *c 5 | 6 | //move private fields over 7 | _newConfig.appCallback = c.appCallback 8 | _newConfig.altConfigLocation = c.altConfigLocation 9 | *c = _newConfig 10 | 11 | c.appCallback(oldConfig, *c) 12 | c.Save() 13 | } 14 | 
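UpdateConfig replaces the whole Config value, carries the unexported appCallback and altConfigLocation fields across, notifies the callback with both the old and the new configuration, and then saves the file. A minimal, hypothetical sketch of how that callback flow is consumed, mirroring what cmd/premiumizearrd/app.go does further down in this dump (illustrative only; it would need to live inside this module, since internal/config cannot be imported from outside):

```go
package main

import (
	"fmt"

	"github.com/ensingerphilipp/premiumizearr-nova/internal/config"
)

// onConfigChange receives the previous and the freshly applied configuration
// every time Config.UpdateConfig runs (for example via the web UI's config POST).
func onConfigChange(oldCfg, newCfg config.Config) {
	if oldCfg.BindIP != newCfg.BindIP || oldCfg.BindPort != newCfg.BindPort {
		fmt.Println("listen address changed, the web server needs a restart")
	}
}

func main() {
	// LoadOrCreateConfig stores the callback inside the returned Config, so every
	// later UpdateConfig call invokes onConfigChange and then persists the file.
	cfg, err := config.LoadOrCreateConfig("./", onConfigChange)
	if err != nil {
		panic(err)
	}
	_ = cfg
}
```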
-------------------------------------------------------------------------------- /web/README.md: -------------------------------------------------------------------------------- 1 | # Premiumizearr UI 2 | 3 | - Displays current transfers status' 4 | - Displays current downloads status' 5 | 6 | ## Development 7 | 8 | *Note that you will need to have [Node.js](https://nodejs.org) installed.* 9 | 10 | Install the dependencies... 11 | 12 | ```bash 13 | npm install 14 | ``` 15 | 16 | ...then start webpack: 17 | 18 | ```bash 19 | npm run dev 20 | ``` 21 | -------------------------------------------------------------------------------- /docker/root/etc/cont-init.d/30-config: -------------------------------------------------------------------------------- 1 | #!/usr/bin/with-contenv bash 2 | 3 | # Default UID/GID to 1000 if not set 4 | PUID=${PUID:-1000} 5 | PGID=${PGID:-1000} 6 | 7 | # Optional: log what's happening 8 | echo "[cont-init.d] Setting permissions to $PUID:$PGID" 9 | 10 | # Change ownership recursively 11 | chown -R "$PUID:$PGID" \ 12 | /data \ 13 | /downloads \ 14 | /transfers \ 15 | /blackhole \ 16 | /opt -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.exe~ 4 | *.dll 5 | *.so 6 | *.dylib 7 | 8 | # Test binary, built with `go test -c` 9 | *.test 10 | 11 | # Output of the go coverage tool, specifically when used with LiteIDE 12 | *.out 13 | 14 | build/ 15 | 16 | *.log 17 | 18 | .vscode 19 | premiumizearrd 20 | premiumizearrd.exe 21 | build 22 | static 23 | dist/ 24 | 25 | !cmd/premiumizearrd/ 26 | -------------------------------------------------------------------------------- /init/premiumizearrd.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Premiumizearr Daemon 3 | After=network.target 4 | [Service] 5 | User=###USER### 6 | Group=###GROUP### 7 | UMask=0002 8 | Type=simple 9 | Environment=PREMIUMIZEARR_LOG_LEVEL=info 10 | ExecStart=/opt/premiumizearrd/premiumizearrd 11 | WorkingDirectory=/opt/premiumizearrd/ 12 | TimeoutStopSec=20 13 | KillMode=process 14 | Restart=on-failure 15 | [Install] 16 | WantedBy=multi-user.target 17 | -------------------------------------------------------------------------------- /renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": ["config:base"], 3 | "labels": ["dependencies"], 4 | "prBodyColumns": ["package", "from", "to", "changelog"], 5 | "packageRules": [ 6 | { 7 | "matchUpdateTypes": ["major"], 8 | "labels": ["breaking change"], 9 | "automerge": false 10 | }, 11 | { 12 | "matchUpdateTypes": ["minor", "patch"], 13 | "automerge": false 14 | } 15 | ], 16 | "prHourlyLimit": 5, 17 | "prConcurrentLimit": 10 18 | } 19 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/ensingerphilipp/premiumizearr-nova 2 | 3 | go 1.23.0 4 | 5 | toolchain go1.24.2 6 | 7 | require ( 8 | github.com/dustin/go-humanize v1.0.1 9 | github.com/fsnotify/fsnotify v1.8.0 10 | github.com/gorilla/mux v1.8.1 11 | github.com/orandin/lumberjackrus v1.0.1 12 | github.com/sirupsen/logrus v1.9.3 13 | golift.io/starr v1.1.0 14 | gopkg.in/yaml.v2 v2.4.0 15 | ) 16 | 17 | require ( 18 | golang.org/x/net v0.37.0 // indirect 19 | 
golang.org/x/sys v0.31.0 // indirect 20 | gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect 21 | ) 22 | -------------------------------------------------------------------------------- /web/src/Utilities/web_root.js: -------------------------------------------------------------------------------- 1 | export function CalculateAPIPath(path) { 2 | let webRoot = window.location.href; 3 | 4 | if (webRoot.indexOf("index.html") > -1) { 5 | webRoot = webRoot.substring(0, webRoot.indexOf("index.html")); 6 | } 7 | 8 | if (webRoot[webRoot.length - 1] !== "/") { 9 | webRoot += "/"; 10 | } 11 | 12 | if (path[0] == "/") { 13 | // console.log(webRoot + path.substring(1)); 14 | return webRoot + path.substring(1); 15 | } 16 | 17 | // console.log(webRoot + path); 18 | 19 | return webRoot + path; 20 | } 21 | 22 | -------------------------------------------------------------------------------- /scripts/postinstall.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Set default UID and GID if not provided 4 | PUID=${PUID:-1000} 5 | PGID=${PGID:-1000} 6 | 7 | # Set permissions 8 | chown -R "$PUID:$PGID" /opt/premiumizearrd/ 9 | 10 | # Replace placeholders in systemd service file 11 | sed -i "" "s/###USER###/$PUID/g" /etc/systemd/system/premiumizearrd.service 12 | sed -i "" "s/###GROUP###/$PGID/g" /etc/systemd/system/premiumizearrd.service 13 | 14 | # Enable and start the service 15 | systemctl enable premiumizearrd.service 16 | systemctl daemon-reload 17 | systemctl start premiumizearrd.service 18 | -------------------------------------------------------------------------------- /docker/Dockerfile.common: -------------------------------------------------------------------------------- 1 | LABEL build_version="Premiumizearr-Nova ${TAG} Build-date:- ${BUILD_DATE}" 2 | LABEL maintainer="ensingerphilipp" 3 | 4 | COPY docker/root/ / 5 | 6 | EXPOSE 8182 7 | 8 | RUN mkdir /data 9 | RUN mkdir /downloads 10 | RUN mkdir /transfers 11 | RUN mkdir /blackhole 12 | RUN mkdir -p /opt/app/ 13 | 14 | RUN apk add --no-cache bash 15 | RUN apk add libc6-compat 16 | RUN apk add --no-cache wget 17 | RUN apk add --no-cache coreutils 18 | 19 | WORKDIR /opt/app/ 20 | 21 | ENV PREMIUMIZEARR_CONFIG_DIR_PATH=/data 22 | ENV PREMIUMIZEARR_LOGGING_DIR_PATH=/data 23 | 24 | COPY premiumizearrd /opt/app/ 25 | COPY build/static /opt/app/static 26 | 27 | ENTRYPOINT ["/init"] 28 | -------------------------------------------------------------------------------- /config.yaml: -------------------------------------------------------------------------------- 1 | PremiumizemeAPIKey: xxxxxxxxx 2 | Arrs: 3 | - Name: "" 4 | URL: http://127.0.0.1:8989 5 | APIKey: xxxxxxxxx 6 | Type: Sonarr 7 | - Name: "" 8 | URL: http://127.0.0.1:7878 9 | APIKey: xxxxxxxxx 10 | Type: Radarr 11 | - Name: "" 12 | URL: http://127.0.0.1:8686 13 | APIKey: xxxxxxxxx 14 | Type: Lidarr 15 | BlackholeDirectory: "" 16 | PollBlackholeDirectory: false 17 | PollBlackholeIntervalMinutes: 10 18 | DownloadsDirectory: "" 19 | TransferDirectory: "arrDownloads" 20 | bindIP: 0.0.0.0 21 | bindPort: "8182" 22 | WebRoot: "" 23 | SimultaneousDownloads: 5 24 | DownloadSpeedLimit: 100 25 | EnableTlsCheck: false 26 | TransferOnlyMode: false 27 | ArrHistoryUpdateIntervalSeconds: 20 28 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .POSIX: 2 | .SUFFIXES: 3 | 4 | SERVICE = premiumizearrd 5 | GO = go 6 | RM = rm 7 
| GOFLAGS = 8 | PREFIX = /usr/local 9 | BUILDDIR = build 10 | 11 | all: clean build 12 | 13 | web: deps build/web 14 | 15 | deps: 16 | cd web && npm i 17 | go mod download 18 | 19 | build: deps build/web build/app 20 | 21 | build/app: 22 | CGO_ENABLED=0 go build -tags 'netgo osusergo' -ldflags '-extldflags "-static"' -o $(BUILDDIR)/$(SERVICE) ./cmd/$(SERVICE) 23 | cp build/premiumizearrd premiumizearrd 24 | 25 | build/web: 26 | mkdir -p build 27 | cd web && npm run build 28 | mkdir -p build/static/ && cp -r web/dist/* build/static/ 29 | cp init/* build/ 30 | 31 | clean: 32 | $(RM) -rf build 33 | -------------------------------------------------------------------------------- /web/public/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | Premiumizearr 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | -------------------------------------------------------------------------------- /internal/directory_watcher/types.go: -------------------------------------------------------------------------------- 1 | package directory_watcher 2 | 3 | import "github.com/fsnotify/fsnotify" 4 | 5 | // WatchDirectory watches a directory for changes. 6 | type WatchDirectory struct { 7 | // Path is the path to the directory to watch. 8 | Path string 9 | // Filter is the filter to apply to the directory. 10 | Filter string 11 | // Recursive is true if the directory should be watched recursively. 12 | Recursive bool 13 | // MatchFunction is the function to use to match files. 14 | MatchFunction func(string) int 15 | // Callback is the function to call when a file is created that matches with MatchFunction. 16 | CallbackFunction func(string) 17 | // watcher is the fsnotify watcher. 18 | Watcher *fsnotify.Watcher 19 | } 20 | -------------------------------------------------------------------------------- /web/src/App.svelte: -------------------------------------------------------------------------------- 1 | 15 | 16 |
17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 |
32 | -------------------------------------------------------------------------------- /web/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "premiumizearr-ui", 3 | "version": "0.0.1", 4 | "devDependencies": { 5 | "carbon-components-svelte": "^0.88.0", 6 | "carbon-icons-svelte": "^13.0.0", 7 | "carbon-preprocess-svelte": "^0.11.0", 8 | "copy-webpack-plugin": "^13.0.0", 9 | "cross-env": "^7.0.0", 10 | "css-loader": "^7.0.0", 11 | "esbuild-loader": "^2.0.0", 12 | "mini-css-extract-plugin": "^1.0.0", 13 | "svelte": "^3.49.0", 14 | "svelte-loader": "^3.0.0", 15 | "webpack": "^5.0.0", 16 | "webpack-cli": "^4.0.0", 17 | "webpack-dev-server": "^4.0.0" 18 | }, 19 | "scripts": { 20 | "build": "cross-env NODE_ENV=production webpack", 21 | "dev": "webpack serve --static public" 22 | }, 23 | "dependencies": { 24 | "luxon": "^2.0.0" 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /pkg/stringqueue/stringqueue.go: -------------------------------------------------------------------------------- 1 | package stringqueue 2 | 3 | import "sync" 4 | 5 | func NewStringQueue() *StringQueue { 6 | return &StringQueue{queue: make([]string, 0), mutex: &sync.Mutex{}} 7 | } 8 | 9 | func (UploadQueue *StringQueue) Len() int { 10 | UploadQueue.mutex.Lock() 11 | defer UploadQueue.mutex.Unlock() 12 | return len(UploadQueue.queue) 13 | } 14 | 15 | func (UploadQueue *StringQueue) Add(path string) { 16 | UploadQueue.mutex.Lock() 17 | defer UploadQueue.mutex.Unlock() 18 | UploadQueue.queue = append(UploadQueue.queue, path) 19 | } 20 | 21 | func (UploadQueue *StringQueue) PopTopOfQueue() (bool, string) { 22 | UploadQueue.mutex.Lock() 23 | defer UploadQueue.mutex.Unlock() 24 | if len(UploadQueue.queue) > 0 { 25 | rtn := UploadQueue.queue[0] 26 | UploadQueue.queue = UploadQueue.queue[1:] 27 | return true, rtn 28 | } 29 | return false, "" 30 | } 31 | 32 | func (UploadQueue *StringQueue) GetQueue() []string { 33 | UploadQueue.mutex.Lock() 34 | defer UploadQueue.mutex.Unlock() 35 | return UploadQueue.queue 36 | } 37 | -------------------------------------------------------------------------------- /.github/workflows/codeql-analysis.yml: -------------------------------------------------------------------------------- 1 | # For most projects, this workflow file will not need changing; you simply need 2 | # to commit it to your repository. 3 | # 4 | # You may wish to alter this file to override the set of languages analyzed, 5 | # or to provide custom queries or build logic. 6 | # 7 | # ******** NOTE ******** 8 | # We have attempted to detect the languages in your repository. Please check 9 | # the `language` matrix defined below to confirm you have the correct set of 10 | # supported CodeQL languages. 
11 | # 12 | name: "CodeQL" 13 | 14 | on: 15 | push: 16 | branches: [ main ] 17 | pull_request: 18 | branches: [ main ] 19 | 20 | jobs: 21 | analyze: 22 | name: Analyze 23 | runs-on: ubuntu-latest 24 | permissions: 25 | actions: read 26 | contents: read 27 | security-events: write 28 | 29 | strategy: 30 | fail-fast: false 31 | matrix: 32 | language: [ 'go', 'javascript' ] 33 | 34 | steps: 35 | - name: Checkout repository 36 | uses: actions/checkout@v2 37 | 38 | - name: Initialize CodeQL 39 | uses: github/codeql-action/init@v2 40 | with: 41 | languages: ${{ matrix.language }} 42 | 43 | - name: Autobuild 44 | uses: github/codeql-action/autobuild@v2 45 | 46 | - name: Perform CodeQL Analysis 47 | uses: github/codeql-action/analyze@v2 48 | -------------------------------------------------------------------------------- /internal/service/web_service_config_routes.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "net/http" 7 | 8 | "github.com/ensingerphilipp/premiumizearr-nova/internal/config" 9 | ) 10 | 11 | type ConfigChangeResponse struct { 12 | Succeeded bool `json:"succeeded"` 13 | Status string `json:"status"` 14 | } 15 | 16 | func (s *WebServerService) ConfigHandler(w http.ResponseWriter, r *http.Request) { 17 | 18 | switch r.Method { 19 | case http.MethodGet: 20 | data, err := json.Marshal(s.config) 21 | if err != nil { 22 | http.Error(w, err.Error(), http.StatusInternalServerError) 23 | return 24 | } 25 | 26 | w.Write(data) 27 | case http.MethodPost: 28 | var newConfig config.Config 29 | err := json.NewDecoder(r.Body).Decode(&newConfig) 30 | if err != nil { 31 | EncodeAndWriteConfigChangeResponse(w, &ConfigChangeResponse{ 32 | Succeeded: false, 33 | Status: fmt.Sprintf("Config failed to update %s", err.Error()), 34 | }) 35 | return 36 | } 37 | s.config.UpdateConfig(newConfig) 38 | EncodeAndWriteConfigChangeResponse(w, &ConfigChangeResponse{ 39 | Succeeded: true, 40 | Status: "Config updated", 41 | }) 42 | default: 43 | http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) 44 | } 45 | 46 | } 47 | 48 | func EncodeAndWriteConfigChangeResponse(w http.ResponseWriter, resp *ConfigChangeResponse) { 49 | data, err := json.Marshal(resp) 50 | if err != nil { 51 | http.Error(w, err.Error(), http.StatusInternalServerError) 52 | return 53 | } 54 | 55 | w.Write(data) 56 | } 57 | -------------------------------------------------------------------------------- /internal/directory_watcher/directory_watcher.go: -------------------------------------------------------------------------------- 1 | package directory_watcher 2 | 3 | import ( 4 | "os" 5 | "path/filepath" 6 | 7 | "github.com/fsnotify/fsnotify" 8 | ) 9 | 10 | // NewWatchDirectory creates a new WatchDirectory. 
11 | func NewDirectoryWatcher(path string, recursive bool, matchFunction func(string) int, callbackFunction func(string)) *WatchDirectory { 12 | return &WatchDirectory{ 13 | Path: path, 14 | // TODO (Unused): Add recursive abilities 15 | Recursive: recursive, 16 | MatchFunction: matchFunction, 17 | CallbackFunction: callbackFunction, 18 | } 19 | } 20 | 21 | func (w *WatchDirectory) Watch() error { 22 | var err error 23 | w.Watcher, err = fsnotify.NewWatcher() 24 | if err != nil { 25 | return err 26 | } 27 | 28 | go func() { 29 | var action int = 1 30 | for { 31 | select { 32 | case event, ok := <-w.Watcher.Events: 33 | if !ok { 34 | return 35 | } 36 | if event.Op&fsnotify.Create == fsnotify.Create { 37 | action = w.MatchFunction(event.Name) 38 | if action == 1 { 39 | w.CallbackFunction(event.Name) 40 | } else if action == 2 { 41 | w.Watcher.Add(event.Name) 42 | } 43 | } 44 | case _, ok := <-w.Watcher.Errors: 45 | if !ok { 46 | return 47 | } 48 | } 49 | } 50 | }() 51 | 52 | cleanPath := filepath.Clean(w.Path) 53 | _, err = os.Stat(cleanPath) 54 | if os.IsNotExist(err) { 55 | return err 56 | } 57 | 58 | err = w.Watcher.Add(cleanPath) 59 | if err != nil { 60 | return err 61 | } 62 | 63 | return nil 64 | } 65 | 66 | func (w *WatchDirectory) UpdatePath(path string) error { 67 | w.Watcher.Remove(w.Path) 68 | w.Path = path 69 | return w.Watcher.Add(w.Path) 70 | } 71 | 72 | func (w *WatchDirectory) Stop() error { 73 | return w.Watcher.Close() 74 | } 75 | -------------------------------------------------------------------------------- /web/webpack.config.js: -------------------------------------------------------------------------------- 1 | const MiniCssExtractPlugin = require('mini-css-extract-plugin'); 2 | const { ESBuildMinifyPlugin } = require("esbuild-loader"); 3 | const path = require('path'); 4 | const CopyWebpackPlugin = require('copy-webpack-plugin'); 5 | const { optimizeImports } = require("carbon-preprocess-svelte"); 6 | 7 | const mode = process.env.NODE_ENV || 'development'; 8 | const prod = mode === 'production'; 9 | module.exports = { 10 | entry: { 11 | 'bundle': ['./src/main.js'] 12 | }, 13 | resolve: { 14 | alias: { 15 | svelte: path.dirname(require.resolve('svelte/package.json')) 16 | }, 17 | extensions: ['.mjs', '.js', '.svelte'], 18 | mainFields: ['svelte', 'browser', 'module', 'main'] 19 | }, 20 | output: { 21 | path: path.join(__dirname, '/dist'), 22 | filename: '[name].js', 23 | chunkFilename: '[name].[id].js' 24 | }, 25 | module: { 26 | rules: [ 27 | { 28 | test: /\.svelte$/, 29 | use: { 30 | loader: "svelte-loader", 31 | options: { 32 | preprocess: [optimizeImports()], 33 | hotReload: !prod, 34 | compilerOptions: { dev: !prod }, 35 | }, 36 | }, 37 | }, 38 | { 39 | test: /\.css$/, 40 | use: [MiniCssExtractPlugin.loader, "css-loader"], 41 | }, 42 | { 43 | test: /node_modules\/svelte\/.*\.mjs$/, 44 | resolve: { fullySpecified: false }, 45 | }, 46 | ] 47 | }, 48 | mode, 49 | plugins: [ 50 | new MiniCssExtractPlugin({ 51 | filename: '[name].css' 52 | }), 53 | new CopyWebpackPlugin({ 54 | patterns: [ 55 | { from: 'public' } 56 | ] 57 | }) 58 | ], 59 | devtool: 'source-map', 60 | devServer: { 61 | hot: true, 62 | proxy: { 63 | '/api': 'http://127.0.0.1:8182' 64 | } 65 | }, 66 | optimization: { 67 | minimizer: [new ESBuildMinifyPlugin({ target: "es2015" })], 68 | }, 69 | }; 70 | -------------------------------------------------------------------------------- /internal/config/types.go: -------------------------------------------------------------------------------- 1 | package 
config 2 | 3 | import "errors" 4 | 5 | var ( 6 | ErrInvalidConfigFile = errors.New("invalid Config File") 7 | ErrFailedToFindConfigFile = errors.New("failed to find config file") 8 | ErrFailedToSaveConfig = errors.New("failed to save config") 9 | ) 10 | 11 | // ArrType enum for Sonarr/Radarr/Lidarr 12 | type ArrType string 13 | 14 | // AppCallback - Callback for the app to use 15 | type AppCallback func(oldConfig Config, newConfig Config) 16 | 17 | const ( 18 | Sonarr ArrType = "Sonarr" 19 | Radarr ArrType = "Radarr" 20 | Lidarr ArrType = "Lidarr" 21 | ) 22 | 23 | type ArrConfig struct { 24 | Name string `yaml:"Name" json:"Name"` 25 | URL string `yaml:"URL" json:"URL"` 26 | APIKey string `yaml:"APIKey" json:"APIKey"` 27 | Type ArrType `yaml:"Type" json:"Type"` 28 | } 29 | 30 | type Config struct { 31 | altConfigLocation string 32 | appCallback AppCallback 33 | 34 | //PremiumizemeAPIKey string with yaml and json tag 35 | PremiumizemeAPIKey string `yaml:"PremiumizemeAPIKey" json:"PremiumizemeAPIKey"` 36 | 37 | Arrs []ArrConfig `yaml:"Arrs" json:"Arrs"` 38 | 39 | BlackholeDirectory string `yaml:"BlackholeDirectory" json:"BlackholeDirectory"` 40 | PollBlackholeDirectory bool `yaml:"PollBlackholeDirectory" json:"PollBlackholeDirectory"` 41 | PollBlackholeIntervalMinutes int `yaml:"PollBlackholeIntervalMinutes" json:"PollBlackholeIntervalMinutes"` 42 | 43 | DownloadsDirectory string `yaml:"DownloadsDirectory" json:"DownloadsDirectory"` 44 | TransferDirectory string `yaml:"TransferDirectory" json:"TransferDirectory"` 45 | 46 | BindIP string `yaml:"bindIP" json:"BindIP"` 47 | BindPort string `yaml:"bindPort" json:"BindPort"` 48 | 49 | WebRoot string `yaml:"WebRoot" json:"WebRoot"` 50 | 51 | SimultaneousDownloads int `yaml:"SimultaneousDownloads" json:"SimultaneousDownloads"` 52 | DownloadSpeedLimit int `yaml:"DownloadSpeedLimit" json:"DownloadSpeedLimit"` 53 | EnableTlsCheck bool `yaml:"EnableTlsCheck" json:"EnableTlsCheck"` 54 | TransferOnlyMode bool `yaml:"TransferOnlyMode" json:"TransferOnlyMode"` 55 | 56 | ArrHistoryUpdateIntervalSeconds int `yaml:"ArrHistoryUpdateIntervalSeconds" json:"ArrHistoryUpdateIntervalSeconds"` 57 | } 58 | -------------------------------------------------------------------------------- /web/src/components/APITable.svelte: -------------------------------------------------------------------------------- 1 | 60 | 61 |
[APITable.svelte's script block and markup tags did not survive extraction; the remaining template fragments (file lines 62-76) show a header rendered inside an {#if totalName !== ""} block that displays {totalName} and {safeLength(rows)}, and a status line that renders "Message: {status}".]
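For reference, the yaml struct tags on Config in internal/config/types.go above are what tie it to the config.yaml shown earlier; bindIP and bindPort are the only lower-cased keys. A small self-contained sketch of that mapping with gopkg.in/yaml.v2 (already a dependency in go.mod), purely illustrative, since the repository's real loader lives in internal/config/config.go, which is not included in this section:

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// demoConfig mirrors a few fields of internal/config.Config to show the key mapping.
type demoConfig struct {
	PremiumizemeAPIKey    string `yaml:"PremiumizemeAPIKey"`
	BindIP                string `yaml:"bindIP"`
	BindPort              string `yaml:"bindPort"`
	SimultaneousDownloads int    `yaml:"SimultaneousDownloads"`
}

func main() {
	raw := []byte("PremiumizemeAPIKey: xxxxxxxxx\nbindIP: 0.0.0.0\nbindPort: \"8182\"\nSimultaneousDownloads: 5\n")

	var cfg demoConfig
	if err := yaml.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	// Prints: {PremiumizemeAPIKey:xxxxxxxxx BindIP:0.0.0.0 BindPort:8182 SimultaneousDownloads:5}
	fmt.Printf("%+v\n", cfg)
}
```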
-------------------------------------------------------------------------------- /pkg/premiumizeme/types.go: -------------------------------------------------------------------------------- 1 | package premiumizeme 2 | 3 | type SimpleResponse struct { 4 | Status string `json:"status"` 5 | Message string `json:"message"` 6 | } 7 | 8 | type GenerateZipResponse struct { 9 | Status string `json:"status"` 10 | Location string `json:"location"` 11 | } 12 | 13 | type GenerateFileLinkResponse struct { 14 | Status string `json:"status"` 15 | ID string `json:"id"` 16 | Name string `json:"name"` 17 | Link string `json:"link"` 18 | Type string `json:"type"` 19 | } 20 | 21 | type ListTransfersResponse struct { 22 | Status string `json:"status"` 23 | Transfers []Transfer `json:"transfers"` 24 | } 25 | 26 | type CreateTransferResponse struct { 27 | Status string `json:"status"` 28 | ID string `json:"id"` 29 | Name string `json:"name"` 30 | Type string `json:"type"` 31 | Message string `json:"message"` 32 | } 33 | 34 | type CreateFolderResponse struct { 35 | Status string `json:"status"` 36 | Message string `json:"message"` 37 | ID string `json:"id"` 38 | } 39 | 40 | type ListFoldersResponse struct { 41 | Status string `json:"status"` 42 | Message string `json:"message"` 43 | Content []Item `json:"content"` 44 | Name string `json:"name"` 45 | ParentID string `json:"parent_id"` 46 | FolderID string `json:"folder_id"` 47 | } 48 | 49 | type Item struct { 50 | ID string `json:"id"` 51 | Name string `json:"name"` 52 | Type string `json:"type"` 53 | CreatedAt int `json:"created_at"` 54 | MimeType string `json:"mime_type"` 55 | Link string `json:"link"` 56 | StreamLink string `json:"stream_link"` 57 | } 58 | type FolderItems struct { 59 | Status string `json:"status"` 60 | Contant []Item `json:"content"` 61 | Name string `json:"name"` 62 | ParentID string `json:"parent_id"` 63 | FolderID string `json:"folder_id"` 64 | } 65 | 66 | type Transfer struct { 67 | ID string `json:"id"` 68 | Name string `json:"name"` 69 | Message string `json:"message"` 70 | Status string `json:"status"` 71 | Progress float64 `json:"progress"` 72 | Src string `json:"src"` 73 | FolderID string `json:"folder_id"` 74 | FileID string `json:"file_id"` 75 | } 76 | 77 | const ( 78 | ERROR_FOLDER_ALREADY_EXISTS = "This folder already exists." 
79 | ) 80 | -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | name: Build 2 | 3 | on: 4 | push: 5 | tags: 6 | - 'v*' 7 | pull_request: 8 | branches: [ main ] 9 | 10 | permissions: 11 | contents: write 12 | packages: write 13 | attestations: write 14 | id-token: write 15 | 16 | jobs: 17 | build: 18 | runs-on: ubuntu-22.04 19 | steps: 20 | - uses: actions/checkout@v2 21 | with: 22 | fetch-depth: 0 23 | 24 | - uses: actions/setup-go@v5 25 | with: 26 | go-version: '1.24' 27 | 28 | - name: go Version 29 | run: go version 30 | 31 | - name: Docker Version 32 | run: docker version 33 | 34 | - name: Set up QEMU 35 | uses: docker/setup-qemu-action@v2 36 | 37 | - name: Set up Docker Buildx 38 | id: buildx 39 | uses: docker/setup-buildx-action@v2 40 | 41 | #Login before Building 42 | - name: Log in to GHCR 43 | uses: docker/login-action@v2 44 | with: 45 | registry: ghcr.io 46 | username: ${{ github.actor }} 47 | password: ${{ secrets.GITHUB_TOKEN }} 48 | 49 | # Standard Build 50 | - name: Build 51 | uses: goreleaser/goreleaser-action@v2 52 | if: startsWith(github.ref, 'refs/tags/') == false 53 | with: 54 | distribution: goreleaser 55 | version: latest 56 | args: release --clean --snapshot 57 | env: 58 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 59 | DOCKER_BUILDKIT: 1 60 | COMPOSE_DOCKER_CLI_BUILD: 1 61 | 62 | # Release build 63 | - name: Release 64 | uses: goreleaser/goreleaser-action@v2 65 | if: startsWith(github.ref, 'refs/tags/') && !contains(github.ref, '-rc') 66 | with: 67 | distribution: goreleaser 68 | version: latest 69 | args: release --clean 70 | env: 71 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 72 | DOCKER_BUILDKIT: 1 73 | COMPOSE_DOCKER_CLI_BUILD: 1 74 | 75 | # Pre-Release build 76 | - name: Pre-Release 77 | uses: goreleaser/goreleaser-action@v2 78 | if: startsWith(github.ref, 'refs/tags/') && contains(github.ref, '-rc') 79 | with: 80 | distribution: goreleaser 81 | version: latest 82 | args: release --clean -f .prerelease.goreleaser.yaml 83 | env: 84 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 85 | DOCKER_BUILDKIT: 1 86 | COMPOSE_DOCKER_CLI_BUILD: 1 87 | 88 | - name: Upload assets 89 | uses: actions/upload-artifact@v4 90 | with: 91 | name: artifacts 92 | path: dist/ 93 | -------------------------------------------------------------------------------- /internal/arr/types.go: -------------------------------------------------------------------------------- 1 | package arr 2 | 3 | import ( 4 | "strings" 5 | "sync" 6 | "time" 7 | 8 | "github.com/ensingerphilipp/premiumizearr-nova/internal/config" 9 | "github.com/ensingerphilipp/premiumizearr-nova/internal/utils" 10 | "github.com/ensingerphilipp/premiumizearr-nova/pkg/premiumizeme" 11 | "golift.io/starr/radarr" 12 | "golift.io/starr/sonarr" 13 | "golift.io/starr/lidarr" 14 | ) 15 | 16 | func CompareFileNamesFuzzy(a, b string) bool { 17 | //Strip file extension 18 | a = utils.StripDownloadTypesExtention(a) 19 | b = utils.StripDownloadTypesExtention(b) 20 | //Strip media type extension 21 | a = utils.StripMediaTypesExtention(a) 22 | b = utils.StripMediaTypesExtention(b) 23 | //Strip Spaces 24 | a = strings.ReplaceAll(a, " ", "") 25 | b = strings.ReplaceAll(b, " ", "") 26 | //Strip periods 27 | a = strings.ReplaceAll(a, ".", "") 28 | b = strings.ReplaceAll(b, ".", "") 29 | //Strip dashes 30 | a = strings.ReplaceAll(a, "-", "") 31 | b = strings.ReplaceAll(b, "-", "") 32 | //Strip underscores 33 | a = 
strings.ReplaceAll(a, "_", "") 34 | b = strings.ReplaceAll(b, "_", "") 35 | //Convert to lowercase 36 | a = strings.ToLower(a) 37 | b = strings.ToLower(b) 38 | 39 | return a == b 40 | } 41 | 42 | type IArr interface { 43 | HistoryContains(string) (int64, bool) 44 | MarkHistoryItemAsFailed(int64) error 45 | HandleErrorTransfer(*premiumizeme.Transfer, int64, *premiumizeme.Premiumizeme) error 46 | GetArrName() string 47 | } 48 | 49 | type SonarrArr struct { 50 | Name string 51 | ClientMutex sync.Mutex 52 | Client *sonarr.Sonarr 53 | HistoryMutex sync.Mutex 54 | History *sonarr.History 55 | LastUpdateMutex sync.Mutex 56 | LastUpdate time.Time 57 | LastUpdateCount int 58 | LastUpdateCountMutex sync.Mutex 59 | Config *config.Config 60 | } 61 | 62 | type RadarrArr struct { 63 | Name string 64 | ClientMutex sync.Mutex 65 | Client *radarr.Radarr 66 | HistoryMutex sync.Mutex 67 | History *radarr.History 68 | LastUpdateMutex sync.Mutex 69 | LastUpdate time.Time 70 | LastUpdateCount int 71 | LastUpdateCountMutex sync.Mutex 72 | Config *config.Config 73 | } 74 | 75 | type LidarrArr struct { 76 | Name string 77 | ClientMutex sync.Mutex 78 | Client *lidarr.Lidarr 79 | HistoryMutex sync.Mutex 80 | History *lidarr.History 81 | LastUpdateMutex sync.Mutex 82 | LastUpdate time.Time 83 | LastUpdateCount int 84 | LastUpdateCountMutex sync.Mutex 85 | Config *config.Config 86 | } 87 | -------------------------------------------------------------------------------- /.github/workflows/pr_artifact_comment.yml: -------------------------------------------------------------------------------- 1 | name: Comment build artifacts on pr 2 | on: 3 | workflow_run: 4 | workflows: ['Build'] 5 | types: [completed] 6 | jobs: 7 | pr_comment: 8 | if: github.event.workflow_run.event == 'pull_request' && github.event.workflow_run.conclusion == 'success' 9 | runs-on: ubuntu-latest 10 | steps: 11 | - uses: actions/github-script@v5 12 | with: 13 | # This snippet is public-domain, taken from 14 | # https://github.com/oprypin/nightly.link/blob/master/.github/workflows/pr-comment.yml 15 | script: | 16 | async function upsertComment(owner, repo, issue_number, purpose, body) { 17 | const {data: comments} = await github.rest.issues.listComments( 18 | {owner, repo, issue_number}); 19 | const marker = ``; 20 | body = marker + "\n" + body; 21 | const existing = comments.filter((c) => c.body.includes(marker)); 22 | if (existing.length > 0) { 23 | const last = existing[existing.length - 1]; 24 | core.info(`Updating comment ${last.id}`); 25 | await github.rest.issues.updateComment({ 26 | owner, repo, 27 | body, 28 | comment_id: last.id, 29 | }); 30 | } else { 31 | core.info(`Creating a comment in issue / PR #${issue_number}`); 32 | await github.rest.issues.createComment({issue_number, body, owner, repo}); 33 | } 34 | } 35 | const {owner, repo} = context.repo; 36 | const run_id = ${{github.event.workflow_run.id}}; 37 | const pull_requests = ${{ toJSON(github.event.workflow_run.pull_requests) }}; 38 | if (!pull_requests.length) { 39 | return core.error("This workflow doesn't match any pull requests!"); 40 | } 41 | const artifacts = await github.paginate( 42 | github.rest.actions.listWorkflowRunArtifacts, {owner, repo, run_id}); 43 | if (!artifacts.length) { 44 | return core.error(`No artifacts found`); 45 | } 46 | let body = `Download the artifacts for this pull request:\n`; 47 | for (const art of artifacts) { 48 | body += `\n* [${art.name}.zip](https://nightly.link/${owner}/${repo}/actions/artifacts/${art.id}.zip)`; 49 | } 50 | core.info("Review 
thread message body:", body); 51 | for (const pr of pull_requests) { 52 | await upsertComment(owner, repo, pr.number, 53 | "nightly-link", body); 54 | } -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 2 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 3 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 4 | github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= 5 | github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= 6 | github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= 7 | github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= 8 | github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= 9 | github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= 10 | github.com/orandin/lumberjackrus v1.0.1 h1:7ysDQ0MHD79zIFN9/EiDHjUcgopNi5ehtxFDy8rUkWo= 11 | github.com/orandin/lumberjackrus v1.0.1/go.mod h1:xYLt6H8W93pKnQgUQaxsApS0Eb4BwHLOkxk5DVzf5H0= 12 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 13 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 14 | github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= 15 | github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= 16 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 17 | github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 18 | github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= 19 | github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 20 | golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c= 21 | golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= 22 | golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 23 | golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= 24 | golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= 25 | golift.io/starr v1.1.0 h1:KTAecOEne/zKQvrh8mcfEA7YlQ39FnvjZRIhJSIvxL4= 26 | golift.io/starr v1.1.0/go.mod h1:WnLkyfF7X2q676mXriGMZQrBA3wGt1BjA2qdxMmA/wg= 27 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= 28 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 29 | gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= 30 | gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= 31 | gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= 32 | gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= 33 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 34 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 35 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 36 | 
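The daemon entry point that follows resolves its flag defaults through utils.EnvOrDefault; internal/utils/utils.go is not part of this section, so the following is only a plausible guess at that helper, not the repository's actual code:

```go
package utils

import "os"

// EnvOrDefault returns the environment variable named by key, or def when the
// variable is unset or empty. main.go relies on it so that the PREMIUMIZEARR_*
// variables can override the built-in flag defaults.
func EnvOrDefault(key, def string) string {
	if value, ok := os.LookupEnv(key); ok && value != "" {
		return value
	}
	return def
}
```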
-------------------------------------------------------------------------------- /cmd/premiumizearrd/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | 7 | "github.com/ensingerphilipp/premiumizearr-nova/internal/utils" 8 | ) 9 | 10 | const asciiArt = ".______ .______ _______ .___ ___. __ __ __ .___ ___. __ ________ _______ ___ .______ .______ \n" + 11 | "| _ \\ | _ \\ | ____|| \\/ | | | | | | | | \\/ | | | | / | ____| / \\ | _ \\ | _ \\ \n" + 12 | "| |_) | | |_) | | |__ | \\ / | | | | | | | | \\ / | | | ---/ / | |__ / ^ \\ | |_) | | |_) | \n" + 13 | "| ___/ | / | __| | |\\/| | | | | | | | | |\\/| | | | / / | __| / /_\\ \\ | / | / \n" + 14 | "| | | |\\ \\----| |____ | | | | | | | '--' | | | | | | | / /----.| |____ / _____ \\ | |\\ \\----| |\\ \\----. \n" + 15 | "| _| | _| `._____|_______||__| |__| |__| \\______/ |__| |__| |__| /________||_______/__/ \\__\\ | _| `._____| _| `.__| \n" + 16 | " \n" + 17 | " .__ __. ______ ____ ____ ___ \n" + 18 | " | \\ | | / __ \\ \\ \\ / / / \\ \n" + 19 | " ______ | \\| | | | | | \\ \\/ / / ^ \\ \n" + 20 | " |______| | . ` | | | | | \\ / / /_\\ \\ \n" + 21 | " | |\\ | | '--' | \\ / / _____ \\ \n" + 22 | " |__| \\__| \\______/ \\__/ /__/ \\__\\ \n" + 23 | " Version: 1.5.0 \n" 24 | 25 | func main() { 26 | //Flags 27 | var logLevel string 28 | var configFile string 29 | var loggingDirectory string 30 | 31 | //Parse flags 32 | fmt.Println(asciiArt) 33 | fmt.Println("Premiumizearr-Nova Version: 1.5.0") 34 | flag.StringVar(&logLevel, "log", utils.EnvOrDefault("PREMIUMIZEARR_LOG_LEVEL", "info"), "Logging level: \n \tinfo,debug,trace") 35 | flag.StringVar(&configFile, "config", utils.EnvOrDefault("PREMIUMIZEARR_CONFIG_DIR_PATH", "./"), "The directory the config.yml is located in") 36 | flag.StringVar(&loggingDirectory, "logging-dir", utils.EnvOrDefault("PREMIUMIZEARR_LOGGING_DIR_PATH", "./"), "The directory logs are to be written to") 37 | flag.Parse() 38 | 39 | App := &App{} 40 | App.Start(logLevel, configFile, loggingDirectory) 41 | 42 | } 43 | -------------------------------------------------------------------------------- /internal/arr/sonarr.go: -------------------------------------------------------------------------------- 1 | package arr 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | 7 | "github.com/ensingerphilipp/premiumizearr-nova/pkg/premiumizeme" 8 | log "github.com/sirupsen/logrus" 9 | "golift.io/starr/sonarr" 10 | ) 11 | 12 | ////// 13 | //Sonarr 14 | ////// 15 | 16 | //Data Access 17 | 18 | // GetHistory: Updates the history if it's been more than 15 seconds since last update 19 | func (arr *SonarrArr) GetHistory() (sonarr.History, error) { 20 | arr.LastUpdateMutex.Lock() 21 | defer arr.LastUpdateMutex.Unlock() 22 | arr.HistoryMutex.Lock() 23 | defer arr.HistoryMutex.Unlock() 24 | arr.ClientMutex.Lock() 25 | defer arr.ClientMutex.Unlock() 26 | arr.LastUpdateCountMutex.Lock() 27 | defer arr.LastUpdateCountMutex.Unlock() 28 | 29 | if time.Since(arr.LastUpdate) > time.Duration(arr.Config.ArrHistoryUpdateIntervalSeconds)*time.Second || arr.History == nil { 30 | his, err := arr.Client.GetHistory(0, 1000) 31 | if err != nil { 32 | return sonarr.History{}, err 33 | } 34 | 35 | arr.History = his 36 | arr.LastUpdate = time.Now() 37 | arr.LastUpdateCount = his.TotalRecords 38 | log.Debugf("[Sonarr] [%s]: Updated history, next update in %d seconds", arr.Name, arr.Config.ArrHistoryUpdateIntervalSeconds) 39 | } 40 | 41 | log.Tracef("[Sonarr] [%s]: Returning from 
GetHistory", arr.Name) 42 | return *arr.History, nil 43 | } 44 | 45 | func (arr *SonarrArr) MarkHistoryItemAsFailed(id int64) error { 46 | arr.ClientMutex.Lock() 47 | defer arr.ClientMutex.Unlock() 48 | return arr.Client.Fail(id) 49 | } 50 | 51 | func (arr *SonarrArr) GetArrName() string { 52 | return "Sonarr" 53 | } 54 | 55 | // Functions 56 | 57 | func (arr *SonarrArr) HistoryContains(name string) (int64, bool) { 58 | log.Tracef("Sonarr [%s]: Checking history for %s", arr.Name, name) 59 | his, err := arr.GetHistory() 60 | if err != nil { 61 | return 0, false 62 | } 63 | log.Tracef("Sonarr [%s]: Got History, now Locking History", arr.Name) 64 | arr.HistoryMutex.Lock() 65 | defer arr.HistoryMutex.Unlock() 66 | 67 | for _, item := range his.Records { 68 | if CompareFileNamesFuzzy(item.SourceTitle, name) { 69 | return item.ID, true 70 | } 71 | } 72 | log.Tracef("Sonarr [%s]: %s Not in History", name) 73 | 74 | return -1, false 75 | } 76 | 77 | func (arr *SonarrArr) HandleErrorTransfer(transfer *premiumizeme.Transfer, arrID int64, pm *premiumizeme.Premiumizeme) error { 78 | his, err := arr.GetHistory() 79 | if err != nil { 80 | return fmt.Errorf("failed to get history from sonarr: %+v", err) 81 | } 82 | 83 | arr.HistoryMutex.Lock() 84 | defer arr.HistoryMutex.Unlock() 85 | 86 | complete := false 87 | 88 | for _, queueItem := range his.Records { 89 | if queueItem.ID == arrID { 90 | if queueItem.EventType == "grabbed" { 91 | err := arr.MarkHistoryItemAsFailed(queueItem.ID) 92 | if err != nil { 93 | return fmt.Errorf("failed to blacklist item in sonarr: %+v", err) 94 | } 95 | err = pm.DeleteTransfer(transfer.ID) 96 | if err != nil { 97 | return fmt.Errorf("failed to delete transfer from premiumize.me: %+v", err) 98 | } 99 | complete = true 100 | break 101 | } 102 | } 103 | } 104 | 105 | if !complete { 106 | err := pm.DeleteTransfer(transfer.ID) 107 | if err != nil { 108 | return fmt.Errorf("failed to delete transfer from premiumize.me: %+v", err) 109 | } 110 | } 111 | 112 | return nil 113 | } 114 | -------------------------------------------------------------------------------- /internal/service/arrs_manager_service.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/ensingerphilipp/premiumizearr-nova/internal/arr" 7 | "github.com/ensingerphilipp/premiumizearr-nova/internal/config" 8 | log "github.com/sirupsen/logrus" 9 | "golift.io/starr" 10 | "golift.io/starr/radarr" 11 | "golift.io/starr/sonarr" 12 | "golift.io/starr/lidarr" 13 | ) 14 | 15 | type ArrsManagerService struct { 16 | arrs []arr.IArr 17 | config *config.Config 18 | } 19 | 20 | func (am ArrsManagerService) New() ArrsManagerService { 21 | am.arrs = []arr.IArr{} 22 | return am 23 | } 24 | 25 | func (am *ArrsManagerService) Init(_config *config.Config) { 26 | am.config = _config 27 | } 28 | 29 | func (am *ArrsManagerService) Start() { 30 | am.arrs = []arr.IArr{} 31 | log.Debugf("Starting ArrsManagerService") 32 | for _, arr_config := range am.config.Arrs { 33 | switch arr_config.Type { 34 | case config.Sonarr: 35 | c := starr.New(arr_config.APIKey, arr_config.URL, 0) 36 | wrapper := arr.SonarrArr{ 37 | Name: arr_config.Name, 38 | Client: sonarr.New(c), 39 | History: nil, 40 | LastUpdate: time.Now(), 41 | Config: am.config, 42 | } 43 | am.arrs = append(am.arrs, &wrapper) 44 | log.Tracef("Added Sonarr arr: %s", arr_config.Name) 45 | case config.Radarr: 46 | c := starr.New(arr_config.APIKey, arr_config.URL, 0) 47 | wrapper := 
arr.RadarrArr{ 48 | Name: arr_config.Name, 49 | Client: radarr.New(c), 50 | History: nil, 51 | LastUpdate: time.Now(), 52 | Config: am.config, 53 | } 54 | am.arrs = append(am.arrs, &wrapper) 55 | log.Tracef("Added Radarr arr: %s", arr_config.Name) 56 | case config.Lidarr: 57 | c := starr.New(arr_config.APIKey, arr_config.URL, 0) 58 | wrapper := arr.LidarrArr{ 59 | Name: arr_config.Name, 60 | Client: lidarr.New(c), 61 | History: nil, 62 | LastUpdate: time.Now(), 63 | Config: am.config, 64 | } 65 | am.arrs = append(am.arrs, &wrapper) 66 | log.Tracef("Added Lidarr arr: %s", arr_config.Name) 67 | default: 68 | log.Error("Unknown arr type: %s, not adding Arr %s", arr_config.Type, arr_config.Name) 69 | } 70 | } 71 | log.Debugf("Created %d Arrs", len(am.arrs)) 72 | } 73 | 74 | func (am *ArrsManagerService) Stop() { 75 | //noop 76 | } 77 | 78 | func (am *ArrsManagerService) ConfigUpdatedCallback(currentConfig config.Config, newConfig config.Config) { 79 | if len(currentConfig.Arrs) != len(newConfig.Arrs) { 80 | am.Start() 81 | return 82 | } 83 | for i, arr_config := range newConfig.Arrs { 84 | if currentConfig.Arrs[i].Type != arr_config.Type || 85 | currentConfig.Arrs[i].APIKey != arr_config.APIKey || 86 | currentConfig.Arrs[i].URL != arr_config.URL { 87 | am.Start() 88 | return 89 | } 90 | } 91 | } 92 | 93 | func (am *ArrsManagerService) GetArrs() []arr.IArr { 94 | return am.arrs 95 | } 96 | 97 | func TestArrConnection(arr config.ArrConfig) error { 98 | c := starr.New(arr.APIKey, arr.URL, 0) 99 | 100 | switch arr.Type { 101 | case config.Sonarr: 102 | _, err := sonarr.New(c).GetSystemStatus() 103 | return err 104 | case config.Radarr: 105 | _, err := radarr.New(c).GetSystemStatus() 106 | return err 107 | case config.Lidarr: 108 | _, err := lidarr.New(c).GetSystemStatus() 109 | return err 110 | default: 111 | return nil 112 | } 113 | } 114 | -------------------------------------------------------------------------------- /internal/arr/lidarr.go: -------------------------------------------------------------------------------- 1 | package arr 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | 7 | "github.com/ensingerphilipp/premiumizearr-nova/pkg/premiumizeme" 8 | log "github.com/sirupsen/logrus" 9 | "golift.io/starr/lidarr" 10 | ) 11 | 12 | ////// 13 | //Lidarr 14 | ////// 15 | 16 | //Data Access 17 | 18 | // GetHistory: Updates the history if it's been more than 15 seconds since last update 19 | func (arr *LidarrArr) GetHistory() (lidarr.History, error) { 20 | arr.LastUpdateMutex.Lock() 21 | defer arr.LastUpdateMutex.Unlock() 22 | arr.HistoryMutex.Lock() 23 | defer arr.HistoryMutex.Unlock() 24 | arr.ClientMutex.Lock() 25 | defer arr.ClientMutex.Unlock() 26 | arr.LastUpdateCountMutex.Lock() 27 | defer arr.LastUpdateCountMutex.Unlock() 28 | 29 | if time.Since(arr.LastUpdate) > time.Duration(arr.Config.ArrHistoryUpdateIntervalSeconds)*time.Second || arr.History == nil { 30 | his, err := arr.Client.GetHistory(0, 1000) 31 | if err != nil { 32 | return lidarr.History{}, err 33 | } 34 | 35 | arr.History = his 36 | arr.LastUpdate = time.Now() 37 | arr.LastUpdateCount = his.TotalRecords 38 | log.Debugf("[Lidarr] [%s]: Updated history, next update in %d seconds", arr.Name, arr.Config.ArrHistoryUpdateIntervalSeconds) 39 | } 40 | 41 | log.Tracef("[Lidarr] [%s]: Returning from GetHistory", arr.Name) 42 | return *arr.History, nil 43 | } 44 | 45 | func (arr *LidarrArr) MarkHistoryItemAsFailed(id int64) error { 46 | arr.ClientMutex.Lock() 47 | defer arr.ClientMutex.Unlock() 48 | return arr.Client.Fail(id) 49 | 50 | } 51 | 
52 | func (arr *LidarrArr) GetArrName() string { 53 | return "Lidarr" 54 | } 55 | 56 | //Functions 57 | 58 | func (arr *LidarrArr) HistoryContains(name string) (int64, bool) { 59 | log.Tracef("Lidarr [%s]: Checking history for %s", arr.Name, name) 60 | his, err := arr.GetHistory() 61 | if err != nil { 62 | log.Errorf("Lidarr [%s]: Failed to get history: %+v", arr.Name, err) 63 | return -1, false 64 | } 65 | log.Tracef("Lidarr [%s]: Got History, now Locking History", arr.Name) 66 | arr.HistoryMutex.Lock() 67 | defer arr.HistoryMutex.Unlock() 68 | 69 | for _, item := range his.Records { 70 | if CompareFileNamesFuzzy(item.SourceTitle, name) { 71 | return item.ID, true 72 | } 73 | } 74 | 75 | return -1, false 76 | } 77 | 78 | func (arr *LidarrArr) HandleErrorTransfer(transfer *premiumizeme.Transfer, arrID int64, pm *premiumizeme.Premiumizeme) error { 79 | his, err := arr.GetHistory() 80 | if err != nil { 81 | return fmt.Errorf("failed to get history from lidarr: %+v", err) 82 | } 83 | 84 | arr.HistoryMutex.Lock() 85 | defer arr.HistoryMutex.Unlock() 86 | 87 | complete := false 88 | 89 | for _, queueItem := range his.Records { 90 | if queueItem.ID == arrID { 91 | if queueItem.EventType == "grabbed" { 92 | err := arr.MarkHistoryItemAsFailed(queueItem.ID) 93 | if err != nil { 94 | return fmt.Errorf("failed to blacklist item in lidarr: %+v", err) 95 | } 96 | err = pm.DeleteTransfer(transfer.ID) 97 | if err != nil { 98 | return fmt.Errorf("failed to delete transfer from premiumize.me: %+v", err) 99 | } 100 | complete = true 101 | break 102 | } 103 | } 104 | } 105 | 106 | if !complete { 107 | err := pm.DeleteTransfer(transfer.ID) 108 | if err != nil { 109 | return fmt.Errorf("failed to delete transfer from premiumize.me: %+v", err) 110 | } 111 | } 112 | 113 | return nil 114 | } 115 | -------------------------------------------------------------------------------- /internal/arr/radarr.go: -------------------------------------------------------------------------------- 1 | package arr 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | 7 | "github.com/ensingerphilipp/premiumizearr-nova/pkg/premiumizeme" 8 | log "github.com/sirupsen/logrus" 9 | "golift.io/starr/radarr" 10 | ) 11 | 12 | ////// 13 | //Radarr 14 | ////// 15 | 16 | //Data Access 17 | 18 | // GetHistory: Updates the history if it's been more than 15 seconds since last update 19 | func (arr *RadarrArr) GetHistory() (radarr.History, error) { 20 | arr.LastUpdateMutex.Lock() 21 | defer arr.LastUpdateMutex.Unlock() 22 | arr.HistoryMutex.Lock() 23 | defer arr.HistoryMutex.Unlock() 24 | arr.ClientMutex.Lock() 25 | defer arr.ClientMutex.Unlock() 26 | arr.LastUpdateCountMutex.Lock() 27 | defer arr.LastUpdateCountMutex.Unlock() 28 | 29 | if time.Since(arr.LastUpdate) > time.Duration(arr.Config.ArrHistoryUpdateIntervalSeconds)*time.Second || arr.History == nil { 30 | his, err := arr.Client.GetHistory(0, 1000) 31 | if err != nil { 32 | return radarr.History{}, err 33 | } 34 | 35 | arr.History = his 36 | arr.LastUpdate = time.Now() 37 | arr.LastUpdateCount = his.TotalRecords 38 | log.Debugf("[Radarr] [%s]: Updated history, next update in %d seconds", arr.Name, arr.Config.ArrHistoryUpdateIntervalSeconds) 39 | } 40 | 41 | log.Tracef("[Radarr] [%s]: Returning from GetHistory", arr.Name) 42 | return *arr.History, nil 43 | } 44 | 45 | func (arr *RadarrArr) MarkHistoryItemAsFailed(id int64) error { 46 | arr.ClientMutex.Lock() 47 | defer arr.ClientMutex.Unlock() 48 | return arr.Client.Fail(id) 49 | 50 | } 51 | 52 | func (arr *RadarrArr) GetArrName() string { 53 | return 
"Radarr" 54 | } 55 | 56 | //Functions 57 | 58 | func (arr *RadarrArr) HistoryContains(name string) (int64, bool) { 59 | log.Tracef("Radarr [%s]: Checking history for %s", arr.Name, name) 60 | his, err := arr.GetHistory() 61 | if err != nil { 62 | log.Errorf("Radarr [%s]: Failed to get history: %+v", arr.Name, err) 63 | return -1, false 64 | } 65 | log.Tracef("Radarr [%s]: Got History, now Locking History", arr.Name) 66 | arr.HistoryMutex.Lock() 67 | defer arr.HistoryMutex.Unlock() 68 | 69 | for _, item := range his.Records { 70 | if CompareFileNamesFuzzy(item.SourceTitle, name) { 71 | return item.ID, true 72 | } 73 | } 74 | 75 | return -1, false 76 | } 77 | 78 | func (arr *RadarrArr) HandleErrorTransfer(transfer *premiumizeme.Transfer, arrID int64, pm *premiumizeme.Premiumizeme) error { 79 | his, err := arr.GetHistory() 80 | if err != nil { 81 | return fmt.Errorf("failed to get history from radarr: %+v", err) 82 | } 83 | 84 | arr.HistoryMutex.Lock() 85 | defer arr.HistoryMutex.Unlock() 86 | 87 | complete := false 88 | 89 | for _, queueItem := range his.Records { 90 | if queueItem.ID == arrID { 91 | if queueItem.EventType == "grabbed" { 92 | err := arr.MarkHistoryItemAsFailed(queueItem.ID) 93 | if err != nil { 94 | return fmt.Errorf("failed to blacklist item in radarr: %+v", err) 95 | } 96 | err = pm.DeleteTransfer(transfer.ID) 97 | if err != nil { 98 | return fmt.Errorf("failed to delete transfer from premiumize.me: %+v", err) 99 | } 100 | complete = true 101 | break 102 | } 103 | } 104 | } 105 | 106 | if !complete { 107 | err := pm.DeleteTransfer(transfer.ID) 108 | if err != nil { 109 | return fmt.Errorf("failed to delete transfer from premiumize.me: %+v", err) 110 | } 111 | } 112 | 113 | return nil 114 | } 115 | -------------------------------------------------------------------------------- /cmd/premiumizearrd/app.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "path" 5 | "time" 6 | 7 | "github.com/ensingerphilipp/premiumizearr-nova/internal/config" 8 | "github.com/ensingerphilipp/premiumizearr-nova/internal/service" 9 | "github.com/ensingerphilipp/premiumizearr-nova/pkg/premiumizeme" 10 | "github.com/orandin/lumberjackrus" 11 | log "github.com/sirupsen/logrus" 12 | ) 13 | 14 | type App struct { 15 | config config.Config 16 | premiumizemeClient premiumizeme.Premiumizeme 17 | transferManager service.TransferManagerService 18 | directoryWatcher service.DirectoryWatcherService 19 | webServer service.WebServerService 20 | arrsManager service.ArrsManagerService 21 | } 22 | 23 | // Makes go vet error - prevents copies 24 | func (app *App) Lock() {} 25 | func (app *App) UnLock() {} 26 | 27 | func (app *App) Start(logLevel string, configFile string, loggingDirectory string) error { 28 | //Setup static login 29 | lvl, err := log.ParseLevel(logLevel) 30 | if err != nil { 31 | log.Errorf("error flag not recognized, defaulting to Info!!", err) 32 | lvl = log.InfoLevel 33 | } 34 | log.SetLevel(lvl) 35 | hook, err := lumberjackrus.NewHook( 36 | &lumberjackrus.LogFile{ 37 | Filename: path.Join(loggingDirectory, "premiumizearr.general.log"), 38 | MaxSize: 100, 39 | MaxBackups: 1, 40 | MaxAge: 1, 41 | Compress: false, 42 | LocalTime: false, 43 | }, 44 | log.InfoLevel, 45 | &log.TextFormatter{}, 46 | &lumberjackrus.LogFileOpts{ 47 | log.InfoLevel: &lumberjackrus.LogFile{ 48 | Filename: path.Join(loggingDirectory, "premiumizearr.info.log"), 49 | MaxSize: 100, 50 | MaxBackups: 1, 51 | MaxAge: 1, 52 | Compress: false, 53 | LocalTime: 
false, 54 | }, 55 | log.ErrorLevel: &lumberjackrus.LogFile{ 56 | Filename: path.Join(loggingDirectory, "premiumizearr.error.log"), 57 | MaxSize: 100, // optional 58 | MaxBackups: 1, // optional 59 | MaxAge: 1, // optional 60 | Compress: false, // optional 61 | LocalTime: false, // optional 62 | }, 63 | }, 64 | ) 65 | 66 | if err != nil { 67 | panic(err) 68 | } 69 | 70 | log.AddHook(hook) 71 | 72 | log.Info("---------- Starting premiumizearr daemon ----------") 73 | log.Info("") 74 | 75 | log.Trace("Running load or create config") 76 | log.Tracef("Reading config file location from flag or env: %s", configFile) 77 | app.config, err = config.LoadOrCreateConfig(configFile, app.ConfigUpdatedCallback) 78 | 79 | if err != nil { 80 | panic(err) 81 | } 82 | 83 | // Initialisation 84 | app.premiumizemeClient = premiumizeme.NewPremiumizemeClient(app.config.PremiumizemeAPIKey) 85 | 86 | app.transferManager = service.TransferManagerService{}.New() 87 | app.directoryWatcher = service.DirectoryWatcherService{}.New() 88 | app.webServer = service.WebServerService{}.New() 89 | app.arrsManager = service.ArrsManagerService{}.New() 90 | 91 | // Initialise Services 92 | app.arrsManager.Init(&app.config) 93 | app.directoryWatcher.Init(&app.premiumizemeClient, &app.config) 94 | 95 | // Must come after arrsManager 96 | app.transferManager.Init(&app.premiumizemeClient, &app.arrsManager, &app.config) 97 | // Must come after transfer, arrManager and directory 98 | app.webServer.Init(&app.transferManager, &app.directoryWatcher, &app.arrsManager, &app.config) 99 | 100 | app.arrsManager.Start() 101 | app.webServer.Start() 102 | app.directoryWatcher.Start() 103 | //Block until the program is terminated 104 | app.transferManager.Run(15 * time.Second) 105 | 106 | return nil 107 | } 108 | 109 | func (app *App) ConfigUpdatedCallback(currentConfig config.Config, newConfig config.Config) { 110 | app.transferManager.ConfigUpdatedCallback(currentConfig, newConfig) 111 | app.directoryWatcher.ConfigUpdatedCallback(currentConfig, newConfig) 112 | app.webServer.ConfigUpdatedCallback(currentConfig, newConfig) 113 | app.arrsManager.ConfigUpdatedCallback(currentConfig, newConfig) 114 | } 115 | -------------------------------------------------------------------------------- /internal/service/web_service_handlers.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "encoding/json" 5 | "net/http" 6 | "path" 7 | "sort" 8 | 9 | "github.com/ensingerphilipp/premiumizearr-nova/internal/config" 10 | "github.com/ensingerphilipp/premiumizearr-nova/pkg/premiumizeme" 11 | ) 12 | 13 | type TransfersResponse struct { 14 | Transfers []premiumizeme.Transfer `json:"data"` 15 | Status string `json:"status"` 16 | } 17 | 18 | func (s *WebServerService) TransfersHandler(w http.ResponseWriter, r *http.Request) { 19 | var resp TransfersResponse 20 | resp.Transfers = *s.transferManager.GetTransfers() 21 | resp.Status = s.transferManager.GetStatus() 22 | data, err := json.Marshal(resp) 23 | if err != nil { 24 | http.Error(w, err.Error(), http.StatusInternalServerError) 25 | return 26 | } 27 | 28 | w.Write(data) 29 | } 30 | 31 | type BlackholeFile struct { 32 | ID int `json:"id"` 33 | Name string `json:"name"` 34 | } 35 | type BlackholeResponse struct { 36 | BlackholeFiles []BlackholeFile `json:"data"` 37 | Status string `json:"status"` 38 | } 39 | 40 | type Download struct { 41 | Added int64 `json:"added"` 42 | Name string `json:"name"` 43 | Progress string `json:"progress"` 44 | Speed 
string `json:"speed"` 45 | } 46 | type DownloadsResponse struct { 47 | Downloads []Download `json:"data"` 48 | Status string `json:"status"` 49 | } 50 | 51 | func (s *WebServerService) DownloadsHandler(w http.ResponseWriter, r *http.Request) { 52 | var resp DownloadsResponse 53 | 54 | if s.transferManager == nil { 55 | resp.Status = "Not Initialized" 56 | } else { 57 | downloads := s.transferManager.GetDownloads() 58 | 59 | // Collect downloads into a slice for sorting 60 | sortedDownloads := make([]*DownloadDetails, 0, len(downloads)) 61 | for _, v := range downloads { 62 | sortedDownloads = append(sortedDownloads, v) 63 | } 64 | 65 | // Sort downloads by Name 66 | sort.Slice(sortedDownloads, func(i, j int) bool { 67 | return sortedDownloads[i].Name < sortedDownloads[j].Name 68 | }) 69 | 70 | // Build the response 71 | for _, v := range sortedDownloads { 72 | resp.Downloads = append(resp.Downloads, Download{ 73 | Added: v.Added.Unix(), 74 | Name: v.Name, 75 | Progress: v.ProgressDownloader.GetProgress(), 76 | Speed: v.ProgressDownloader.GetSpeed(), 77 | }) 78 | } 79 | resp.Status = "" 80 | } 81 | 82 | data, err := json.Marshal(resp) 83 | if err != nil { 84 | http.Error(w, err.Error(), http.StatusInternalServerError) 85 | return 86 | } 87 | 88 | w.Write(data) 89 | } 90 | 91 | func (s *WebServerService) BlackholeHandler(w http.ResponseWriter, r *http.Request) { 92 | var resp BlackholeResponse 93 | 94 | if s.directoryWatcherService == nil { 95 | resp.Status = "Not Initialized" 96 | } else { 97 | for i, n := range s.directoryWatcherService.Queue.GetQueue() { 98 | name := path.Base(n) 99 | resp.BlackholeFiles = append(resp.BlackholeFiles, BlackholeFile{ 100 | ID: i, 101 | Name: name, 102 | }) 103 | } 104 | 105 | resp.Status = s.directoryWatcherService.GetStatus() 106 | } 107 | 108 | data, err := json.Marshal(resp) 109 | if err != nil { 110 | http.Error(w, err.Error(), http.StatusInternalServerError) 111 | return 112 | } 113 | 114 | w.Write(data) 115 | } 116 | 117 | type TestArrResponse struct { 118 | Status string `json:"status"` 119 | Succeeded bool `json:"succeeded"` 120 | } 121 | 122 | func (s *WebServerService) TestArrHandler(w http.ResponseWriter, r *http.Request) { 123 | var arr config.ArrConfig 124 | err := json.NewDecoder(r.Body).Decode(&arr) 125 | if err != nil { 126 | http.Error(w, err.Error(), http.StatusInternalServerError) 127 | return 128 | } 129 | 130 | err = TestArrConnection(arr) 131 | 132 | var resp TestArrResponse 133 | if err != nil { 134 | resp.Status = err.Error() 135 | resp.Succeeded = false 136 | } else { 137 | resp.Succeeded = true 138 | } 139 | 140 | data, err := json.Marshal(resp) 141 | if err != nil { 142 | http.Error(w, err.Error(), http.StatusInternalServerError) 143 | return 144 | } 145 | 146 | w.Write(data) 147 | } 148 | -------------------------------------------------------------------------------- /.prerelease.goreleaser.yaml: -------------------------------------------------------------------------------- 1 | version: 2 2 | env: 3 | - DOCKER_BUILDKIT=1 # Make sure all values are strings 4 | 5 | before: 6 | hooks: 7 | - go mod tidy 8 | - make web 9 | builds: 10 | - env: 11 | - CGO_ENABLED=0 12 | goos: 13 | - linux 14 | - windows 15 | main: ./cmd/premiumizearrd 16 | ldflags: 17 | - '-extldflags "-static"' 18 | flags: 19 | - '-tags=netgo,osusergo' # Add build tags 20 | binary: premiumizearrd 21 | goarch: 22 | - amd64 23 | - arm64 24 | goarm: 25 | - 8 26 | ignore: 27 | - goos: windows 28 | goarch: arm64 29 | - goos: windows 30 | goarch: arm 31 | 32 | archives: 
33 | - format_overrides: 34 | - goos: windows 35 | format: zip 36 | wrap_in_directory: true 37 | files: 38 | - README.md 39 | - LICENSE 40 | - src: build/*.service 41 | dst: ./ 42 | strip_parent: true 43 | - src: build/static/* 44 | dst: static 45 | strip_parent: true 46 | 47 | checksum: 48 | name_template: 'checksums.txt' 49 | 50 | changelog: 51 | sort: asc 52 | filters: 53 | exclude: 54 | - '^docs:' 55 | - '^test:' 56 | 57 | nfpms: 58 | - 59 | package_name: premiumizearr 60 | bindir: /opt/premiumizearrd 61 | vendor: Philipp Ensinger 62 | homepage: https://github.com/ensingerphilipp/premiumizearr-nova 63 | maintainer: Philipp 64 | description: Service to connect premiumize.me to Arr clients. 65 | license: GPLv3 66 | formats: 67 | - deb 68 | contents: 69 | - src: build/static/* 70 | dst: /opt/premiumizearrd/static/ 71 | - src: init/premiumizearrd.service 72 | dst: /etc/systemd/system/premiumizearrd.service 73 | scripts: 74 | postinstall: "scripts/postinstall.sh" 75 | 76 | dockers: 77 | - 78 | use: buildx 79 | goos: linux 80 | goarch: amd64 81 | image_templates: 82 | - "ghcr.io/ensingerphilipp/premiumizearr-nova:{{ .Tag }}-amd64" 83 | skip_push: "false" 84 | build_flag_templates: 85 | - "--pull" 86 | - "--label=org.opencontainers.image.created={{.Date}}" 87 | - "--label=org.opencontainers.image.title={{.ProjectName}}" 88 | - "--label=org.opencontainers.image.revision={{.FullCommit}}" 89 | - "--label=org.opencontainers.image.version={{.Version}}" 90 | - "--label=org.opencontainers.image.source=\"https://github.com/ensingerphilipp/premiumizearr-nova\"" 91 | - "--platform=linux/amd64" 92 | dockerfile: "docker/Dockerfile.amd64" 93 | extra_files: 94 | - build/static/ 95 | - docker/ 96 | - 97 | use: buildx 98 | goos: linux 99 | goarch: arm64 100 | image_templates: 101 | - "ghcr.io/ensingerphilipp/premiumizearr-nova:{{ .Tag }}-arm64" 102 | skip_push: "false" 103 | build_flag_templates: 104 | - "--pull" 105 | - "--label=org.opencontainers.image.created={{.Date}}" 106 | - "--label=org.opencontainers.image.title={{.ProjectName}}" 107 | - "--label=org.opencontainers.image.revision={{.FullCommit}}" 108 | - "--label=org.opencontainers.image.version={{.Version}}" 109 | - "--label=org.opencontainers.image.source=\"https://github.com/ensingerphilipp/premiumizearr-nova\"" 110 | - "--platform=linux/arm64" 111 | dockerfile: "docker/Dockerfile.arm64" 112 | extra_files: 113 | - build/static/ 114 | - docker/ 115 | 116 | docker_manifests: 117 | - skip_push: false 118 | - name_template: 'ghcr.io/ensingerphilipp/premiumizearr-nova:dev' 119 | image_templates: 120 | - 'ghcr.io/ensingerphilipp/premiumizearr-nova:{{ .Tag }}-amd64' 121 | - 'ghcr.io/ensingerphilipp/premiumizearr-nova:{{ .Tag }}-arm64' 122 | - name_template: 'ghcr.io/ensingerphilipp/premiumizearr-nova:{{ .Tag }}' 123 | image_templates: 124 | - 'ghcr.io/ensingerphilipp/premiumizearr-nova:{{ .Tag }}-amd64' 125 | - 'ghcr.io/ensingerphilipp/premiumizearr-nova:{{ .Tag }}-arm64' 126 | 127 | release: 128 | prerelease: true 129 | header: "Premiumizearr Pre-Release {{ .Tag }}" 130 | footer: "**Full Changelog**: https://github.com/ensingerphilipp/premiumizearr-nova/compare/{{ .PreviousTag }}...{{ .Tag }}" 131 | 132 | 133 | -------------------------------------------------------------------------------- /internal/progress_downloader/progress_downloader.go: -------------------------------------------------------------------------------- 1 | package progress_downloader 2 | 3 | import ( 4 | "bufio" 5 | "fmt" 6 | "os/exec" 7 | "regexp" 8 | "strconv" 9 | 10 | 
"github.com/dustin/go-humanize" 11 | log "github.com/sirupsen/logrus" 12 | ) 13 | 14 | // WriteCounter tracks download progress. 15 | type WriteCounter struct { 16 | TotalDownloaded uint64 // Total bytes downloaded 17 | Percentage string // Percentage completed (e.g., "1%") 18 | Speed string // Download speed (e.g., "35.4M") 19 | RemainingTime string // Time remaining (e.g., "18m41s") 20 | } 21 | 22 | // NewWriteCounter creates a new WriteCounter. 23 | func NewWriteCounter() *WriteCounter { 24 | return &WriteCounter{ 25 | TotalDownloaded: 0, 26 | Percentage: "0%", 27 | Speed: "0", 28 | RemainingTime: "unknown", 29 | } 30 | } 31 | 32 | // GetSpeed returns the current download speed as a string. 33 | func (wc *WriteCounter) GetSpeed() string { 34 | return fmt.Sprintf("%sB / Second", wc.Speed) 35 | } 36 | 37 | // GetProgress returns the progress as a human-readable string. 38 | func (wc *WriteCounter) GetProgress() string { 39 | //log.Infof("GetProgress called: %s complete", humanize.Bytes(wc.TotalDownloaded)) 40 | return fmt.Sprintf("%s Complete (%s)", wc.Percentage, humanize.Bytes(wc.TotalDownloaded)) 41 | } 42 | 43 | // DownloadFile uses wget for downloading and updates WriteCounter for progress tracking. 44 | func DownloadFile(checkcertificate bool, ratelimit int, url string, filepath string, counter *WriteCounter) error { 45 | // Prepare the wget command without passing empty args 46 | args := []string{"-oL", "wget", "-c"} 47 | if ratelimit != 0 { 48 | args = append(args, "--limit-rate="+strconv.Itoa(ratelimit)+"M") 49 | } 50 | args = append(args, "--progress=dot:giga") 51 | if checkcertificate == false { 52 | args = append(args, "--no-check-certificate") 53 | } 54 | args = append(args, "--no-use-server-timestamps", "-O", filepath, url) 55 | cmd := exec.Command("stdbuf", args...) 56 | 57 | // Get a pipe for the command's output 58 | stdout, err := cmd.StderrPipe() 59 | if err != nil { 60 | log.Errorf("failed to create stdout pipe: %v", err) 61 | return fmt.Errorf("failed to create stdout pipe: %v", err) 62 | } 63 | 64 | // Start the command 65 | if err := cmd.Start(); err != nil { 66 | log.Errorf("failed to start wget: %v", err) 67 | return fmt.Errorf("failed to start wget: %v", err) 68 | } 69 | // Regex to parse output lines 70 | progressRegex := regexp.MustCompile(`(\d+)([KMG]?) 
.* (\d+%) (\d+\.\d+[KMG]) (\d+[smh][0-9]*[smh]?)`) 71 | // Scan the output 72 | scanner := bufio.NewScanner(stdout) 73 | //log.Infof("Scanner created") 74 | for scanner.Scan() { 75 | line := scanner.Text() 76 | // Check if the line matches the regex 77 | matches := progressRegex.FindStringSubmatch(line) 78 | if len(matches) == 6 { // Full match expected 79 | // Extract values 80 | sizeStr, unit, percentage, speed, remainingTime := matches[1], matches[2], matches[3], matches[4], matches[5] 81 | // Convert size to bytes 82 | sizeInBytes, _ := strconv.ParseUint(sizeStr, 10, 64) 83 | switch unit { 84 | case "K": 85 | sizeInBytes *= 1024 86 | case "M": 87 | sizeInBytes *= 1024 * 1024 88 | case "G": 89 | sizeInBytes *= 1024 * 1024 * 1024 90 | } 91 | 92 | // Update the counter with parsed values 93 | counter.TotalDownloaded = sizeInBytes 94 | counter.Percentage = percentage 95 | counter.Speed = speed 96 | counter.RemainingTime = remainingTime 97 | 98 | // Print progress 99 | //fmt.Printf("\r%s complete (%s, %s remaining)", counter.GetProgress(), counter.GetSpeed(), counter.RemainingTime) 100 | //log.Infof("\r%s complete (%s, %s remaining)", counter.GetProgress(), counter.GetSpeed(), counter.RemainingTime) 101 | } 102 | } 103 | 104 | // Check for scanner errors 105 | if err := scanner.Err(); err != nil { 106 | log.Errorf("error reading wget output: %v", err) 107 | return fmt.Errorf("error reading wget output: %v", err) 108 | } 109 | 110 | // Wait for wget to finish 111 | if err := cmd.Wait(); err != nil { 112 | log.Errorf("wget command failed: %v", err) 113 | return fmt.Errorf("wget command failed: %v", err) 114 | } 115 | 116 | fmt.Println("\nDownload completed successfully.") 117 | return nil 118 | } 119 | -------------------------------------------------------------------------------- /internal/service/web_service.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "html/template" 7 | "net/http" 8 | "os" 9 | "path/filepath" 10 | "strings" 11 | "time" 12 | 13 | "github.com/gorilla/mux" 14 | "github.com/ensingerphilipp/premiumizearr-nova/internal/config" 15 | log "github.com/sirupsen/logrus" 16 | ) 17 | 18 | type IndexTemplates struct { 19 | RootPath string 20 | } 21 | 22 | var indexBytes []byte 23 | 24 | type WebServerService struct { 25 | transferManager *TransferManagerService 26 | directoryWatcherService *DirectoryWatcherService 27 | arrsManagerService *ArrsManagerService 28 | config *config.Config 29 | srv *http.Server 30 | } 31 | 32 | func (s WebServerService) New() WebServerService { 33 | s.config = nil 34 | s.transferManager = nil 35 | s.directoryWatcherService = nil 36 | s.arrsManagerService = nil 37 | s.srv = nil 38 | return s 39 | } 40 | 41 | func (s *WebServerService) ConfigUpdatedCallback(currentConfig config.Config, newConfig config.Config) { 42 | if currentConfig.BindIP != newConfig.BindIP || 43 | currentConfig.BindPort != newConfig.BindPort || 44 | currentConfig.WebRoot != newConfig.WebRoot { 45 | log.Tracef("Config updated, restarting web server...") 46 | s.srv.Close() 47 | s.Start() 48 | } 49 | } 50 | 51 | func (s *WebServerService) Init(transferManager *TransferManagerService, directoryWatcher *DirectoryWatcherService, arrManager *ArrsManagerService, config *config.Config) { 52 | s.transferManager = transferManager 53 | s.directoryWatcherService = directoryWatcher 54 | s.arrsManagerService = arrManager 55 | s.config = config 56 | } 57 | 58 | func (s *WebServerService) Start() { 59 | 
log.Info("Starting web server...") 60 | tmpl, err := template.ParseFiles("./static/index.html") 61 | if err != nil { 62 | log.Fatal(err) 63 | } 64 | 65 | var ibytes bytes.Buffer 66 | err = tmpl.Execute(&ibytes, &IndexTemplates{s.config.WebRoot}) 67 | if err != nil { 68 | log.Fatal(err) 69 | } 70 | indexBytes = ibytes.Bytes() 71 | 72 | spa := spaHandler{ 73 | staticPath: "static", 74 | indexPath: "index.html", 75 | webRoot: s.config.WebRoot, 76 | } 77 | 78 | r := mux.NewRouter() 79 | 80 | r.HandleFunc("/api/transfers", s.TransfersHandler) 81 | r.HandleFunc("/api/downloads", s.DownloadsHandler) 82 | r.HandleFunc("/api/blackhole", s.BlackholeHandler) 83 | r.HandleFunc("/api/config", s.ConfigHandler) 84 | r.HandleFunc("/api/testArr", s.TestArrHandler) 85 | 86 | r.PathPrefix("/").Handler(spa) 87 | 88 | address := fmt.Sprintf("%s:%s", s.config.BindIP, s.config.BindPort) 89 | 90 | s.srv = &http.Server{ 91 | Handler: r, 92 | Addr: address, 93 | // Good practice: enforce timeouts for servers you create! 94 | WriteTimeout: 15 * time.Second, 95 | ReadTimeout: 15 * time.Second, 96 | } 97 | 98 | log.Infof("Web server started on %s", address) 99 | 100 | go s.srv.ListenAndServe() 101 | } 102 | 103 | // Shamelessly stolen from mux examples https://github.com/gorilla/mux#examples 104 | type spaHandler struct { 105 | staticPath string 106 | indexPath string 107 | webRoot string 108 | } 109 | 110 | func (h spaHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { 111 | // get the absolute path to prevent directory traversal 112 | path, err := filepath.Abs(r.URL.Path) 113 | if err != nil { 114 | // if we failed to get the absolute path respond with a 400 bad request 115 | // and stop 116 | http.Error(w, err.Error(), http.StatusBadRequest) 117 | return 118 | } 119 | 120 | if h.webRoot != "" { 121 | path = strings.Replace(path, h.webRoot, "", 1) 122 | } 123 | // prepend the path with the path to the static directory 124 | path = filepath.Join(h.staticPath, path) 125 | 126 | // check whether a file exists at the given path 127 | _, err = os.Stat(path) 128 | if os.IsNotExist(err) || strings.HasSuffix(path, h.staticPath) { 129 | // file does not exist, serve index.html 130 | // http.ServeFile(w, r, filepath.Join(h.staticPath, h.indexPath)) 131 | // file does not exist, serve index.html template 132 | w.Write(indexBytes) 133 | return 134 | } else if err != nil { 135 | // if we got an error (that wasn't that the file doesn't exist) stating the 136 | // file, return a 500 internal server error and stop 137 | http.Error(w, err.Error(), http.StatusInternalServerError) 138 | return 139 | } 140 | 141 | r.URL.Path = strings.Replace(path, h.staticPath, "", -1) 142 | // otherwise, use http.FileServer to serve the static dir 143 | http.FileServer(http.Dir(h.staticPath)).ServeHTTP(w, r) 144 | } 145 | -------------------------------------------------------------------------------- /.goreleaser.yaml: -------------------------------------------------------------------------------- 1 | version: 2 2 | env: 3 | - DOCKER_BUILDKIT=1 # Make sure all values are strings 4 | 5 | before: 6 | hooks: 7 | - go mod tidy 8 | - make web 9 | builds: 10 | - env: 11 | - CGO_ENABLED=0 12 | goos: 13 | - linux 14 | - windows 15 | main: ./cmd/premiumizearrd 16 | ldflags: 17 | - '-extldflags "-static"' 18 | flags: 19 | - '-tags=netgo,osusergo' # Add build tags 20 | binary: premiumizearrd 21 | goarch: 22 | - amd64 23 | - arm64 24 | goarm: 25 | - 8 26 | ignore: 27 | - goos: windows 28 | goarch: arm64 29 | - goos: windows 30 | goarch: arm 31 | 32 | 
archives: 33 | - format_overrides: 34 | - goos: windows 35 | format: zip 36 | wrap_in_directory: true 37 | files: 38 | - README.md 39 | - LICENSE 40 | - src: build/*.service 41 | dst: ./ 42 | strip_parent: true 43 | - src: build/static/* 44 | dst: static 45 | strip_parent: true 46 | 47 | checksum: 48 | name_template: 'checksums.txt' 49 | 50 | snapshot: 51 | name_template: "{{ incpatch .Version }}-next" 52 | 53 | changelog: 54 | sort: asc 55 | filters: 56 | exclude: 57 | - '^docs:' 58 | - '^test:' 59 | 60 | nfpms: 61 | - 62 | package_name: premiumizearr 63 | bindir: /opt/premiumizearrd 64 | vendor: Philipp Ensinger 65 | homepage: https://github.com/ensingerphilipp/premiumizearr-nova 66 | maintainer: Philipp 67 | description: Service to connect premiumize.me to Arr clients. 68 | license: GPLv3 69 | formats: 70 | - deb 71 | contents: 72 | - src: build/static/* 73 | dst: /opt/premiumizearrd/static/ 74 | - src: init/premiumizearrd.service 75 | dst: /etc/systemd/system/premiumizearrd.service 76 | scripts: 77 | postinstall: "scripts/postinstall.sh" 78 | 79 | dockers: 80 | - 81 | use: buildx 82 | goos: linux 83 | goarch: amd64 84 | image_templates: 85 | - "ghcr.io/ensingerphilipp/premiumizearr-nova:{{ .Tag }}-amd64" 86 | skip_push: "false" 87 | build_flag_templates: 88 | - "--pull" 89 | - "--label=org.opencontainers.image.created={{.Date}}" 90 | - "--label=org.opencontainers.image.title={{.ProjectName}}" 91 | - "--label=org.opencontainers.image.revision={{.FullCommit}}" 92 | - "--label=org.opencontainers.image.version={{.Version}}" 93 | - "--label=org.opencontainers.image.source=\"https://github.com/ensingerphilipp/premiumizearr-nova\"" 94 | - "--platform=linux/amd64" 95 | dockerfile: "docker/Dockerfile.amd64" 96 | extra_files: 97 | - build/static/ 98 | - docker/ 99 | - 100 | use: buildx 101 | goos: linux 102 | goarch: arm64 103 | image_templates: 104 | - "ghcr.io/ensingerphilipp/premiumizearr-nova:{{ .Tag }}-arm64" 105 | skip_push: "false" 106 | build_flag_templates: 107 | - "--pull" 108 | - "--label=org.opencontainers.image.created={{.Date}}" 109 | - "--label=org.opencontainers.image.title={{.ProjectName}}" 110 | - "--label=org.opencontainers.image.revision={{.FullCommit}}" 111 | - "--label=org.opencontainers.image.version={{.Version}}" 112 | - "--label=org.opencontainers.image.source=\"https://github.com/ensingerphilipp/premiumizearr-nova\"" 113 | - "--platform=linux/arm64" 114 | dockerfile: "docker/Dockerfile.arm64" 115 | extra_files: 116 | - build/static/ 117 | - docker/ 118 | 119 | docker_manifests: 120 | # Release variants not created on rc-$i tags 121 | - skip_push: auto 122 | - name_template: 'ghcr.io/ensingerphilipp/premiumizearr-nova:latest' 123 | image_templates: 124 | - 'ghcr.io/ensingerphilipp/premiumizearr-nova:{{ .Tag }}-amd64' 125 | - 'ghcr.io/ensingerphilipp/premiumizearr-nova:{{ .Tag }}-arm64' 126 | - name_template: 'ghcr.io/ensingerphilipp/premiumizearr-nova:{{ .Tag }}' 127 | image_templates: 128 | - 'ghcr.io/ensingerphilipp/premiumizearr-nova:{{ .Tag }}-amd64' 129 | - 'ghcr.io/ensingerphilipp/premiumizearr-nova:{{ .Tag }}-arm64' 130 | - name_template: 'ghcr.io/ensingerphilipp/premiumizearr-nova:{{ .Major }}' 131 | image_templates: 132 | - 'ghcr.io/ensingerphilipp/premiumizearr-nova:{{ .Tag }}-amd64' 133 | - 'ghcr.io/ensingerphilipp/premiumizearr-nova:{{ .Tag }}-arm64' 134 | - name_template: 'ghcr.io/ensingerphilipp/premiumizearr-nova:{{ .Major }}.{{ .Minor }}' 135 | image_templates: 136 | - 'ghcr.io/ensingerphilipp/premiumizearr-nova:{{ .Tag }}-amd64' 137 | - 
'ghcr.io/ensingerphilipp/premiumizearr-nova:{{ .Tag }}-arm64' 138 | 139 | release: 140 | # If set to auto, will mark the release as not ready for production 141 | # in case there is an indicator for this in the tag e.g. v1.0.0-rc1 142 | # If set to true, will mark the release as not ready for production. 143 | # Default is false. 144 | prerelease: auto 145 | footer: "**Full Changelog**: https://github.com/ensingerphilipp/premiumizearr-nova/compare/{{ .PreviousTag }}...{{ .Tag }}" 146 | 147 | 148 | -------------------------------------------------------------------------------- /internal/utils/utils.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "archive/zip" 5 | "fmt" 6 | "io" 7 | "os" 8 | "path/filepath" 9 | "strings" 10 | 11 | "github.com/ensingerphilipp/premiumizearr-nova/pkg/premiumizeme" 12 | log "github.com/sirupsen/logrus" 13 | ) 14 | 15 | func StripDownloadTypesExtention(fileName string) string { 16 | var exts = [...]string{".nzb", ".magnet", ".torrent"} 17 | for _, ext := range exts { 18 | fileName = strings.TrimSuffix(fileName, ext) 19 | } 20 | 21 | return fileName 22 | } 23 | 24 | func StripMediaTypesExtention(fileName string) string { 25 | var exts = [...]string{".mkv", ".mp4", ".avi", ".mov", ".flv", ".wmv", ".mpg", ".mpeg", ".m4v", ".3gp", ".3g2", ".m2ts", ".mts", ".ts", ".webm", ".m4a", ".m4b", ".m4p", ".m4r", ".m4v"} 26 | for _, ext := range exts { 27 | fileName = strings.TrimSuffix(fileName, ext) 28 | } 29 | 30 | return fileName 31 | } 32 | 33 | // https://golangcode.com/unzip-files-in-go/ 34 | func Unzip(src string, dest string) error { 35 | r, err := zip.OpenReader(src) 36 | if err != nil { 37 | return err 38 | } 39 | defer r.Close() 40 | 41 | for _, f := range r.File { 42 | // Store filename/path for returning and using later on 43 | fpath := filepath.Join(dest, f.Name) 44 | 45 | // Check for ZipSlip. 
More Info: https://snyk.io/research/zip-slip-vulnerability#go 46 | if !strings.HasPrefix(fpath, filepath.Clean(dest)+string(os.PathSeparator)) { 47 | return fmt.Errorf("%s: illegal file path", fpath) 48 | } 49 | 50 | if f.FileInfo().IsDir() { 51 | // Make Folder 52 | os.MkdirAll(fpath, os.ModePerm) 53 | continue 54 | } 55 | 56 | // Make File 57 | if err = os.MkdirAll(filepath.Dir(fpath), os.ModePerm); err != nil { 58 | return err 59 | } 60 | 61 | outFile, err := os.OpenFile(fpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode()) 62 | if err != nil { 63 | return err 64 | } 65 | 66 | rc, err := f.Open() 67 | if err != nil { 68 | return err 69 | } 70 | 71 | _, err = io.Copy(outFile, rc) 72 | 73 | // Close the file without defer to close before next iteration of loop 74 | outFile.Close() 75 | rc.Close() 76 | 77 | if err != nil { 78 | return err 79 | } 80 | } 81 | 82 | return nil 83 | } 84 | 85 | func StringInSlice(a string, list []string) int { 86 | for i, b := range list { 87 | if b == a { 88 | return i 89 | } 90 | } 91 | return -1 92 | } 93 | 94 | func GetDownloadsFolderIDFromPremiumizeme(premiumizemeClient *premiumizeme.Premiumizeme, transferFolderName string) string { 95 | var downloadsFolderID string 96 | folders, err := premiumizemeClient.GetFolders() 97 | if err != nil { 98 | log.Errorf("Error getting folders: %s", err) 99 | log.Errorf("Cannot read folders from premiumize.me, application will not run!") 100 | return "" 101 | } 102 | 103 | for _, folder := range folders { 104 | if folder.Name == transferFolderName { 105 | downloadsFolderID = folder.ID 106 | log.Debugf("Found downloads folder with ID: %s", folder.ID) 107 | } 108 | } 109 | 110 | if len(downloadsFolderID) == 0 { 111 | id, err := premiumizemeClient.CreateFolder(transferFolderName, nil) 112 | if err != nil { 113 | log.Errorf("Cannot create downloads folder on premiumize.me, application will not run correctly! %+v", err) 114 | } 115 | downloadsFolderID = id 116 | } 117 | 118 | return downloadsFolderID 119 | } 120 | 121 | func EnvOrDefault(envName string, defaultValue string) string { 122 | envValue := os.Getenv(envName) 123 | if len(envValue) == 0 { 124 | return defaultValue 125 | } 126 | return envValue 127 | } 128 | 129 | func IsRunningInDockerContainer() bool { 130 | // docker creates a .dockerenv file at the root 131 | // of the directory tree inside the container. 
132 | // if this file exists then the viewer is running 133 | // from inside a container so return true 134 | 135 | if _, err := os.Stat("/.dockerenv"); err == nil { 136 | return true 137 | } 138 | 139 | return false 140 | } 141 | 142 | func IsDirectoryWriteable(path string) bool { 143 | if _, err := os.Stat(path); os.IsNotExist(err) { 144 | log.Errorf("directory does not exist: ", path) 145 | return false 146 | } 147 | 148 | if _, err := os.Create(path + "/test.txt"); err != nil { 149 | log.Errorf("cannot write test.txt to directory: ", path) 150 | return false 151 | } 152 | 153 | // Delete test file 154 | if err := os.Remove(path + "/test.txt"); err != nil { 155 | log.Errorf("cannot delete test.txt file in: ", path) 156 | return false 157 | } 158 | 159 | return true 160 | } 161 | 162 | // https://stackoverflow.com/questions/33450980/how-to-remove-all-contents-of-a-directory-using-golang 163 | func RemoveContents(dir string) error { 164 | d, err := os.Open(dir) 165 | if err != nil { 166 | return err 167 | } 168 | defer d.Close() 169 | names, err := d.Readdirnames(-1) 170 | if err != nil { 171 | return err 172 | } 173 | for _, name := range names { 174 | err = os.RemoveAll(filepath.Join(dir, name)) 175 | if err != nil { 176 | return err 177 | } 178 | } 179 | return nil 180 | } 181 | -------------------------------------------------------------------------------- /internal/config/config.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "errors" 5 | "io/ioutil" 6 | 7 | "github.com/ensingerphilipp/premiumizearr-nova/internal/utils" 8 | log "github.com/sirupsen/logrus" 9 | 10 | "os" 11 | "path" 12 | 13 | "gopkg.in/yaml.v2" 14 | ) 15 | 16 | // LoadOrCreateConfig - Loads the config from disk or creates a new one 17 | func LoadOrCreateConfig(altConfigLocation string, _appCallback AppCallback) (Config, error) { 18 | config, err := loadConfigFromDisk(altConfigLocation) 19 | 20 | if err != nil { 21 | if err == ErrFailedToFindConfigFile { 22 | log.Warn("No config file found, created default config file") 23 | config = defaultConfig() 24 | } 25 | if err == ErrInvalidConfigFile || err == ErrFailedToSaveConfig { 26 | return config, err 27 | } 28 | } 29 | 30 | // Override directory if running in docker 31 | if utils.IsRunningInDockerContainer() { 32 | // Override config data directories if blank 33 | if config.BlackholeDirectory == "" { 34 | log.Trace("Running in docker, overriding blank directory settings for blackhole directory to /blackhole inside the container") 35 | config.BlackholeDirectory = "/blackhole" 36 | } 37 | if config.DownloadsDirectory == "" { 38 | log.Trace("Running in docker, overriding blank directory settings for downloads directory to /downloads inside the container") 39 | config.DownloadsDirectory = "/downloads" 40 | } 41 | } 42 | 43 | log.Tracef("Setting config location to %s", altConfigLocation) 44 | 45 | config.appCallback = _appCallback 46 | config.altConfigLocation = altConfigLocation 47 | 48 | config.Save() 49 | 50 | return config, nil 51 | } 52 | 53 | // Save - Saves the config to disk 54 | func (c *Config) Save() error { 55 | log.Trace("Marshaling & saving config") 56 | data, err := yaml.Marshal(*c) 57 | if err != nil { 58 | log.Error(err) 59 | return err 60 | } 61 | 62 | savePath := "./config.yaml" 63 | if c.altConfigLocation != "" { 64 | savePath = path.Join(c.altConfigLocation, "config.yaml") 65 | } 66 | 67 | log.Tracef("Writing config to %s", savePath) 68 | err = ioutil.WriteFile(savePath, data, 
0644) 69 | if err != nil { 70 | log.Errorf("Failed to save config file: %+v", err) 71 | return err 72 | } 73 | 74 | log.Trace("Config saved") 75 | return nil 76 | } 77 | 78 | func loadConfigFromDisk(altConfigLocation string) (Config, error) { 79 | var config Config 80 | 81 | log.Trace("Trying to load config from disk") 82 | configLocation := path.Join(altConfigLocation, "config.yaml") 83 | 84 | log.Tracef("Reading config from %s", configLocation) 85 | file, err := ioutil.ReadFile(configLocation) 86 | 87 | if err != nil { 88 | log.Trace("Failed to find config file") 89 | return config, ErrFailedToFindConfigFile 90 | } 91 | 92 | log.Trace("Loading to interface") 93 | var configInterface map[interface{}]interface{} 94 | err = yaml.Unmarshal(file, &configInterface) 95 | if err != nil { 96 | log.Errorf("Failed to unmarshal config file: %+v", err) 97 | return config, ErrInvalidConfigFile 98 | } 99 | 100 | log.Trace("Unmarshalling to struct") 101 | err = yaml.Unmarshal(file, &config) 102 | if err != nil { 103 | log.Errorf("Failed to unmarshal config file: %+v", err) 104 | return config, ErrInvalidConfigFile 105 | } 106 | 107 | log.Trace("Checking for missing config fields") 108 | updated := false 109 | 110 | if configInterface["PollBlackholeDirectory"] == nil { 111 | log.Info("PollBlackholeDirectory not set, setting to false") 112 | config.PollBlackholeDirectory = false 113 | updated = true 114 | } 115 | 116 | if configInterface["SimultaneousDownloads"] == nil { 117 | log.Info("SimultaneousDownloads not set, setting to 5") 118 | config.SimultaneousDownloads = 5 119 | updated = true 120 | } 121 | 122 | if configInterface["DownloadSpeedLimit"] == nil { 123 | log.Info("DownloadSpeedLimit not set, setting to 100 Megabytes per second") 124 | config.DownloadSpeedLimit = 100 125 | updated = true 126 | } 127 | 128 | if configInterface["EnableTlsCheck"] == nil { 129 | log.Info("EnableTlsCheck not set, setting to false") 130 | config.EnableTlsCheck = false 131 | updated = true 132 | } 133 | 134 | if configInterface["TransferOnlyMode"] == nil { 135 | log.Info("TransferOnlyMode not set, setting to false") 136 | config.TransferOnlyMode = false 137 | updated = true 138 | } 139 | 140 | if configInterface["PollBlackholeIntervalMinutes"] == nil { 141 | log.Info("PollBlackholeIntervalMinutes not set, setting to 10") 142 | config.PollBlackholeIntervalMinutes = 10 143 | updated = true 144 | } 145 | 146 | if configInterface["ArrHistoryUpdateIntervalSeconds"] == nil { 147 | log.Info("ArrHistoryUpdateIntervalSeconds not set, setting to 20") 148 | config.ArrHistoryUpdateIntervalSeconds = 20 149 | updated = true 150 | } 151 | 152 | config.altConfigLocation = altConfigLocation 153 | 154 | if updated { 155 | log.Trace("Version updated saving") 156 | err = config.Save() 157 | 158 | if err == nil { 159 | log.Trace("Config saved") 160 | return config, nil 161 | } else { 162 | log.Errorf("Failed to save config to %s", configLocation) 163 | log.Error(err) 164 | return config, ErrFailedToSaveConfig 165 | } 166 | } 167 | 168 | log.Trace("Config loaded") 169 | return config, nil 170 | } 171 | 172 | func defaultConfig() Config { 173 | return Config{ 174 | PremiumizemeAPIKey: "xxxxxxxxx", 175 | Arrs: []ArrConfig{ 176 | {Name: "Sonarr", URL: "http://127.0.0.1:8989", APIKey: "xxxxxxxxx", Type: Sonarr}, 177 | {Name: "Radarr", URL: "http://127.0.0.1:7878", APIKey: "xxxxxxxxx", Type: Radarr}, 178 | {Name: "Lidarr", URL: "http://127.0.0.1:8686", APIKey: "xxxxxxxxx", Type: Lidarr}, 179 | }, 180 | BlackholeDirectory: "", 181 | 
PollBlackholeDirectory: false, 182 | PollBlackholeIntervalMinutes: 10, 183 | DownloadsDirectory: "", 184 | TransferDirectory: "arrDownloads", 185 | BindIP: "0.0.0.0", 186 | BindPort: "8182", 187 | WebRoot: "", 188 | SimultaneousDownloads: 5, 189 | DownloadSpeedLimit: 100, 190 | EnableTlsCheck: false, 191 | TransferOnlyMode: false, 192 | ArrHistoryUpdateIntervalSeconds: 20, 193 | } 194 | } 195 | 196 | var ( 197 | ErrDownloadDirectorySetToRoot = errors.New("download directory set to root") 198 | ErrDownloadDirectoryNotWriteable = errors.New("download directory not writeable") 199 | ) 200 | 201 | func (c *Config) GetDownloadsBaseLocation() (string, error) { 202 | if c.DownloadsDirectory == "" { 203 | log.Tracef("Download directory not set, using default: %s", os.TempDir()) 204 | return path.Join(os.TempDir(), "premiumizearrd"), nil 205 | } 206 | 207 | if c.DownloadsDirectory == "/" || c.DownloadsDirectory == "\\" || c.DownloadsDirectory == "C:\\" { 208 | log.Error("Download directory set to root, please set a directory") 209 | return "", ErrDownloadDirectorySetToRoot 210 | } 211 | 212 | if !utils.IsDirectoryWriteable(c.DownloadsDirectory) { 213 | log.Errorf("Download directory not writeable: %s", c.DownloadsDirectory) 214 | return c.DownloadsDirectory, ErrDownloadDirectoryNotWriteable 215 | } 216 | 217 | log.Tracef("Download directory set to: %s", c.DownloadsDirectory) 218 | return c.DownloadsDirectory, nil 219 | } 220 | -------------------------------------------------------------------------------- /internal/service/directory_watcher_service.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "io/ioutil" 5 | "os" 6 | "path" 7 | "path/filepath" 8 | "sync" 9 | "time" 10 | 11 | "github.com/ensingerphilipp/premiumizearr-nova/internal/config" 12 | "github.com/ensingerphilipp/premiumizearr-nova/internal/directory_watcher" 13 | "github.com/ensingerphilipp/premiumizearr-nova/internal/utils" 14 | "github.com/ensingerphilipp/premiumizearr-nova/pkg/premiumizeme" 15 | "github.com/ensingerphilipp/premiumizearr-nova/pkg/stringqueue" 16 | log "github.com/sirupsen/logrus" 17 | ) 18 | 19 | type DirectoryWatcherService struct { 20 | mu sync.RWMutex 21 | premiumizemeClient *premiumizeme.Premiumizeme 22 | config *config.Config 23 | Queue *stringqueue.StringQueue 24 | status string 25 | downloadsFolderID string 26 | watchDirectory *directory_watcher.WatchDirectory 27 | } 28 | 29 | const ( 30 | ERROR_LIMIT_REACHED = "Limit of transfers reached!" 31 | ERROR_ALREADY_UPLOADED = "You already added this job." 
32 | ) 33 | 34 | func (DirectoryWatcherService) New() DirectoryWatcherService { 35 | return DirectoryWatcherService{ 36 | premiumizemeClient: nil, 37 | config: nil, 38 | Queue: nil, 39 | status: "", 40 | downloadsFolderID: "", 41 | } 42 | } 43 | 44 | func (dw *DirectoryWatcherService) Init(premiumizemeClient *premiumizeme.Premiumizeme, config *config.Config) { 45 | dw.premiumizemeClient = premiumizemeClient 46 | dw.config = config 47 | } 48 | 49 | func (dw *DirectoryWatcherService) ConfigUpdatedCallback(currentConfig config.Config, newConfig config.Config) { 50 | if currentConfig.BlackholeDirectory != newConfig.BlackholeDirectory { 51 | log.Info("Blackhole directory changed, restarting directory watcher...") 52 | log.Info("Running initial directory scan...") 53 | go dw.directoryScan(dw.config.BlackholeDirectory) 54 | dw.watchDirectory.UpdatePath(newConfig.BlackholeDirectory) 55 | } 56 | 57 | if currentConfig.TransferDirectory != newConfig.TransferDirectory { 58 | log.Info("TransferDirectory directory changed, changing directory watcher...") 59 | dw.setTransferDirectory(newConfig.TransferDirectory) 60 | } 61 | 62 | if currentConfig.PollBlackholeDirectory != newConfig.PollBlackholeDirectory { 63 | log.Info("Poll blackhole directory changed, restarting directory watcher...") 64 | } 65 | } 66 | 67 | func (dw *DirectoryWatcherService) GetStatus() string { 68 | return dw.status 69 | } 70 | 71 | // Start: This is the entrypoint for the directory watcher 72 | func (dw *DirectoryWatcherService) Start() { 73 | log.Info("Starting directory watcher...") 74 | 75 | dw.downloadsFolderID = utils.GetDownloadsFolderIDFromPremiumizeme(dw.premiumizemeClient, dw.config.TransferDirectory) 76 | 77 | log.Info("Creating Queue...") 78 | dw.Queue = stringqueue.NewStringQueue() 79 | 80 | log.Info("Starting uploads processor...") 81 | go dw.processUploads() 82 | 83 | log.Info("Running initial directory scan...") 84 | go dw.directoryScan(dw.config.BlackholeDirectory) 85 | 86 | if dw.watchDirectory != nil { 87 | log.Info("Stopping directory watcher...") 88 | err := dw.watchDirectory.Stop() 89 | if err != nil { 90 | log.Errorf("Error stopping directory watcher: %s", err) 91 | } 92 | } 93 | 94 | if dw.config.PollBlackholeDirectory { 95 | log.Info("Starting directory poller...") 96 | go func() { 97 | for { 98 | if !dw.config.PollBlackholeDirectory { 99 | log.Info("Directory poller stopped") 100 | break 101 | } 102 | time.Sleep(time.Duration(dw.config.PollBlackholeIntervalMinutes) * time.Minute) 103 | log.Infof("Running directory scan of %s", dw.config.BlackholeDirectory) 104 | dw.directoryScan(dw.config.BlackholeDirectory) 105 | log.Infof("Scan complete, next scan in %d minutes", dw.config.PollBlackholeIntervalMinutes) 106 | } 107 | }() 108 | } else { 109 | log.Info("Starting directory watcher...") 110 | dw.watchDirectory = directory_watcher.NewDirectoryWatcher(dw.config.BlackholeDirectory, 111 | true, 112 | dw.checkFile, 113 | dw.addFileToQueue, 114 | ) 115 | dw.watchDirectory.Watch() 116 | } 117 | } 118 | 119 | func (dw *DirectoryWatcherService) directoryScan(p string) { 120 | log.Trace("Running directory scan") 121 | files, err := ioutil.ReadDir(p) 122 | if err != nil { 123 | log.Errorf("Error with directory scan %+v", err) 124 | return 125 | } 126 | 127 | for _, file := range files { 128 | go func(file os.FileInfo) { 129 | file_path := path.Join(p, file.Name()) 130 | if dw.checkFile(file_path) == 1 { 131 | dw.addFileToQueue(file_path) 132 | } 133 | }(file) 134 | } 135 | } 136 | 137 | func (dw *DirectoryWatcherService) 
checkFile(path string) int { 138 | log.Tracef("Checking file %s", path) 139 | 140 | fi, err := os.Stat(path) 141 | if err != nil { 142 | log.Errorf("Error checking file %s", path) 143 | return 0 144 | } 145 | 146 | if fi.IsDir() { 147 | log.Errorf("Directory created in blackhole %s ignoring (Warning premiumizearrd does not look in subfolders!)", path) 148 | return 2 149 | } 150 | 151 | ext := filepath.Ext(path) 152 | if ext == ".nzb" || ext == ".magnet" || ext == ".torrent" { 153 | return 1 154 | } else { 155 | return 0 156 | } 157 | } 158 | 159 | func (dw *DirectoryWatcherService) addFileToQueue(path string) { 160 | dw.Queue.Add(path) 161 | log.Infof("File created in blackhole %s added to Queue. Queue length %d", path, dw.Queue.Len()) 162 | } 163 | 164 | func (dw *DirectoryWatcherService) processUploads() { 165 | for { 166 | if dw.Queue.Len() < 1 { 167 | log.Trace("No files in Queue, sleeping for 10 seconds") 168 | time.Sleep(time.Second * time.Duration(10)) 169 | } 170 | 171 | isQueueFile, filePath := dw.Queue.PopTopOfQueue() 172 | if !isQueueFile { 173 | time.Sleep(time.Second * time.Duration(10)) 174 | continue 175 | } 176 | 177 | sleepTimeSeconds := 2 178 | if filePath != "" { 179 | log.Debugf("Processing %s", filePath) 180 | dw.mu.RLock() 181 | folderID := dw.downloadsFolderID 182 | dw.mu.RUnlock() 183 | err := dw.premiumizemeClient.CreateTransfer(filePath, folderID) 184 | if err != nil { 185 | switch err.Error() { 186 | case ERROR_LIMIT_REACHED: 187 | dw.status = "Limit of transfers reached!" 188 | log.Trace("Transfer limit reached waiting 10 seconds and retrying") 189 | sleepTimeSeconds = 10 190 | case ERROR_ALREADY_UPLOADED: 191 | log.Trace("File already uploaded, removing from Disk") 192 | os.Remove(filePath) 193 | default: 194 | log.Error("Error creating transfer: %s", err) 195 | } 196 | } else { 197 | dw.status = "Okay" 198 | os.Remove(filePath) 199 | if err != nil { 200 | log.Errorf("Error could not delete %s Error: %+v", filePath, err) 201 | } 202 | log.Infof("Removed %s from blackhole Queue. Queue Size: %d", filePath, dw.Queue.Len()) 203 | } 204 | time.Sleep(time.Second * time.Duration(sleepTimeSeconds)) 205 | } else { 206 | log.Errorf("Received %s from blackhole Queue. Appears to be an empty path.") 207 | } 208 | } 209 | } 210 | 211 | func (dw *DirectoryWatcherService) setTransferDirectory(newDir string) { 212 | newID := utils.GetDownloadsFolderIDFromPremiumizeme(dw.premiumizemeClient, newDir) 213 | 214 | dw.mu.Lock() 215 | defer dw.mu.Unlock() 216 | 217 | dw.downloadsFolderID = newID 218 | } 219 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Premiumizearr-Nova 2 | ## Build 1.5.0 3 | 4 | [![Build](https://github.com/ensingerphilipp/premiumizearr-nova/actions/workflows/build.yml/badge.svg)](https://github.com/ensingerphilipp/premiumizearr-nova/actions/workflows/build.yml) 5 | 6 | *NEW: Added Transfer-Only-Mode* ✅ 7 | 8 | *NEW: Skip TLS-Certificate-Check to prevent failed 0B Downloads* ✅ 9 | 10 | *NEW: Change transferfolder on Premiumize to a user-specified folder* ✅ 11 | 12 | ## Enjoying so far? Im running on ☕ 13 | 14 | 15 | ## Overview 📝 16 | Continuation and Improvement of the Premiumizearr Arr* Bridge Download Client compatible with Sonarr and Radarr. 17 | 18 | This project is based on code from [Jackdallas' Premiumizearr](https://github.com/jackdallas/premiumizearr). 
19 | It aims to improve its function and fix bugs, as the original repo has gone stale and does not respond to issues and pull requests. 20 | The code has been reused with modifications to suit my own use case. 21 | 22 | * Complete Downloader Revamp (Fix EOF DataStream Error, Improve Speed, Graceful Resumable Downloads, Limitable Downloadspeed) 23 | * Added .torrent support, Add Support for Single Files, Switch from ZIP API to Direct Download 24 | * Fix Gui Row Ordering Bugs, Improve API 25 | * Updated base images and dependencies 26 | * Added Lidarr Support @FuJa0815 27 | * Added Transfer-Only-Mode 28 | * Several minor improvements and fixes 29 | 30 | Next Steps: 31 | * Fix Sonarr Connection Bugs 32 | 33 | ## Features 34 | 35 | - Monitor blackhole directory to push `.magnet`, `.torrent` and `.nzb` to Premiumize.me 36 | - Monitor and download Premiumize.me transfers (web ui on default port 8182) 37 | - Mark transfers as failed in Radarr & Sonarr 38 | 39 | ## Install 40 | 41 | ### Docker 42 | It is highly recommended to use the amd64 and arm64 docker images. 43 | 44 | 1. First create data, blackhole and downloads folders that will be mounted into the docker container. 45 | 2. Make sure all folders are writeable and readable by UID 1000 and GID 1000 (or the UID/GID you want to set and use -> see the env variables in the Docker run command below) 46 | 3. Create or choose a network for the docker container to run in - **Important: if you have connection problems, try explicitly disabling IPv6 for your docker network or docker daemon, as IPv6 might break some things - see https://github.com/ensingerphilipp/Premiumizearr-Nova/issues/12** 47 | 4. Adapt the command below with the correct folders and network to run 48 | 5. Do not use sudo! 49 | 50 | 51 | [Docker images are listed here](https://github.com/ensingerphilipp/premiumizearr-nova/pkgs/container/premiumizearr-nova) 52 | 53 | ```cmd 54 | docker run -d --name premiumizearr \ 55 | --network=compose_default \ 56 | -v /mount/premiumize/data:/data \ 57 | -v /mount/premiumize/blackhole:/blackhole \ 58 | -v /mount/premiumize/downloads:/downloads \ 59 | -e PGID=1000 \ 60 | -e PUID=1000 \ 61 | -p 8182:8182 \ 62 | --restart unless-stopped \ 63 | ghcr.io/ensingerphilipp/premiumizearr-nova:latest 64 | ``` 65 | 66 | If you wish to increase logging (which you'll be asked to do if you submit an issue) you can add `-e PREMIUMIZEARR_LOG_LEVEL=trace` to the command. 67 | 68 | > Note: The /data mount is where the `config.yaml` and log files are kept 69 | > You might need to run the docker command with UID/GID 1000 on the host as well 70 | > If you absolutely cannot use docker, scroll to the bottom of the README for the unsupported installation methods; they are built automatically but untested. 71 | 72 | ## First Setup 73 | 74 | ### Premiumizearrd 75 | 76 | When run for the first time, the server will start on `http://0.0.0.0:8182` 77 | 78 | If you already use this binding for something else, you can change it in the `config.yaml` 79 | 80 | > WARNING: This app exposes API keys in the UI and does not have authentication; it is strongly recommended you put it behind a reverse proxy with auth and set the host to `127.0.0.1` to hide the app from the web.
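For orientation, here is a sketch of what the generated `config.yaml` can end up looking like. It is assembled from `defaultConfig()` and the docker-mode overrides in `internal/config/config.go` (included in this dump); the exact key casing comes from the YAML tags in `internal/config/types.go`, which is not included here, so treat the key names as assumptions rather than a copy-paste template:

```yaml
# Hypothetical config.yaml sketch - values mirror defaultConfig() in internal/config/config.go;
# key casing is assumed from the field lookups in loadConfigFromDisk().
PremiumizemeAPIKey: "your-premiumize-api-key"
Arrs:
  - Name: Sonarr
    URL: http://127.0.0.1:8989
    APIKey: "xxxxxxxxx"
    Type: Sonarr
  - Name: Radarr
    URL: http://127.0.0.1:7878
    APIKey: "xxxxxxxxx"
    Type: Radarr
  - Name: Lidarr
    URL: http://127.0.0.1:8686
    APIKey: "xxxxxxxxx"
    Type: Lidarr
BlackholeDirectory: ""            # left blank it becomes /blackhole inside the docker image
DownloadsDirectory: ""            # left blank it becomes /downloads inside the docker image
TransferDirectory: arrDownloads   # folder premiumizearr creates on premiumize.me
BindIP: 127.0.0.1                 # ships as 0.0.0.0; 127.0.0.1 is safer behind a reverse proxy
BindPort: "8182"
WebRoot: ""
PollBlackholeDirectory: false
PollBlackholeIntervalMinutes: 10
SimultaneousDownloads: 5
DownloadSpeedLimit: 100           # megabytes per second
EnableTlsCheck: false
TransferOnlyMode: false
ArrHistoryUpdateIntervalSeconds: 20
```

Several of these keys (`PollBlackholeDirectory`, `SimultaneousDownloads`, `DownloadSpeedLimit`, `EnableTlsCheck`, `TransferOnlyMode`, `PollBlackholeIntervalMinutes`, `ArrHistoryUpdateIntervalSeconds`) are backfilled with the defaults shown above by `loadConfigFromDisk()` if they are missing, and the file is re-saved on startup.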
81 | 82 | ### Sonarr/Radarr 83 | 84 | - Go to your Arr's `Download Client` settings page 85 | - Add a new Torrent Blackhole client, set the `Torrent Folder` to the previously set `BlackholeDirectory` location and set the `Watch Folder` to the previously set `DownloadsDirectory` location 86 | - Add a new Usenet Blackhole client, set the `Nzb Folder` to the previously set `BlackholeDirectory` location and set the `Watch Folder` to the previously set `DownloadsDirectory` location 87 | - Also: Don't forget to press the "Save" button when editing settings inside the premiumizearr-nova web UI 88 | 89 | ### Reverse Proxy 90 | 91 | Premiumizearr does not have authentication built in, so it's strongly recommended you use a reverse proxy. 92 | 93 | #### Nginx 94 | 95 | ```nginx 96 | location /premiumizearr/ { 97 | proxy_pass http://127.0.0.1:8182/; 98 | proxy_set_header Host $proxy_host; 99 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 100 | proxy_set_header X-Forwarded-Host $host; 101 | proxy_set_header X-Forwarded-Proto $scheme; 102 | proxy_redirect off; 103 | proxy_http_version 1.1; 104 | proxy_set_header Upgrade $http_upgrade; 105 | proxy_set_header Connection $http_connection; 106 | } 107 | ``` 108 | 109 | ## License 110 | 111 | This project is licensed under the **GNU General Public License v3.0** - see the [LICENSE](./LICENSE) file for details. 112 | 113 | ### Original Code 114 | 115 | This project reuses code from [Jackdallas' Premiumizearr](https://github.com/jackdallas/premiumizearr), which is licensed under the **GNU General Public License v3.0**. 116 | 117 | ### Modifications 118 | 119 | The following changes have been made to the original code: 120 | - See Commit History 121 | 122 | All modifications to the original code are also licensed under the same license, i.e., **GNU GPL v3**. 123 | 124 | ## Unsupported Installation Methods 125 | 126 | These methods should work but are discouraged and receive no support; you should use docker! 127 | 128 | [Grab the latest release artifact links here](https://github.com/ensingerphilipp/premiumizearr-nova/releases/) 129 | 130 | ### Binary 131 | 132 | #### System Install 133 | 134 | ```cli 135 | wget https://github.com/ensingerphilipp/premiumizearr-nova/releases/download/x.x.x/Premiumizearr_x.x.x_linux_amd64.tar.gz 136 | tar xf Premiumizearr_x.x.x_linux_amd64.tar.gz 137 | cd Premiumizearr_x.x.x_linux_amd64 138 | sudo mkdir /opt/premiumizearrd/ 139 | sudo cp -r premiumizearrd static/ /opt/premiumizearrd/ 140 | sudo cp premiumizearrd.service /etc/systemd/system/ 141 | sudo systemctl daemon-reload 142 | sudo systemctl enable premiumizearrd.service 143 | sudo systemctl start premiumizearrd.service 144 | ``` 145 | 146 | #### User Install 147 | 148 | ```cli 149 | wget https://github.com/ensingerphilipp/premiumizearr-nova/releases/download/x.x.x/Premiumizearr_x.x.x_linux_amd64.tar.gz 150 | tar xf Premiumizearr_x.x.x_linux_amd64.tar.gz 151 | cd Premiumizearr_x.x.x_linux_amd64 152 | mkdir -p ~/.local/bin/ 153 | cp -r premiumizearrd static/ ~/.local/bin/ 154 | echo -e "export PATH=~/.local/bin/:$PATH" >> ~/.bashrc 155 | source ~/.bashrc 156 | ``` 157 | 158 | You're now able to run the daemon from anywhere just by typing `premiumizearrd`. 159 | 160 | ### deb file 161 | 162 | ```cmd 163 | wget https://github.com/ensingerphilipp/premiumizearr-nova/releases/download/x.x.x/premiumizearr_x.x.x_linux_amd64.deb 164 | sudo dpkg -i premiumizearr_x.x.x_linux_amd64.deb 165 | ``` 166 | 167 | ### Windows Installation 168 | 169 | 1. 
[Download the Windows Release here](https://github.com/ensingerphilipp/premiumizearr-nova/releases/) 170 | 2. Follow the Setup Instructions and try to match them to Windows Commandline where possible 171 | -------------------------------------------------------------------------------- /web/src/pages/Info.svelte: -------------------------------------------------------------------------------- [The Svelte markup of this roughly 215-line component did not survive the dump; only its rendered text remains. The page shows three status sections - Blackhole, Downloads and Transfers - plus a "Download Speed: {HumanReadableSpeed(dlSpeed)}" readout above the transfers list.]
-------------------------------------------------------------------------------- /web/src/pages/Config.svelte: -------------------------------------------------------------------------------- [The Svelte markup of this roughly 400-line component did not survive the dump; only its rendered text remains. Under an "*Arr Settings" heading the page iterates over config.Arrs, giving each entry Name, URL and API Key inputs that call UntestArr(i) on change plus a Type dropdown with Sonarr/Radarr/Lidarr items; it continues with Premiumize.me Settings, Directory Settings, Web Server Settings and Download Settings sections, and closes with an error modal bound to {errorMessage} and a restart-required modal bound to {restartRequiredMessage}.]
385 | 400 | -------------------------------------------------------------------------------- /internal/service/transfer_manager_service.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "path" 7 | "path/filepath" 8 | "sync" 9 | "time" 10 | 11 | "github.com/ensingerphilipp/premiumizearr-nova/internal/config" 12 | "github.com/ensingerphilipp/premiumizearr-nova/internal/progress_downloader" 13 | "github.com/ensingerphilipp/premiumizearr-nova/internal/utils" 14 | "github.com/ensingerphilipp/premiumizearr-nova/pkg/premiumizeme" 15 | log "github.com/sirupsen/logrus" 16 | ) 17 | 18 | type DownloadDetails struct { 19 | Added time.Time 20 | Name string 21 | ProgressDownloader *progress_downloader.WriteCounter 22 | } 23 | 24 | type TransferManagerService struct { 25 | premiumizemeClient *premiumizeme.Premiumizeme 26 | arrsManager *ArrsManagerService 27 | config *config.Config 28 | lastUpdated int64 29 | transfers []premiumizeme.Transfer 30 | runningTask bool 31 | downloadListMutex *sync.Mutex 32 | downloadList map[string]*DownloadDetails 33 | status string 34 | downloadsFolderID string 35 | } 36 | 37 | // Handle 38 | func (t TransferManagerService) New() TransferManagerService { 39 | t.premiumizemeClient = nil 40 | t.arrsManager = nil 41 | t.config = nil 42 | t.lastUpdated = time.Now().Unix() 43 | t.transfers = make([]premiumizeme.Transfer, 0) 44 | t.runningTask = false 45 | t.downloadListMutex = &sync.Mutex{} 46 | t.downloadList = make(map[string]*DownloadDetails, 0) 47 | t.status = "" 48 | t.downloadsFolderID = "" 49 | return t 50 | } 51 | 52 | func (t *TransferManagerService) Init(pme *premiumizeme.Premiumizeme, arrsManager *ArrsManagerService, config *config.Config) { 53 | t.premiumizemeClient = pme 54 | t.arrsManager = arrsManager 55 | t.config = config 56 | t.CleanUpDownloadDirPeriod() 57 | } 58 | 59 | func (t *TransferManagerService) CleanUpDownloadDirPeriod() { 60 | log.Info("Cleaning download directory - deleting files older than 4 days") 61 | 62 | downloadBase, err := t.config.GetDownloadsBaseLocation() 63 | if err != nil { 64 | log.Errorf("Error getting download base location: %s", err.Error()) 65 | return 66 | } 67 | 68 | // Define the threshold for deletion: 4 days 69 | threshold := time.Now().AddDate(0, 0, -4) 70 | 71 | err = filepath.Walk(downloadBase, func(path string, info os.FileInfo, err error) error { 72 | if err != nil { 73 | log.Warnf("Error accessing path %s: %s", path, err.Error()) 74 | return nil // Continue processing other files/directories 75 | } 76 | 77 | // Skip the base directory itself 78 | if path == downloadBase { 79 | return nil 80 | } 81 | 82 | // Check if the file/directory is older than 4 days 83 | if info.ModTime().Before(threshold) { 84 | log.Infof("Deleting %s (last modified: %s)", path, info.ModTime()) 85 | 86 | // Remove the directory/file 87 | err = os.RemoveAll(path) 88 | if err != nil { 89 | log.Errorf("Error deleting %s: %s", path, err.Error()) 90 | } 91 | } 92 | return nil 93 | }) 94 | 95 | if err != nil { 96 | log.Errorf("Error cleaning download directory: %s", err.Error()) 97 | } 98 | } 99 | 100 | func (t *TransferManagerService) CleanUpDownloadDir() { 101 | log.Info("Cleaning download directory") 102 | 103 | downloadBase, err := t.config.GetDownloadsBaseLocation() 104 | if err != nil { 105 | log.Errorf("Error getting download base location: %s", err.Error()) 106 | return 107 | } 108 | 109 | err = utils.RemoveContents(downloadBase) 110 | if err != nil { 111 
| log.Errorf("Error cleaning download directory: %s", err.Error()) 112 | return 113 | } 114 | 115 | } 116 | 117 | func (manager *TransferManagerService) ConfigUpdatedCallback(currentConfig config.Config, newConfig config.Config) { 118 | if currentConfig.DownloadsDirectory != newConfig.DownloadsDirectory { 119 | log.Trace("Inside ConfigUpdatedCallback") 120 | manager.CleanUpDownloadDir() 121 | } 122 | 123 | if currentConfig.TransferDirectory != newConfig.TransferDirectory { 124 | log.Trace("Updating Transfer Directory in TransferManagerService") 125 | newDir := newConfig.TransferDirectory 126 | newID := utils.GetDownloadsFolderIDFromPremiumizeme(manager.premiumizemeClient, newDir) 127 | manager.downloadsFolderID = newID 128 | } 129 | } 130 | 131 | func (manager *TransferManagerService) Run(interval time.Duration) { 132 | manager.downloadsFolderID = utils.GetDownloadsFolderIDFromPremiumizeme(manager.premiumizemeClient, manager.config.TransferDirectory) 133 | for { 134 | manager.runningTask = true 135 | manager.TaskUpdateTransfersList() 136 | if !manager.config.TransferOnlyMode { 137 | manager.TaskCheckPremiumizeDownloadsFolder() 138 | } else { 139 | log.Info("TransferOnlyMode is enabled, skipping Download") 140 | } 141 | manager.runningTask = false 142 | manager.lastUpdated = time.Now().Unix() 143 | time.Sleep(interval) 144 | } 145 | } 146 | 147 | func (manager *TransferManagerService) GetDownloads() map[string]*DownloadDetails { 148 | return manager.downloadList 149 | } 150 | 151 | func (manager *TransferManagerService) GetTransfers() *[]premiumizeme.Transfer { 152 | return &manager.transfers 153 | } 154 | func (manager *TransferManagerService) GetStatus() string { 155 | return manager.status 156 | } 157 | 158 | func (manager *TransferManagerService) TaskUpdateTransfersList() { 159 | log.Debug("Running Task UpdateTransfersList") 160 | transfers, err := manager.premiumizemeClient.GetTransfers() 161 | if err != nil { 162 | log.Errorf("Error getting transfers: %s", err.Error()) 163 | return 164 | } 165 | manager.updateTransfers(transfers) 166 | 167 | log.Tracef("Checking %d transfers against %d Arr clients", len(transfers), len(manager.arrsManager.GetArrs())) 168 | for _, transfer := range transfers { 169 | found := false 170 | for _, arr := range manager.arrsManager.GetArrs() { 171 | if found { 172 | break 173 | } 174 | if transfer.Status == "error" { 175 | log.Tracef("Checking errored transfer %s against %s history", transfer.Name, arr.GetArrName()) 176 | arrID, contains := arr.HistoryContains(transfer.Name) 177 | if !contains { 178 | log.Tracef("%s history doesn't contain %s", arr.GetArrName(), transfer.Name) 179 | continue 180 | } 181 | log.Tracef("Found %s in %s history", transfer.Name, arr.GetArrName()) 182 | found = true 183 | log.Debugf("Processing transfer that has errored: %s", transfer.Name) 184 | go arr.HandleErrorTransfer(&transfer, arrID, manager.premiumizemeClient) 185 | 186 | } 187 | } 188 | } 189 | } 190 | 191 | func (manager *TransferManagerService) TaskCheckPremiumizeDownloadsFolder() { 192 | log.Debug("Running Task CheckPremiumizeDownloadsFolder") 193 | 194 | if manager.downloadsFolderID == "" { 195 | log.Errorf("Premiumize-Download-Folder ID is empty, cannot check Folder - aborting (This could be due to a Premiumize or CDN Outage)") 196 | return 197 | } 198 | 199 | items, err := manager.premiumizemeClient.ListFolder(manager.downloadsFolderID) 200 | if err != nil { 201 | log.Errorf("Error listing downloads folder: %s", err.Error()) 202 | return 203 | } 204 | 205 | for _, 
item := range items { 206 | if manager.countDownloads() < manager.config.SimultaneousDownloads { 207 | log.Debugf("Processing completed item: %s", item.Name) 208 | manager.HandleFinishedItem(item, manager.config.DownloadsDirectory) 209 | //Sleep for one Second to let Asynchronous Downloads Start and Update 210 | time.Sleep(time.Second * 1) 211 | } else { 212 | log.Debugf("Not processing any more transfers, %d are running and cap is %d", manager.countDownloads(), manager.config.SimultaneousDownloads) 213 | break 214 | } 215 | } 216 | } 217 | 218 | func (manager *TransferManagerService) updateTransfers(transfers []premiumizeme.Transfer) { 219 | manager.transfers = transfers 220 | } 221 | 222 | func (manager *TransferManagerService) addDownload(item *premiumizeme.Item) { 223 | manager.downloadListMutex.Lock() 224 | defer manager.downloadListMutex.Unlock() 225 | 226 | manager.downloadList[item.Name] = &DownloadDetails{ 227 | Added: time.Now(), 228 | Name: item.Name, 229 | ProgressDownloader: progress_downloader.NewWriteCounter(), 230 | } 231 | } 232 | 233 | func (manager *TransferManagerService) countDownloads() int { 234 | manager.downloadListMutex.Lock() 235 | defer manager.downloadListMutex.Unlock() 236 | // Calculate len(manager.downloadList) / 2 as every download also has a Parent Folder in manager.downloadList 237 | return (len(manager.downloadList) / 2) 238 | } 239 | 240 | func (manager *TransferManagerService) removeDownload(name string) { 241 | manager.downloadListMutex.Lock() 242 | defer manager.downloadListMutex.Unlock() 243 | 244 | delete(manager.downloadList, name) 245 | } 246 | 247 | func (manager *TransferManagerService) downloadExists(itemName string) bool { 248 | manager.downloadListMutex.Lock() 249 | defer manager.downloadListMutex.Unlock() 250 | 251 | for _, dl := range manager.downloadList { 252 | if dl.Name == itemName { 253 | return true 254 | } 255 | } 256 | 257 | return false 258 | } 259 | 260 | func (manager *TransferManagerService) HandleFinishedItem(item premiumizeme.Item, downloadDirectory string) { 261 | if manager.downloadExists(item.Name) { 262 | log.Tracef("Transfer %s is already downloading", item.Name) 263 | return 264 | } 265 | 266 | // If single Item is encountered (Torrent Download) it is moved into a new Folder with the Name of the Item to be downloaded during next refresh 267 | if item.Type == "file" { 268 | log.Tracef("Handling Item Type File in finished Transfer %s", item.Name) 269 | 270 | id, err := manager.premiumizemeClient.CreateFolder(item.Name+".folder", &manager.downloadsFolderID) 271 | if err != nil { 272 | log.Errorf("cannot create Folder for Single File Download! %+v", err) 273 | return 274 | } 275 | var singleFileFolderID string = id 276 | 277 | err = manager.premiumizemeClient.MoveItem(item.ID, singleFileFolderID) 278 | if err != nil { 279 | log.Errorf("cannot move Single File to Folder for Download! 
%+v", err) 280 | return 281 | } 282 | 283 | log.Infof("Single File moved to Folder for Download %s", item.Name) 284 | return 285 | } 286 | 287 | if item.Type != "folder" { 288 | log.Errorf("Item Type mismatch when trying to handle finished Transfer %s | %s", item.Name, item.Type) 289 | return 290 | } 291 | 292 | manager.addDownload(&item) 293 | go func() { 294 | defer manager.removeDownload(item.Name) 295 | err := manager.downloadFolderRecursively(item, downloadDirectory) 296 | if err != nil { 297 | log.Errorf("Error downloading item %s: %s", item.Name, err) 298 | manager.removeDownload(item.Name) 299 | return 300 | } 301 | 302 | err = manager.premiumizemeClient.DeleteFolder(item.ID) 303 | if err != nil { 304 | manager.removeDownload(item.Name) 305 | log.Errorf("Error deleting folder on premiumize.me: %s", err) 306 | return 307 | } 308 | 309 | }() 310 | } 311 | 312 | func (manager *TransferManagerService) downloadFolderRecursively(item premiumizeme.Item, downloadDirectory string) error { 313 | if item.ID == "" { 314 | return fmt.Errorf("Premiumize-Download-Folder ID is empty, cannot check Folder - aborting (This could be due to a Premiumize Outage)") 315 | } 316 | 317 | items, err := manager.premiumizemeClient.ListFolder(item.ID) 318 | if err != nil { 319 | return fmt.Errorf("error listing folder items: %w", err) 320 | } 321 | savePath := path.Join(downloadDirectory, (item.Name + "/")) 322 | log.Trace("Downloading to: ", savePath) 323 | err = os.Mkdir(savePath, os.ModePerm) 324 | if err != nil { 325 | log.Errorf("could not create save path: %s", err) 326 | // manager.removeDownload(item.Name) 327 | // return fmt.Errorf("error creating save path: %w", err) 328 | // no return due to os permissions sometime inaccurately throwing errors on different configurations 329 | } 330 | 331 | for _, item := range items { 332 | if manager.downloadExists(item.Name) { 333 | log.Tracef("Transfer %s is already downloading", item.Name) 334 | return nil 335 | } 336 | if item.Type == "file" { 337 | manager.addDownload(&item) 338 | link, err := manager.premiumizemeClient.GenerateFileLink(item.ID) 339 | if err != nil { 340 | log.Debugf("File Link Generation err: %s", err) 341 | } 342 | var fileSavePath = path.Join(savePath, item.Name) 343 | log.Trace("Downloading to: ", fileSavePath) 344 | // Add Option to Disable / Enable checking download certificate as certain CDNs have invalid / self-signed certificates 345 | var checkcertificate bool = manager.config.EnableTlsCheck 346 | var ratelimit int = manager.config.DownloadSpeedLimit 347 | err = progress_downloader.DownloadFile(checkcertificate, ratelimit, link, fileSavePath, manager.downloadList[item.Name].ProgressDownloader) 348 | if err != nil { 349 | return fmt.Errorf("error downloading file %s: %w", item.Name, err) 350 | } 351 | manager.removeDownload(item.Name) 352 | } else if item.Type == "folder" { 353 | err = manager.downloadFolderRecursively(item, savePath) 354 | if err != nil { 355 | return fmt.Errorf("error downloading folder %s: %w", item.Name, err) 356 | } 357 | } 358 | } 359 | return nil 360 | } 361 | -------------------------------------------------------------------------------- /pkg/premiumizeme/premiumizeme.go: -------------------------------------------------------------------------------- 1 | package premiumizeme 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "fmt" 7 | "io" 8 | "mime/multipart" 9 | "net/http" 10 | "net/url" 11 | "os" 12 | "path" 13 | "path/filepath" 14 | "strconv" 15 | "strings" 16 | "time" 17 | 18 | log 
"github.com/sirupsen/logrus" 19 | ) 20 | 21 | type Premiumizeme struct { 22 | APIKey string 23 | } 24 | 25 | func NewPremiumizemeClient(APIKey string) Premiumizeme { 26 | return Premiumizeme{APIKey: APIKey} 27 | } 28 | 29 | func (pm *Premiumizeme) createPremiumizemeURL(urlPath string) (url.URL, error) { 30 | u, err := url.Parse("https://www.premiumize.me/api/") 31 | if err != nil { 32 | return *u, err 33 | } 34 | u.Path = path.Join(u.Path, urlPath) 35 | q := u.Query() 36 | q.Set("apikey", pm.APIKey) 37 | u.RawQuery = q.Encode() 38 | return *u, nil 39 | } 40 | 41 | var ( 42 | ErrAPIKeyNotSet = fmt.Errorf("premiumize.me API key not set") 43 | ) 44 | 45 | func (pm *Premiumizeme) GetTransfers() ([]Transfer, error) { 46 | if pm.APIKey == "" { 47 | return nil, ErrAPIKeyNotSet 48 | } 49 | 50 | log.Trace("Getting transfers list from premiumize.me") 51 | url, err := pm.createPremiumizemeURL("/transfer/list") 52 | if err != nil { 53 | return nil, err 54 | } 55 | 56 | var ret []Transfer 57 | req, _ := http.NewRequest("GET", url.String(), nil) 58 | 59 | resp, err := http.DefaultClient.Do(req) 60 | if err != nil { 61 | return ret, err 62 | } 63 | 64 | defer resp.Body.Close() 65 | res := ListTransfersResponse{} 66 | err = json.NewDecoder(resp.Body).Decode(&res) 67 | 68 | if res.Status != "success" { 69 | return ret, fmt.Errorf("%s", res.Status) 70 | } 71 | 72 | if err != nil { 73 | return ret, err 74 | } 75 | 76 | log.Tracef("Received %d transfers", len(res.Transfers)) 77 | return res.Transfers, nil 78 | } 79 | 80 | func (pm *Premiumizeme) ListFolder(folderID string) ([]Item, error) { 81 | if pm.APIKey == "" { 82 | return nil, ErrAPIKeyNotSet 83 | } 84 | 85 | var ret []Item 86 | url, err := pm.createPremiumizemeURL("/folder/list") 87 | if err != nil { 88 | return ret, err 89 | } 90 | 91 | q := url.Query() 92 | q.Set("id", folderID) 93 | url.RawQuery = q.Encode() 94 | 95 | client := &http.Client{} 96 | request, err := http.NewRequest("GET", url.String(), nil) 97 | if err != nil { 98 | return ret, err 99 | } 100 | 101 | resp, err := client.Do(request) 102 | if err != nil { 103 | return ret, err 104 | } 105 | 106 | if resp.StatusCode != 200 { 107 | return ret, fmt.Errorf("error listing folder: %s (%d)", resp.Status, resp.StatusCode) 108 | } 109 | 110 | defer resp.Body.Close() 111 | res := ListFoldersResponse{} 112 | log.Trace("Reading response") 113 | err = json.NewDecoder(resp.Body).Decode(&res) 114 | 115 | if err != nil { 116 | return ret, err 117 | } 118 | 119 | if res.Status != "success" { 120 | return ret, fmt.Errorf(res.Message) 121 | } 122 | 123 | return res.Content, nil 124 | } 125 | 126 | func (pm *Premiumizeme) GetFolders() ([]Item, error) { 127 | if pm.APIKey == "" { 128 | return nil, ErrAPIKeyNotSet 129 | } 130 | 131 | log.Trace("Getting folder list from premiumize.me") 132 | url, err := pm.createPremiumizemeURL("/folder/list") 133 | if err != nil { 134 | return nil, err 135 | } 136 | 137 | var ret []Item 138 | req, _ := http.NewRequest("GET", url.String(), nil) 139 | 140 | resp, err := http.DefaultClient.Do(req) 141 | if err != nil { 142 | return ret, err 143 | } 144 | 145 | defer resp.Body.Close() 146 | res := ListFoldersResponse{} 147 | err = json.NewDecoder(resp.Body).Decode(&res) 148 | 149 | if res.Status != "success" { 150 | return ret, fmt.Errorf("%s", res.Status) 151 | } 152 | 153 | if err != nil { 154 | return ret, err 155 | } 156 | 157 | log.Tracef("Received %d Folders", len(res.Content)) 158 | return res.Content, nil 159 | } 160 | 161 | func (pm *Premiumizeme) CreateTransfer(filePath 
string, parentID string) error { 162 | if pm.APIKey == "" { 163 | return ErrAPIKeyNotSet 164 | } 165 | 166 | //TODO: handle file size, i.e. incorrect file being saved 167 | log.Trace("Opening file: ", filePath) 168 | file, err := os.Open(filePath) 169 | if err != nil { 170 | log.Errorf("First try failed, waiting 1 second and trying to open file: %s again", filePath) 171 | time.Sleep(1 * time.Second) 172 | file, err = os.Open(filePath) 173 | if err != nil { 174 | return err 175 | } 176 | } 177 | defer file.Close() 178 | 179 | url, err := pm.createPremiumizemeURL("/transfer/create") 180 | if err != nil { 181 | return err 182 | } 183 | 184 | client := &http.Client{} 185 | var request *http.Request 186 | 187 | switch filepath.Ext(file.Name()) { 188 | case ".nzb": 189 | request, err = createNZBRequest(file, &url, parentID) 190 | case ".magnet": 191 | request, err = createMagnetRequest(file, &url, parentID) 192 | case ".torrent": 193 | request, err = createTorrentRequest(file, &url, parentID) 194 | } 195 | 196 | if err != nil { 197 | return err 198 | } 199 | 200 | resp, err := client.Do(request) 201 | if err != nil { 202 | return err 203 | } 204 | 205 | if resp.StatusCode != 200 { 206 | return fmt.Errorf("error creating transfer: %s (%d)", resp.Status, resp.StatusCode) 207 | } 208 | 209 | defer resp.Body.Close() 210 | res := CreateTransferResponse{} 211 | log.Trace("Reading response") 212 | err = json.NewDecoder(resp.Body).Decode(&res) 213 | 214 | if err != nil { 215 | return err 216 | } 217 | 218 | if res.Status != "success" { 219 | return fmt.Errorf(res.Message) 220 | } 221 | 222 | log.Tracef("Transfer created: %+v", res) 223 | 224 | return nil 225 | } 226 | 227 | func (pm *Premiumizeme) DeleteFolder(folderID string) error { 228 | if pm.APIKey == "" { 229 | return ErrAPIKeyNotSet 230 | } 231 | 232 | url, err := pm.createPremiumizemeURL("/folder/delete") 233 | if err != nil { 234 | return err 235 | } 236 | 237 | q := url.Query() 238 | q.Set("id", folderID) 239 | url.RawQuery = q.Encode() 240 | 241 | client := &http.Client{} 242 | request, err := http.NewRequest("DELETE", url.String(), nil) 243 | if err != nil { 244 | return err 245 | } 246 | 247 | resp, err := client.Do(request) 248 | if err != nil { 249 | return err 250 | } 251 | 252 | if resp.StatusCode != 200 { 253 | return fmt.Errorf("error deleting folder: %s (%d)", resp.Status, resp.StatusCode) 254 | } 255 | 256 | defer resp.Body.Close() 257 | res := SimpleResponse{} 258 | log.Trace("Reading response") 259 | err = json.NewDecoder(resp.Body).Decode(&res) 260 | 261 | if err != nil { 262 | return err 263 | } 264 | 265 | if res.Status != "success" { 266 | return fmt.Errorf(res.Message) 267 | } 268 | 269 | log.Tracef("Folder deleted: %+v", res) 270 | 271 | return nil 272 | } 273 | 274 | func (pm *Premiumizeme) MoveItem(itemID string, folderID string) error { 275 | if pm.APIKey == "" { 276 | return ErrAPIKeyNotSet 277 | } 278 | 279 | url, err := pm.createPremiumizemeURL("/folder/paste") 280 | if err != nil { 281 | return err 282 | } 283 | 284 | q := url.Query() 285 | q.Set("files[]", itemID) 286 | q.Set("id", folderID) 287 | url.RawQuery = q.Encode() 288 | 289 | client := &http.Client{} 290 | request, err := http.NewRequest("POST", url.String(), nil) 291 | if err != nil { 292 | return err 293 | } 294 | 295 | resp, err := client.Do(request) 296 | if err != nil { 297 | return err 298 | } 299 | 300 | if resp.StatusCode != 200 { 301 | return fmt.Errorf("error moving single item to folder: %s (%d)", resp.Status, resp.StatusCode) 302 | } 303 | 304 | 
defer resp.Body.Close() 305 | res := SimpleResponse{} 306 | log.Trace("Reading response") 307 | err = json.NewDecoder(resp.Body).Decode(&res) 308 | 309 | if err != nil { 310 | return err 311 | } 312 | 313 | if res.Status != "success" { 314 | return fmt.Errorf(res.Message) 315 | } 316 | 317 | log.Tracef("Item moved: %+v", res) 318 | 319 | return nil 320 | } 321 | 322 | func (pm *Premiumizeme) CreateFolder(folderName string, parentID *string) (string, error) { 323 | if pm.APIKey == "" { 324 | return "", ErrAPIKeyNotSet 325 | } 326 | 327 | url, err := pm.createPremiumizemeURL("/folder/create") 328 | if err != nil { 329 | return "", err 330 | } 331 | 332 | q := url.Query() 333 | q.Set("name", folderName) 334 | if parentID != nil { 335 | q.Set("parent_id", *parentID) 336 | } 337 | url.RawQuery = q.Encode() 338 | 339 | client := &http.Client{} 340 | request, err := http.NewRequest("POST", url.String(), nil) 341 | if err != nil { 342 | return "", err 343 | } 344 | 345 | resp, err := client.Do(request) 346 | if err != nil { 347 | return "", err 348 | } 349 | defer resp.Body.Close() 350 | 351 | if resp.StatusCode != 200 { 352 | return "", fmt.Errorf("error creating folder: %s (%d)", resp.Status, resp.StatusCode) 353 | } 354 | 355 | res := CreateFolderResponse{} 356 | log.Trace("Reading response") 357 | err = json.NewDecoder(resp.Body).Decode(&res) 358 | if err != nil { 359 | return "", err 360 | } 361 | 362 | if res.Status != "success" { 363 | return "", fmt.Errorf(res.Message) 364 | } 365 | 366 | log.Tracef("Folder created: %+v", res) 367 | return res.ID, nil 368 | } 369 | 370 | func (pm *Premiumizeme) DeleteTransfer(id string) error { 371 | if pm.APIKey == "" { 372 | return ErrAPIKeyNotSet 373 | } 374 | 375 | url, err := pm.createPremiumizemeURL("/transfer/delete") 376 | if err != nil { 377 | return err 378 | } 379 | 380 | client := &http.Client{} 381 | request, err := createDeleteRequest(id, &url) 382 | if err != nil { 383 | return err 384 | } 385 | 386 | resp, err := client.Do(request) 387 | if err != nil { 388 | return err 389 | } 390 | 391 | if resp.StatusCode != 200 { 392 | return fmt.Errorf("error deleting transfer: %s (%d)", resp.Status, resp.StatusCode) 393 | } 394 | 395 | defer resp.Body.Close() 396 | res := SimpleResponse{} 397 | log.Trace("Reading response") 398 | err = json.NewDecoder(resp.Body).Decode(&res) 399 | 400 | if err != nil { 401 | return err 402 | } 403 | 404 | if res.Status != "success" { 405 | return fmt.Errorf("failed to delete transfer: %s, message: %+v", id, res.Message) 406 | } 407 | 408 | log.Tracef("Transfer Deleted: %+v", res) 409 | 410 | return nil 411 | } 412 | 413 | func createNZBRequest(file *os.File, url *url.URL, parentID string) (*http.Request, error) { 414 | body := &bytes.Buffer{} 415 | writer := multipart.NewWriter(body) 416 | part, err := writer.CreateFormFile("src", filepath.Base(file.Name())) 417 | 418 | if err != nil { 419 | return nil, err 420 | } 421 | 422 | io.Copy(part, file) 423 | writer.Close() 424 | 425 | part, err = writer.CreateFormField("folder_id") 426 | 427 | if err != nil { 428 | return nil, err 429 | } 430 | 431 | _, err = part.Write([]byte(parentID)) 432 | 433 | if err != nil { 434 | return nil, err 435 | } 436 | 437 | request, err := http.NewRequest("POST", url.String(), body) 438 | request.Header.Add("Content-Type", writer.FormDataContentType()) 439 | 440 | if err != nil { 441 | return nil, err 442 | } 443 | 444 | return request, nil 445 | } 446 | 447 | func createMagnetRequest(file *os.File, url *url.URL, parentID string) 
(*http.Request, error) { 448 | body := &bytes.Buffer{} 449 | writer := multipart.NewWriter(body) 450 | part, err := writer.CreateFormField("src") 451 | 452 | if err != nil { 453 | return nil, err 454 | } 455 | 456 | io.Copy(part, file) 457 | writer.Close() 458 | 459 | part, err = writer.CreateFormField("folder_id") 460 | 461 | if err != nil { 462 | return nil, err 463 | } 464 | 465 | _, err = part.Write([]byte(parentID)) 466 | 467 | if err != nil { 468 | return nil, err 469 | } 470 | 471 | request, err := http.NewRequest("POST", url.String(), body) 472 | request.Header.Add("Content-Type", writer.FormDataContentType()) 473 | 474 | if err != nil { 475 | return nil, err 476 | } 477 | 478 | return request, nil 479 | } 480 | 481 | func createTorrentRequest(file *os.File, url *url.URL, parentID string) (*http.Request, error) { 482 | body := &bytes.Buffer{} 483 | writer := multipart.NewWriter(body) 484 | part, err := writer.CreateFormFile("src", filepath.Base(file.Name())) 485 | 486 | if err != nil { 487 | return nil, err 488 | } 489 | 490 | io.Copy(part, file) 491 | writer.Close() 492 | 493 | part, err = writer.CreateFormField("folder_id") 494 | 495 | if err != nil { 496 | return nil, err 497 | } 498 | 499 | _, err = part.Write([]byte(parentID)) 500 | 501 | if err != nil { 502 | return nil, err 503 | } 504 | 505 | request, err := http.NewRequest("POST", url.String(), body) 506 | request.Header.Add("Content-Type", writer.FormDataContentType()) 507 | 508 | if err != nil { 509 | return nil, err 510 | } 511 | 512 | return request, nil 513 | } 514 | 515 | func createDeleteRequest(id string, URL *url.URL) (*http.Request, error) { 516 | // Build Values to send to endpoint 517 | data := url.Values{} 518 | data.Set("id", id) 519 | 520 | // Create and encode request 521 | request, err := http.NewRequest("POST", URL.String(), strings.NewReader(data.Encode())) 522 | if err != nil { 523 | return nil, err 524 | } 525 | 526 | // Setup headers 527 | request.Header.Add("Content-Type", "application/x-www-form-urlencoded") 528 | request.Header.Add("Content-Length", strconv.Itoa(len(data.Encode()))) 529 | 530 | return request, nil 531 | } 532 | 533 | type SRCType = int 534 | 535 | const ( 536 | SRC_FILE = iota 537 | SRC_FOLDER 538 | ) 539 | 540 | func (pm *Premiumizeme) GenerateZippedFileLink(fileID string) (string, error) { 541 | dlLink, err := pm.generateZip(fileID, SRC_FILE) 542 | if err != nil { 543 | return "", err 544 | } 545 | return dlLink, nil 546 | } 547 | 548 | func (pm *Premiumizeme) GenerateZippedFolderLink(fileID string) (string, error) { 549 | dlLink, err := pm.generateZip(fileID, SRC_FOLDER) 550 | if err != nil { 551 | return "", err 552 | } 553 | return dlLink, nil 554 | } 555 | 556 | func (pm *Premiumizeme) generateZip(ID string, srcType SRCType) (string, error) { 557 | if pm.APIKey == "" { 558 | return "", ErrAPIKeyNotSet 559 | } 560 | 561 | // Build URL with apikey 562 | URL, err := pm.createPremiumizemeURL("/zip/generate") 563 | if err != nil { 564 | return "", err 565 | } 566 | 567 | // Build Values to send to endpoint 568 | data := url.Values{} 569 | 570 | if srcType == SRC_FILE { 571 | data.Set("files[]", ID) 572 | } else if srcType == SRC_FOLDER { 573 | data.Set("folders[]", ID) 574 | } else { 575 | return "", fmt.Errorf("unknown source type: %d", srcType) 576 | } 577 | 578 | // Create and encode request 579 | request, err := http.NewRequest("POST", URL.String(), strings.NewReader(data.Encode())) 580 | if err != nil { 581 | return "", err 582 | } 583 | 584 | // Setup headers 585 | 
request.Header.Add("Content-Type", "application/x-www-form-urlencoded") 586 | request.Header.Add("Content-Length", strconv.Itoa(len(data.Encode()))) 587 | 588 | //Fire request 589 | client := &http.Client{} 590 | resp, err := client.Do(request) 591 | if err != nil { 592 | return "", err 593 | } 594 | 595 | if resp.StatusCode != 200 { 596 | return "", fmt.Errorf("error getting zip link for: %s response: %s (%d)", ID, resp.Status, resp.StatusCode) 597 | } 598 | 599 | // Decode response 600 | defer resp.Body.Close() 601 | var res GenerateZipResponse 602 | log.Trace("Reading response") 603 | err = json.NewDecoder(resp.Body).Decode(&res) 604 | 605 | log.Tracef("Zip Response: %+v", res) 606 | if err != nil { 607 | return "", err 608 | } 609 | 610 | if res.Status != "success" { 611 | return "", fmt.Errorf("error getting zip link for: %s, Status: %s", ID, res.Status) 612 | } 613 | 614 | log.Debugf("Zip link created: %+v", res.Location) 615 | 616 | return res.Location, nil 617 | } 618 | 619 | func (pm *Premiumizeme) GenerateFileLink(ID string) (string, error) { 620 | if pm.APIKey == "" { 621 | return "", ErrAPIKeyNotSet 622 | } 623 | 624 | // Build URL with apikey 625 | log.Trace("Getting Download Link for Item: ", ID) 626 | url, err := pm.createPremiumizemeURL("/item/details") 627 | if err != nil { 628 | return "", err 629 | } 630 | 631 | // Build Values to send to endpoint 632 | q := url.Query() 633 | q.Set("id", ID) 634 | url.RawQuery = q.Encode() 635 | 636 | client := &http.Client{} 637 | request, err := http.NewRequest("GET", url.String(), nil) 638 | if err != nil { 639 | return "", err 640 | } 641 | 642 | resp, err := client.Do(request) 643 | if err != nil { 644 | return "", err 645 | } 646 | 647 | if resp.StatusCode != 200 { 648 | return "", fmt.Errorf("Error listing Item Details: %s (%d)", resp.Status, resp.StatusCode) 649 | } 650 | 651 | defer resp.Body.Close() 652 | var res GenerateFileLinkResponse 653 | err = json.NewDecoder(resp.Body).Decode(&res) 654 | 655 | if res.Type != "file" { 656 | return "", fmt.Errorf("Item Type was not File: %s", res.Type) 657 | } 658 | 659 | if err != nil { 660 | return "Unknown Error: ", err 661 | } 662 | 663 | log.Debugf("File link created: %+v", res.Link) 664 | return res.Link, nil 665 | } 666 | -------------------------------------------------------------------------------- /web/public/814.814.js: -------------------------------------------------------------------------------- 1 | /*! 
For license information please see 814.814.js.LICENSE.txt */ 2 | (self.webpackChunksvelte_app=self.webpackChunksvelte_app||[]).push([[814],{814:(t,e)=>{var r;void 0===(r=function(){"use strict";var t="14.6.1";function e(t){t.parentElement.removeChild(t)}function r(t){return null!=t}function n(t){t.preventDefault()}function i(t){return"number"==typeof t&&!isNaN(t)&&isFinite(t)}function o(t,e,r){0=e[r];)r+=1;return r}function m(t,e,r){var n;if("number"==typeof e&&(e=[e]),!Array.isArray(e))throw new Error("noUiSlider (14.6.1): 'range' contains invalid value.");if(!i(n="min"===t?0:"max"===t?100:parseFloat(t))||!i(e[0]))throw new Error("noUiSlider (14.6.1): 'range' value isn't numeric.");r.xPct.push(n),r.xVal.push(e[0]),n?r.xSteps.push(!isNaN(e[1])&&e[1]):isNaN(e[1])||(r.xSteps[0]=e[1]),r.xHighestCompleteStep.push(0)}function g(t,e,r){if(e)if(r.xVal[t]!==r.xVal[t+1]){r.xSteps[t]=d([r.xVal[t],r.xVal[t+1]],e,0)/f(r.xPct[t],r.xPct[t+1]);var n=(r.xVal[t+1]-r.xVal[t])/r.xNumSteps[t],i=Math.ceil(Number(n.toFixed(3))-1),o=r.xVal[t]+r.xNumSteps[t]*i;r.xHighestCompleteStep[t]=o}else r.xSteps[t]=r.xHighestCompleteStep[t]=r.xVal[t]}function v(t,e,r){var n;this.xPct=[],this.xVal=[],this.xSteps=[r||!1],this.xNumSteps=[!1],this.xHighestCompleteStep=[],this.snap=e;var i=[];for(n in t)t.hasOwnProperty(n)&&i.push([t[n],n]);for(i.length&&"object"==typeof i[0][0]?i.sort((function(t,e){return t[0][0]-e[0][0]})):i.sort((function(t,e){return t[0]-e[0]})),n=0;nthis.xPct[i+1];)i++;else t===this.xPct[this.xPct.length-1]&&(i=this.xPct.length-2);r||t!==this.xPct[i+1]||i++;var o=1,s=e[i],a=0,l=0,u=0,c=0;for(n=r?(t-this.xPct[i])/(this.xPct[i+1]-this.xPct[i]):(this.xPct[i+1]-t)/(this.xPct[i+1]-this.xPct[i]);0=t.slice(-1)[0])return 100;var n,i,o=h(r,t),s=t[o-1],a=t[o],l=e[o-1],u=e[o];return l+(i=r,d(n=[s,a],n[0]<0?i+Math.abs(n[0]):i-n[0],0)/f(l,u))}(this.xVal,this.xPct,t)},v.prototype.fromStepping=function(t){return function(t,e,r){if(100<=r)return t.slice(-1)[0];var n,i=h(r,e),o=t[i-1],s=t[i],a=e[i-1];return n=[o,s],(r-a)*f(a,e[i])*(n[1]-n[0])/100+n[0]}(this.xVal,this.xPct,t)},v.prototype.getStep=function(t){return function(t,e,r,n){if(100===n)return n;var i,o,s=h(n,t),a=t[s-1],l=t[s];return r?(l-a)/2= 2) required for mode 'count'.");var n=e-1,i=100/n;for(e=[];n--;)e[n]=n*i;e.push(100),t="positions"}return"positions"===t?e.map((function(t){return y.fromStepping(r?y.getStep(t):t)})):"values"===t?r?e.map((function(t){return y.fromStepping(y.getStep(y.toStepping(t)))})):e:void 0}(c,t.values||!1,t.stepped||!1),m=(e=p,r=c,n=d,i={},o=y.xVal[0],s=y.xVal[y.xVal.length-1],l=a=!1,u=0,(n=n.slice().sort((function(t,e){return t-e})).filter((function(t){return!this[t]&&(this[t]=!0)}),{}))[0]!==o&&(n.unshift(o),a=!0),n[n.length-1]!==s&&(n.push(s),l=!0),n.forEach((function(t,o){var s,c,p,f,d,h,m,g,v,b,x=t,S=n[o+1],w="steps"===r;if(w&&(s=y.xNumSteps[o]),s||(s=S-x),!1!==x)for(void 0===S&&(S=x),s=Math.max(s,1e-7),c=x;c<=S;c=(c+s).toFixed(7)/1){for(g=(d=(f=y.toStepping(c))-u)/e,b=d/(v=Math.round(g)),p=1;p<=v;p+=1)i[(h=u+p*b).toFixed(5)]=[y.fromStepping(h),0];m=-1n.stepAfter.startValue&&(o=n.stepAfter.startValue-i),s=i>n.thisStep.startValue?n.thisStep.step:!1!==n.stepBefore.step&&i-n.stepBefore.highestStep,100===e?o=null:0===e&&(s=null);var a=y.countStepDecimals();return null!==o&&!1!==o&&(o=Number(o.toFixed(a))),null!==s&&!1!==s&&(s=Number(s.toFixed(a))),[s,o]}return 
u(v=w,r.cssClasses.target),0===r.dir?u(v,r.cssClasses.ltr):u(v,r.cssClasses.rtl),0===r.ort?u(v,r.cssClasses.horizontal):u(v,r.cssClasses.vertical),u(v,"rtl"===getComputedStyle(v).direction?r.cssClasses.textDirectionRtl:r.cssClasses.textDirectionLtr),l=M(v,r.cssClasses.base),function(t,e){var n=M(e,r.cssClasses.connects);f=[],(d=[]).push(L(n,t[0]));for(var i=0;i 5 | Everyone is permitted to copy and distribute verbatim copies 6 | of this license document, but changing it is not allowed. 7 | 8 | Preamble 9 | 10 | The GNU General Public License is a free, copyleft license for 11 | software and other kinds of works. 12 | 13 | The licenses for most software and other practical works are designed 14 | to take away your freedom to share and change the works. By contrast, 15 | the GNU General Public License is intended to guarantee your freedom to 16 | share and change all versions of a program--to make sure it remains free 17 | software for all its users. We, the Free Software Foundation, use the 18 | GNU General Public License for most of our software; it applies also to 19 | any other work released this way by its authors. You can apply it to 20 | your programs, too. 21 | 22 | When we speak of free software, we are referring to freedom, not 23 | price. Our General Public Licenses are designed to make sure that you 24 | have the freedom to distribute copies of free software (and charge for 25 | them if you wish), that you receive source code or can get it if you 26 | want it, that you can change the software or use pieces of it in new 27 | free programs, and that you know you can do these things. 28 | 29 | To protect your rights, we need to prevent others from denying you 30 | these rights or asking you to surrender the rights. Therefore, you have 31 | certain responsibilities if you distribute copies of the software, or if 32 | you modify it: responsibilities to respect the freedom of others. 33 | 34 | For example, if you distribute copies of such a program, whether 35 | gratis or for a fee, you must pass on to the recipients the same 36 | freedoms that you received. You must make sure that they, too, receive 37 | or can get the source code. And you must show them these terms so they 38 | know their rights. 39 | 40 | Developers that use the GNU GPL protect your rights with two steps: 41 | (1) assert copyright on the software, and (2) offer you this License 42 | giving you legal permission to copy, distribute and/or modify it. 43 | 44 | For the developers' and authors' protection, the GPL clearly explains 45 | that there is no warranty for this free software. For both users' and 46 | authors' sake, the GPL requires that modified versions be marked as 47 | changed, so that their problems will not be attributed erroneously to 48 | authors of previous versions. 49 | 50 | Some devices are designed to deny users access to install or run 51 | modified versions of the software inside them, although the manufacturer 52 | can do so. This is fundamentally incompatible with the aim of 53 | protecting users' freedom to change the software. The systematic 54 | pattern of such abuse occurs in the area of products for individuals to 55 | use, which is precisely where it is most unacceptable. Therefore, we 56 | have designed this version of the GPL to prohibit the practice for those 57 | products. If such problems arise substantially in other domains, we 58 | stand ready to extend this provision to those domains in future versions 59 | of the GPL, as needed to protect the freedom of users. 
60 | 61 | Finally, every program is threatened constantly by software patents. 62 | States should not allow patents to restrict development and use of 63 | software on general-purpose computers, but in those that do, we wish to 64 | avoid the special danger that patents applied to a free program could 65 | make it effectively proprietary. To prevent this, the GPL assures that 66 | patents cannot be used to render the program non-free. 67 | 68 | The precise terms and conditions for copying, distribution and 69 | modification follow. 70 | 71 | TERMS AND CONDITIONS 72 | 73 | 0. Definitions. 74 | 75 | "This License" refers to version 3 of the GNU General Public License. 76 | 77 | "Copyright" also means copyright-like laws that apply to other kinds of 78 | works, such as semiconductor masks. 79 | 80 | "The Program" refers to any copyrightable work licensed under this 81 | License. Each licensee is addressed as "you". "Licensees" and 82 | "recipients" may be individuals or organizations. 83 | 84 | To "modify" a work means to copy from or adapt all or part of the work 85 | in a fashion requiring copyright permission, other than the making of an 86 | exact copy. The resulting work is called a "modified version" of the 87 | earlier work or a work "based on" the earlier work. 88 | 89 | A "covered work" means either the unmodified Program or a work based 90 | on the Program. 91 | 92 | To "propagate" a work means to do anything with it that, without 93 | permission, would make you directly or secondarily liable for 94 | infringement under applicable copyright law, except executing it on a 95 | computer or modifying a private copy. Propagation includes copying, 96 | distribution (with or without modification), making available to the 97 | public, and in some countries other activities as well. 98 | 99 | To "convey" a work means any kind of propagation that enables other 100 | parties to make or receive copies. Mere interaction with a user through 101 | a computer network, with no transfer of a copy, is not conveying. 102 | 103 | An interactive user interface displays "Appropriate Legal Notices" 104 | to the extent that it includes a convenient and prominently visible 105 | feature that (1) displays an appropriate copyright notice, and (2) 106 | tells the user that there is no warranty for the work (except to the 107 | extent that warranties are provided), that licensees may convey the 108 | work under this License, and how to view a copy of this License. If 109 | the interface presents a list of user commands or options, such as a 110 | menu, a prominent item in the list meets this criterion. 111 | 112 | 1. Source Code. 113 | 114 | The "source code" for a work means the preferred form of the work 115 | for making modifications to it. "Object code" means any non-source 116 | form of a work. 117 | 118 | A "Standard Interface" means an interface that either is an official 119 | standard defined by a recognized standards body, or, in the case of 120 | interfaces specified for a particular programming language, one that 121 | is widely used among developers working in that language. 
122 | 123 | The "System Libraries" of an executable work include anything, other 124 | than the work as a whole, that (a) is included in the normal form of 125 | packaging a Major Component, but which is not part of that Major 126 | Component, and (b) serves only to enable use of the work with that 127 | Major Component, or to implement a Standard Interface for which an 128 | implementation is available to the public in source code form. A 129 | "Major Component", in this context, means a major essential component 130 | (kernel, window system, and so on) of the specific operating system 131 | (if any) on which the executable work runs, or a compiler used to 132 | produce the work, or an object code interpreter used to run it. 133 | 134 | The "Corresponding Source" for a work in object code form means all 135 | the source code needed to generate, install, and (for an executable 136 | work) run the object code and to modify the work, including scripts to 137 | control those activities. However, it does not include the work's 138 | System Libraries, or general-purpose tools or generally available free 139 | programs which are used unmodified in performing those activities but 140 | which are not part of the work. For example, Corresponding Source 141 | includes interface definition files associated with source files for 142 | the work, and the source code for shared libraries and dynamically 143 | linked subprograms that the work is specifically designed to require, 144 | such as by intimate data communication or control flow between those 145 | subprograms and other parts of the work. 146 | 147 | The Corresponding Source need not include anything that users 148 | can regenerate automatically from other parts of the Corresponding 149 | Source. 150 | 151 | The Corresponding Source for a work in source code form is that 152 | same work. 153 | 154 | 2. Basic Permissions. 155 | 156 | All rights granted under this License are granted for the term of 157 | copyright on the Program, and are irrevocable provided the stated 158 | conditions are met. This License explicitly affirms your unlimited 159 | permission to run the unmodified Program. The output from running a 160 | covered work is covered by this License only if the output, given its 161 | content, constitutes a covered work. This License acknowledges your 162 | rights of fair use or other equivalent, as provided by copyright law. 163 | 164 | You may make, run and propagate covered works that you do not 165 | convey, without conditions so long as your license otherwise remains 166 | in force. You may convey covered works to others for the sole purpose 167 | of having them make modifications exclusively for you, or provide you 168 | with facilities for running those works, provided that you comply with 169 | the terms of this License in conveying all material for which you do 170 | not control copyright. Those thus making or running the covered works 171 | for you must do so exclusively on your behalf, under your direction 172 | and control, on terms that prohibit them from making any copies of 173 | your copyrighted material outside their relationship with you. 174 | 175 | Conveying under any other circumstances is permitted solely under 176 | the conditions stated below. Sublicensing is not allowed; section 10 177 | makes it unnecessary. 178 | 179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 
180 | 181 | No covered work shall be deemed part of an effective technological 182 | measure under any applicable law fulfilling obligations under article 183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or 184 | similar laws prohibiting or restricting circumvention of such 185 | measures. 186 | 187 | When you convey a covered work, you waive any legal power to forbid 188 | circumvention of technological measures to the extent such circumvention 189 | is effected by exercising rights under this License with respect to 190 | the covered work, and you disclaim any intention to limit operation or 191 | modification of the work as a means of enforcing, against the work's 192 | users, your or third parties' legal rights to forbid circumvention of 193 | technological measures. 194 | 195 | 4. Conveying Verbatim Copies. 196 | 197 | You may convey verbatim copies of the Program's source code as you 198 | receive it, in any medium, provided that you conspicuously and 199 | appropriately publish on each copy an appropriate copyright notice; 200 | keep intact all notices stating that this License and any 201 | non-permissive terms added in accord with section 7 apply to the code; 202 | keep intact all notices of the absence of any warranty; and give all 203 | recipients a copy of this License along with the Program. 204 | 205 | You may charge any price or no price for each copy that you convey, 206 | and you may offer support or warranty protection for a fee. 207 | 208 | 5. Conveying Modified Source Versions. 209 | 210 | You may convey a work based on the Program, or the modifications to 211 | produce it from the Program, in the form of source code under the 212 | terms of section 4, provided that you also meet all of these conditions: 213 | 214 | a) The work must carry prominent notices stating that you modified 215 | it, and giving a relevant date. 216 | 217 | b) The work must carry prominent notices stating that it is 218 | released under this License and any conditions added under section 219 | 7. This requirement modifies the requirement in section 4 to 220 | "keep intact all notices". 221 | 222 | c) You must license the entire work, as a whole, under this 223 | License to anyone who comes into possession of a copy. This 224 | License will therefore apply, along with any applicable section 7 225 | additional terms, to the whole of the work, and all its parts, 226 | regardless of how they are packaged. This License gives no 227 | permission to license the work in any other way, but it does not 228 | invalidate such permission if you have separately received it. 229 | 230 | d) If the work has interactive user interfaces, each must display 231 | Appropriate Legal Notices; however, if the Program has interactive 232 | interfaces that do not display Appropriate Legal Notices, your 233 | work need not make them do so. 234 | 235 | A compilation of a covered work with other separate and independent 236 | works, which are not by their nature extensions of the covered work, 237 | and which are not combined with it such as to form a larger program, 238 | in or on a volume of a storage or distribution medium, is called an 239 | "aggregate" if the compilation and its resulting copyright are not 240 | used to limit the access or legal rights of the compilation's users 241 | beyond what the individual works permit. Inclusion of a covered work 242 | in an aggregate does not cause this License to apply to the other 243 | parts of the aggregate. 244 | 245 | 6. Conveying Non-Source Forms. 
246 | 247 | You may convey a covered work in object code form under the terms 248 | of sections 4 and 5, provided that you also convey the 249 | machine-readable Corresponding Source under the terms of this License, 250 | in one of these ways: 251 | 252 | a) Convey the object code in, or embodied in, a physical product 253 | (including a physical distribution medium), accompanied by the 254 | Corresponding Source fixed on a durable physical medium 255 | customarily used for software interchange. 256 | 257 | b) Convey the object code in, or embodied in, a physical product 258 | (including a physical distribution medium), accompanied by a 259 | written offer, valid for at least three years and valid for as 260 | long as you offer spare parts or customer support for that product 261 | model, to give anyone who possesses the object code either (1) a 262 | copy of the Corresponding Source for all the software in the 263 | product that is covered by this License, on a durable physical 264 | medium customarily used for software interchange, for a price no 265 | more than your reasonable cost of physically performing this 266 | conveying of source, or (2) access to copy the 267 | Corresponding Source from a network server at no charge. 268 | 269 | c) Convey individual copies of the object code with a copy of the 270 | written offer to provide the Corresponding Source. This 271 | alternative is allowed only occasionally and noncommercially, and 272 | only if you received the object code with such an offer, in accord 273 | with subsection 6b. 274 | 275 | d) Convey the object code by offering access from a designated 276 | place (gratis or for a charge), and offer equivalent access to the 277 | Corresponding Source in the same way through the same place at no 278 | further charge. You need not require recipients to copy the 279 | Corresponding Source along with the object code. If the place to 280 | copy the object code is a network server, the Corresponding Source 281 | may be on a different server (operated by you or a third party) 282 | that supports equivalent copying facilities, provided you maintain 283 | clear directions next to the object code saying where to find the 284 | Corresponding Source. Regardless of what server hosts the 285 | Corresponding Source, you remain obligated to ensure that it is 286 | available for as long as needed to satisfy these requirements. 287 | 288 | e) Convey the object code using peer-to-peer transmission, provided 289 | you inform other peers where the object code and Corresponding 290 | Source of the work are being offered to the general public at no 291 | charge under subsection 6d. 292 | 293 | A separable portion of the object code, whose source code is excluded 294 | from the Corresponding Source as a System Library, need not be 295 | included in conveying the object code work. 296 | 297 | A "User Product" is either (1) a "consumer product", which means any 298 | tangible personal property which is normally used for personal, family, 299 | or household purposes, or (2) anything designed or sold for incorporation 300 | into a dwelling. In determining whether a product is a consumer product, 301 | doubtful cases shall be resolved in favor of coverage. For a particular 302 | product received by a particular user, "normally used" refers to a 303 | typical or common use of that class of product, regardless of the status 304 | of the particular user or of the way in which the particular user 305 | actually uses, or expects or is expected to use, the product. 
A product 306 | is a consumer product regardless of whether the product has substantial 307 | commercial, industrial or non-consumer uses, unless such uses represent 308 | the only significant mode of use of the product. 309 | 310 | "Installation Information" for a User Product means any methods, 311 | procedures, authorization keys, or other information required to install 312 | and execute modified versions of a covered work in that User Product from 313 | a modified version of its Corresponding Source. The information must 314 | suffice to ensure that the continued functioning of the modified object 315 | code is in no case prevented or interfered with solely because 316 | modification has been made. 317 | 318 | If you convey an object code work under this section in, or with, or 319 | specifically for use in, a User Product, and the conveying occurs as 320 | part of a transaction in which the right of possession and use of the 321 | User Product is transferred to the recipient in perpetuity or for a 322 | fixed term (regardless of how the transaction is characterized), the 323 | Corresponding Source conveyed under this section must be accompanied 324 | by the Installation Information. But this requirement does not apply 325 | if neither you nor any third party retains the ability to install 326 | modified object code on the User Product (for example, the work has 327 | been installed in ROM). 328 | 329 | The requirement to provide Installation Information does not include a 330 | requirement to continue to provide support service, warranty, or updates 331 | for a work that has been modified or installed by the recipient, or for 332 | the User Product in which it has been modified or installed. Access to a 333 | network may be denied when the modification itself materially and 334 | adversely affects the operation of the network or violates the rules and 335 | protocols for communication across the network. 336 | 337 | Corresponding Source conveyed, and Installation Information provided, 338 | in accord with this section must be in a format that is publicly 339 | documented (and with an implementation available to the public in 340 | source code form), and must require no special password or key for 341 | unpacking, reading or copying. 342 | 343 | 7. Additional Terms. 344 | 345 | "Additional permissions" are terms that supplement the terms of this 346 | License by making exceptions from one or more of its conditions. 347 | Additional permissions that are applicable to the entire Program shall 348 | be treated as though they were included in this License, to the extent 349 | that they are valid under applicable law. If additional permissions 350 | apply only to part of the Program, that part may be used separately 351 | under those permissions, but the entire Program remains governed by 352 | this License without regard to the additional permissions. 353 | 354 | When you convey a copy of a covered work, you may at your option 355 | remove any additional permissions from that copy, or from any part of 356 | it. (Additional permissions may be written to require their own 357 | removal in certain cases when you modify the work.) You may place 358 | additional permissions on material, added by you to a covered work, 359 | for which you have or can give appropriate copyright permission. 
360 | 361 | Notwithstanding any other provision of this License, for material you 362 | add to a covered work, you may (if authorized by the copyright holders of 363 | that material) supplement the terms of this License with terms: 364 | 365 | a) Disclaiming warranty or limiting liability differently from the 366 | terms of sections 15 and 16 of this License; or 367 | 368 | b) Requiring preservation of specified reasonable legal notices or 369 | author attributions in that material or in the Appropriate Legal 370 | Notices displayed by works containing it; or 371 | 372 | c) Prohibiting misrepresentation of the origin of that material, or 373 | requiring that modified versions of such material be marked in 374 | reasonable ways as different from the original version; or 375 | 376 | d) Limiting the use for publicity purposes of names of licensors or 377 | authors of the material; or 378 | 379 | e) Declining to grant rights under trademark law for use of some 380 | trade names, trademarks, or service marks; or 381 | 382 | f) Requiring indemnification of licensors and authors of that 383 | material by anyone who conveys the material (or modified versions of 384 | it) with contractual assumptions of liability to the recipient, for 385 | any liability that these contractual assumptions directly impose on 386 | those licensors and authors. 387 | 388 | All other non-permissive additional terms are considered "further 389 | restrictions" within the meaning of section 10. If the Program as you 390 | received it, or any part of it, contains a notice stating that it is 391 | governed by this License along with a term that is a further 392 | restriction, you may remove that term. If a license document contains 393 | a further restriction but permits relicensing or conveying under this 394 | License, you may add to a covered work material governed by the terms 395 | of that license document, provided that the further restriction does 396 | not survive such relicensing or conveying. 397 | 398 | If you add terms to a covered work in accord with this section, you 399 | must place, in the relevant source files, a statement of the 400 | additional terms that apply to those files, or a notice indicating 401 | where to find the applicable terms. 402 | 403 | Additional terms, permissive or non-permissive, may be stated in the 404 | form of a separately written license, or stated as exceptions; 405 | the above requirements apply either way. 406 | 407 | 8. Termination. 408 | 409 | You may not propagate or modify a covered work except as expressly 410 | provided under this License. Any attempt otherwise to propagate or 411 | modify it is void, and will automatically terminate your rights under 412 | this License (including any patent licenses granted under the third 413 | paragraph of section 11). 414 | 415 | However, if you cease all violation of this License, then your 416 | license from a particular copyright holder is reinstated (a) 417 | provisionally, unless and until the copyright holder explicitly and 418 | finally terminates your license, and (b) permanently, if the copyright 419 | holder fails to notify you of the violation by some reasonable means 420 | prior to 60 days after the cessation. 
421 | 422 | Moreover, your license from a particular copyright holder is 423 | reinstated permanently if the copyright holder notifies you of the 424 | violation by some reasonable means, this is the first time you have 425 | received notice of violation of this License (for any work) from that 426 | copyright holder, and you cure the violation prior to 30 days after 427 | your receipt of the notice. 428 | 429 | Termination of your rights under this section does not terminate the 430 | licenses of parties who have received copies or rights from you under 431 | this License. If your rights have been terminated and not permanently 432 | reinstated, you do not qualify to receive new licenses for the same 433 | material under section 10. 434 | 435 | 9. Acceptance Not Required for Having Copies. 436 | 437 | You are not required to accept this License in order to receive or 438 | run a copy of the Program. Ancillary propagation of a covered work 439 | occurring solely as a consequence of using peer-to-peer transmission 440 | to receive a copy likewise does not require acceptance. However, 441 | nothing other than this License grants you permission to propagate or 442 | modify any covered work. These actions infringe copyright if you do 443 | not accept this License. Therefore, by modifying or propagating a 444 | covered work, you indicate your acceptance of this License to do so. 445 | 446 | 10. Automatic Licensing of Downstream Recipients. 447 | 448 | Each time you convey a covered work, the recipient automatically 449 | receives a license from the original licensors, to run, modify and 450 | propagate that work, subject to this License. You are not responsible 451 | for enforcing compliance by third parties with this License. 452 | 453 | An "entity transaction" is a transaction transferring control of an 454 | organization, or substantially all assets of one, or subdividing an 455 | organization, or merging organizations. If propagation of a covered 456 | work results from an entity transaction, each party to that 457 | transaction who receives a copy of the work also receives whatever 458 | licenses to the work the party's predecessor in interest had or could 459 | give under the previous paragraph, plus a right to possession of the 460 | Corresponding Source of the work from the predecessor in interest, if 461 | the predecessor has it or can get it with reasonable efforts. 462 | 463 | You may not impose any further restrictions on the exercise of the 464 | rights granted or affirmed under this License. For example, you may 465 | not impose a license fee, royalty, or other charge for exercise of 466 | rights granted under this License, and you may not initiate litigation 467 | (including a cross-claim or counterclaim in a lawsuit) alleging that 468 | any patent claim is infringed by making, using, selling, offering for 469 | sale, or importing the Program or any portion of it. 470 | 471 | 11. Patents. 472 | 473 | A "contributor" is a copyright holder who authorizes use under this 474 | License of the Program or a work on which the Program is based. The 475 | work thus licensed is called the contributor's "contributor version". 
476 | 477 | A contributor's "essential patent claims" are all patent claims 478 | owned or controlled by the contributor, whether already acquired or 479 | hereafter acquired, that would be infringed by some manner, permitted 480 | by this License, of making, using, or selling its contributor version, 481 | but do not include claims that would be infringed only as a 482 | consequence of further modification of the contributor version. For 483 | purposes of this definition, "control" includes the right to grant 484 | patent sublicenses in a manner consistent with the requirements of 485 | this License. 486 | 487 | Each contributor grants you a non-exclusive, worldwide, royalty-free 488 | patent license under the contributor's essential patent claims, to 489 | make, use, sell, offer for sale, import and otherwise run, modify and 490 | propagate the contents of its contributor version. 491 | 492 | In the following three paragraphs, a "patent license" is any express 493 | agreement or commitment, however denominated, not to enforce a patent 494 | (such as an express permission to practice a patent or covenant not to 495 | sue for patent infringement). To "grant" such a patent license to a 496 | party means to make such an agreement or commitment not to enforce a 497 | patent against the party. 498 | 499 | If you convey a covered work, knowingly relying on a patent license, 500 | and the Corresponding Source of the work is not available for anyone 501 | to copy, free of charge and under the terms of this License, through a 502 | publicly available network server or other readily accessible means, 503 | then you must either (1) cause the Corresponding Source to be so 504 | available, or (2) arrange to deprive yourself of the benefit of the 505 | patent license for this particular work, or (3) arrange, in a manner 506 | consistent with the requirements of this License, to extend the patent 507 | license to downstream recipients. "Knowingly relying" means you have 508 | actual knowledge that, but for the patent license, your conveying the 509 | covered work in a country, or your recipient's use of the covered work 510 | in a country, would infringe one or more identifiable patents in that 511 | country that you have reason to believe are valid. 512 | 513 | If, pursuant to or in connection with a single transaction or 514 | arrangement, you convey, or propagate by procuring conveyance of, a 515 | covered work, and grant a patent license to some of the parties 516 | receiving the covered work authorizing them to use, propagate, modify 517 | or convey a specific copy of the covered work, then the patent license 518 | you grant is automatically extended to all recipients of the covered 519 | work and works based on it. 520 | 521 | A patent license is "discriminatory" if it does not include within 522 | the scope of its coverage, prohibits the exercise of, or is 523 | conditioned on the non-exercise of one or more of the rights that are 524 | specifically granted under this License. 
You may not convey a covered 525 | work if you are a party to an arrangement with a third party that is 526 | in the business of distributing software, under which you make payment 527 | to the third party based on the extent of your activity of conveying 528 | the work, and under which the third party grants, to any of the 529 | parties who would receive the covered work from you, a discriminatory 530 | patent license (a) in connection with copies of the covered work 531 | conveyed by you (or copies made from those copies), or (b) primarily 532 | for and in connection with specific products or compilations that 533 | contain the covered work, unless you entered into that arrangement, 534 | or that patent license was granted, prior to 28 March 2007. 535 | 536 | Nothing in this License shall be construed as excluding or limiting 537 | any implied license or other defenses to infringement that may 538 | otherwise be available to you under applicable patent law. 539 | 540 | 12. No Surrender of Others' Freedom. 541 | 542 | If conditions are imposed on you (whether by court order, agreement or 543 | otherwise) that contradict the conditions of this License, they do not 544 | excuse you from the conditions of this License. If you cannot convey a 545 | covered work so as to satisfy simultaneously your obligations under this 546 | License and any other pertinent obligations, then as a consequence you may 547 | not convey it at all. For example, if you agree to terms that obligate you 548 | to collect a royalty for further conveying from those to whom you convey 549 | the Program, the only way you could satisfy both those terms and this 550 | License would be to refrain entirely from conveying the Program. 551 | 552 | 13. Use with the GNU Affero General Public License. 553 | 554 | Notwithstanding any other provision of this License, you have 555 | permission to link or combine any covered work with a work licensed 556 | under version 3 of the GNU Affero General Public License into a single 557 | combined work, and to convey the resulting work. The terms of this 558 | License will continue to apply to the part which is the covered work, 559 | but the special requirements of the GNU Affero General Public License, 560 | section 13, concerning interaction through a network will apply to the 561 | combination as such. 562 | 563 | 14. Revised Versions of this License. 564 | 565 | The Free Software Foundation may publish revised and/or new versions of 566 | the GNU General Public License from time to time. Such new versions will 567 | be similar in spirit to the present version, but may differ in detail to 568 | address new problems or concerns. 569 | 570 | Each version is given a distinguishing version number. If the 571 | Program specifies that a certain numbered version of the GNU General 572 | Public License "or any later version" applies to it, you have the 573 | option of following the terms and conditions either of that numbered 574 | version or of any later version published by the Free Software 575 | Foundation. If the Program does not specify a version number of the 576 | GNU General Public License, you may choose any version ever published 577 | by the Free Software Foundation. 578 | 579 | If the Program specifies that a proxy can decide which future 580 | versions of the GNU General Public License can be used, that proxy's 581 | public statement of acceptance of a version permanently authorizes you 582 | to choose that version for the Program. 
583 |
584 | Later license versions may give you additional or different
585 | permissions. However, no additional obligations are imposed on any
586 | author or copyright holder as a result of your choosing to follow a
587 | later version.
588 |
589 | 15. Disclaimer of Warranty.
590 |
591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
599 |
600 | 16. Limitation of Liability.
601 |
602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
610 | SUCH DAMAGES.
611 |
612 | 17. Interpretation of Sections 15 and 16.
613 |
614 | If the disclaimer of warranty and limitation of liability provided
615 | above cannot be given local legal effect according to their terms,
616 | reviewing courts shall apply local law that most closely approximates
617 | an absolute waiver of all civil liability in connection with the
618 | Program, unless a warranty or assumption of liability accompanies a
619 | copy of the Program in return for a fee.
620 |
621 | END OF TERMS AND CONDITIONS
622 |
623 | How to Apply These Terms to Your New Programs
624 |
625 | If you develop a new program, and you want it to be of the greatest
626 | possible use to the public, the best way to achieve this is to make it
627 | free software which everyone can redistribute and change under these terms.
628 |
629 | To do so, attach the following notices to the program. It is safest
630 | to attach them to the start of each source file to most effectively
631 | state the exclusion of warranty; and each file should have at least
632 | the "copyright" line and a pointer to where the full notice is found.
633 |
634 | <one line to give the program's name and a brief idea of what it does.>
635 | Copyright (C) <year>  <name of author>
636 |
637 | This program is free software: you can redistribute it and/or modify
638 | it under the terms of the GNU General Public License as published by
639 | the Free Software Foundation, either version 3 of the License, or
640 | (at your option) any later version.
641 |
642 | This program is distributed in the hope that it will be useful,
643 | but WITHOUT ANY WARRANTY; without even the implied warranty of
644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
645 | GNU General Public License for more details.
646 |
647 | You should have received a copy of the GNU General Public License
648 | along with this program. If not, see <https://www.gnu.org/licenses/>.
649 |
650 | Also add information on how to contact you by electronic and paper mail.
651 |
652 | If the program does terminal interaction, make it output a short
653 | notice like this when it starts in an interactive mode:
654 |
655 | <program>  Copyright (C) <year>  <name of author>
656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
657 | This is free software, and you are welcome to redistribute it
658 | under certain conditions; type `show c' for details.
659 |
660 | The hypothetical commands `show w' and `show c' should show the appropriate
661 | parts of the General Public License. Of course, your program's commands
662 | might be different; for a GUI interface, you would use an "about box".
663 |
664 | You should also get your employer (if you work as a programmer) or school,
665 | if any, to sign a "copyright disclaimer" for the program, if necessary.
666 | For more information on this, and how to apply and follow the GNU GPL, see
667 | <https://www.gnu.org/licenses/>.
668 |
669 | The GNU General Public License does not permit incorporating your program
670 | into proprietary programs. If your program is a subroutine library, you
671 | may consider it more useful to permit linking proprietary applications with
672 | the library. If this is what you want to do, use the GNU Lesser General
673 | Public License instead of this License. But first, please read
674 | <https://www.gnu.org/licenses/why-not-lgpl.html>.
675 |
--------------------------------------------------------------------------------