├── .github
│   ├── FUNDING.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.md
│   │   └── feature_request.md
│   ├── actions
│   │   ├── docker-multi-login-action
│   │   │   └── action.yml
│   │   └── docker-target-image-list-action
│   │       └── action.yml
│   └── workflows
│       ├── docker.yaml
│       ├── invalid_template.yml
│       └── support.yml
├── .gitignore
├── CHANGELOG.md
├── Dockerfile
├── LICENSE
├── README.md
├── Varken.py
├── assets
│   ├── varken_full_banner.jpg
│   ├── varken_full_banner_transparent.png
│   ├── varken_head_only transparent.png
│   ├── varken_head_only_primary_discord.png
│   └── varken_original.jpg
├── data
│   └── varken.example.ini
├── docker-compose.yml
├── requirements.txt
├── utilities
│   ├── grafana_build.py
│   └── historical_tautulli_import.py
├── varken.systemd
├── varken.xml
└── varken
    ├── __init__.py
    ├── dbmanager.py
    ├── helpers.py
    ├── iniparser.py
    ├── lidarr.py
    ├── ombi.py
    ├── radarr.py
    ├── sickchill.py
    ├── sonarr.py
    ├── structures.py
    ├── tautulli.py
    ├── unifi.py
    └── varkenlogger.py
/.github/FUNDING.yml:
--------------------------------------------------------------------------------
1 | ko_fi: varken
2 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve
4 | title: "[BUG]"
5 | labels: awaiting-triage
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Describe the bug**
11 | A clear and concise description of what the bug is.
12 |
13 | **To Reproduce**
14 | Steps to reproduce the behavior:
15 | 1. ...
16 | 2. ...
17 | 3. ...
18 | 4. ...
19 |
20 | **Expected behavior**
21 | A clear and concise description of what you expected to happen.
22 |
23 | **Screenshots**
24 | If applicable, add screenshots to help explain your problem.
25 |
26 | **Environment (please complete the following information):**
27 | - OS: [e.g. Ubuntu 18.04.1 or Docker:Tag]
28 | - Version [e.g. v1.1]
29 |
30 | **Additional context**
31 | Add any other context about the problem here.
32 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature request
3 | about: Suggest an idea for this project
4 | title: "[Feature Request]"
5 | labels: awaiting-triage
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Is your feature request related to a problem? Please describe.**
11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
12 |
13 | **Describe the solution you'd like**
14 | A clear and concise description of what you want to happen.
15 |
16 | **Describe alternatives you've considered**
17 | A clear and concise description of any alternative solutions or features you've considered.
18 |
19 | **Additional context**
20 | Add any other context or screenshots about the feature request here.
21 |
--------------------------------------------------------------------------------
/.github/actions/docker-multi-login-action/action.yml:
--------------------------------------------------------------------------------
1 | name: 'Docker Multi Login Action'
2 | description: 'Log in to dockerhub, quay, and github container registry'
3 | runs:
4 | using: "composite"
5 | steps:
6 | - shell: bash
7 | run: |
8 | echo "🔑 Logging into dockerhub..."
9 | if docker login --username ${{ fromJSON(env.secrets).DOCKERHUB_USERNAME }} --password ${{ fromJSON(env.secrets).DOCKERHUB_PASSWORD }} > /dev/null 2>&1; then
10 | echo "🎉 Login Succeeded!"
11 | fi
12 | - shell: bash
13 | run: |
14 | echo "🔑 Logging into quay.io..."
15 | if docker login quay.io --username ${{ fromJSON(env.secrets).QUAY_USERNAME }} --password ${{ fromJSON(env.secrets).QUAY_PASSWORD }} > /dev/null 2>&1; then
16 | echo "🎉 Login Succeeded!"
17 | fi
18 | - shell: bash
19 | run: |
20 | echo "🔑 Logging into ghcr.io..."
21 | if docker login ghcr.io --username ${{ fromJSON(env.secrets).GHCR_USERNAME }} --password ${{ fromJSON(env.secrets).GHCR_PASSWORD }} > /dev/null 2>&1; then
22 | echo "🎉 Login Succeeded!"
23 | fi
24 |
--------------------------------------------------------------------------------
/.github/actions/docker-target-image-list-action/action.yml:
--------------------------------------------------------------------------------
1 | name: 'Docker Target Image List Generator'
2 | description: 'A Github Action to generate a list of fully qualified target images for docker related steps'
3 | inputs:
4 | registries:
5 | description: "Comma separated list of docker registries"
6 | required: false
7 | default: "docker.io,quay.io,ghcr.io"
8 | images:
9 | description: "Comma separated list of images"
10 | required: true
11 | tags:
12 | description: "Comma separated list of image tags"
13 | required: false
14 | default: "edge"
15 | outputs:
16 | fully-qualified-target-images:
17 | description: "List of fully qualified docker target images"
18 | value: ${{ steps.gen-fqti.outputs.fully-qualified-target-images }}
19 | runs:
20 | using: "composite"
21 | steps:
22 | - name: Generate fully qualified docker target images
23 | id: gen-fqti
24 | shell: bash
25 | run: |
26 | IFS=',' read -r -a registries <<< "${{ inputs.registries }}"
27 | IFS=',' read -r -a images <<< "${{ inputs.images }}"
28 | IFS=',' read -r -a tags <<< "${{ inputs.tags }}"
29 | FQTI=""
30 | echo "Generating fully qualified docker target images for:"
31 | echo "🐋 Registries: ${#registries[@]}"
32 | echo "📷 Images: ${#images[@]}"
33 | echo "🏷️ Tags: ${#tags[@]}"
34 | echo "🧮 Total: $((${#registries[@]}*${#images[@]}*${#tags[@]}))"
35 | for registry in "${registries[@]}"; do
36 | for image in "${images[@]}"; do
37 | for tag in "${tags[@]}"; do
38 | if [ -z "$FQTI" ]; then
39 | FQTI="${registry}/${image}:${tag}"
40 | else
41 | FQTI="$FQTI,${registry}/${image}:${tag}"
42 | fi
43 | done
44 | done
45 | done
46 | echo ::set-output name=fully-qualified-target-images::${FQTI}
47 |
--------------------------------------------------------------------------------
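For readers skimming the shell above: the action expands every registry × image × tag combination into a single comma-separated list. A minimal Python sketch of the same expansion (an illustration only, not part of the action):

```python
def fully_qualified_target_images(registries, images, tags):
    """Expand comma-separated registries, images and tags into every combination."""
    combos = [
        f"{registry}/{image}:{tag}"
        for registry in registries.split(",")
        for image in images.split(",")
        for tag in tags.split(",")
    ]
    return ",".join(combos)


# Matches the action's default registries plus one image and tag:
print(fully_qualified_target_images("docker.io,quay.io,ghcr.io", "boerderij/varken", "develop"))
# docker.io/boerderij/varken:develop,quay.io/boerderij/varken:develop,ghcr.io/boerderij/varken:develop
```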
/.github/workflows/docker.yaml:
--------------------------------------------------------------------------------
1 | name: varken
2 | on:
3 | schedule:
4 | - cron: '0 10 * * *'
5 | push:
6 | branches:
7 | - master
8 | - develop
9 | tags:
10 | - 'v*.*.*'
11 | paths:
12 | - '.github/workflows/docker.yaml'
13 | - 'varken/**'
14 | - 'Varken.py'
15 | - 'Dockerfile'
16 | pull_request:
17 | branches:
18 | - master
19 | - develop
20 | paths:
21 | - '.github/workflows/docker.yaml'
22 | - 'varken/**'
23 | - 'Varken.py'
24 | - 'Dockerfile'
25 | workflow_dispatch:
26 | inputs:
27 | tag:
28 | description: 'Use this tag instead of most recent'
29 | required: false
30 | ignore-existing-tag:
31 | description: 'Ignore existing tag if "true"'
32 | required: false
33 | env:
34 | IMAGES: boerderij/varken
35 | PLATFORMS: "linux/amd64,linux/arm64,linux/arm/v7"
36 | jobs:
37 | lint-and-test:
38 | runs-on: ubuntu-latest
39 | steps:
40 | - name: Checkout
41 | uses: actions/checkout@v2
42 | - name: Setup Python
43 | uses: actions/setup-python@v2
44 | with:
45 | python-version: '3.x'
46 | - name: Lint
47 | run: pip install flake8 && flake8 --max-line-length 120 Varken.py varken/*.py
48 | build:
49 | runs-on: ubuntu-latest
50 | needs: lint-and-test
51 | steps:
52 | - name: Checkout
53 | uses: actions/checkout@v2
54 | - name: Prepare
55 | id: prep
56 | run: |
57 | VERSION=edge
58 | if [[ $GITHUB_REF == refs/tags/* ]]; then
59 | VERSION=${GITHUB_REF#refs/tags/v}
60 | fi
61 | if [ "${{ github.event_name }}" = "schedule" ]; then
62 | VERSION=nightly
63 | fi
64 | if [[ ${GITHUB_REF##*/} == "develop" ]]; then
65 | VERSION=develop
66 | fi
67 | TAGS="${VERSION}"
68 | if [[ $VERSION =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then
69 | TAGS="$TAGS,latest"
70 | fi
71 | echo ::set-output name=version::${VERSION}
72 | echo ::set-output name=tags::${TAGS}
73 | echo ::set-output name=branch::${GITHUB_REF##*/}
74 | echo ::set-output name=build_date::$(date -u +'%Y-%m-%dT%H:%M:%SZ')
75 | echo ::set-output name=vcs_ref::${GITHUB_SHA::8}
76 | - uses: ./.github/actions/docker-target-image-list-action
77 | name: Generate Target Images
78 | id: gen-tags
79 | with:
80 | images: ${{ env.IMAGES }}
81 | tags: ${{ steps.prep.outputs.tags }}
82 | - name: Set up QEMU
83 | uses: docker/setup-qemu-action@v1
84 | with:
85 | platforms: ${{ env.PLATFORMS }}
86 | - name: Set up Docker Buildx
87 | uses: docker/setup-buildx-action@v1
88 | with:
89 | install: true
90 | version: latest
91 | driver-opts: image=moby/buildkit:master
92 | - name: Docker Multi Login
93 | uses: ./.github/actions/docker-multi-login-action
94 | env:
95 | secrets: ${{ toJSON(secrets) }}
96 | - name: Build and Push
97 | uses: docker/build-push-action@v2
98 | with:
99 | context: .
100 | file: ./Dockerfile
101 | platforms: ${{ env.PLATFORMS }}
102 | pull: true
103 | push: ${{ github.event_name != 'pull_request' }}
104 | tags: ${{ steps.gen-tags.outputs.fully-qualified-target-images }}
105 | build-args: |
106 | VERSION=${{ steps.prep.outputs.version }}
107 | BRANCH=${{ steps.prep.outputs.branch }}
108 | BUILD_DATE=${{ steps.prep.outputs.build_date }}
109 | VCS_REF=${{ steps.prep.outputs.vcs_ref }}
110 | - name: Inspect
111 | if: ${{ github.event_name != 'pull_request' }}
112 | run: |
113 | IFS=',' read -r -a images <<< "${{ steps.gen-tags.outputs.fully-qualified-target-images }}"
114 | for image in "${images[@]}"; do
115 | docker buildx imagetools inspect ${image}
116 | done
117 |
--------------------------------------------------------------------------------
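The Prepare step above derives the image version and tag list from the Git ref and the event type. A hedged Python sketch of that mapping, useful when reasoning about which tags a given push will produce (not part of the workflow itself):

```python
import re


def derive_version_and_tags(github_ref, event_name):
    """Mirror the Prepare step: edge by default, nightly on schedule, develop on that branch."""
    version = "edge"
    if github_ref.startswith("refs/tags/v"):        # the workflow only fires on v*.*.* tags
        version = github_ref[len("refs/tags/v"):]
    if event_name == "schedule":
        version = "nightly"
    if github_ref.rsplit("/", 1)[-1] == "develop":
        version = "develop"
    tags = version
    if re.fullmatch(r"\d{1,3}\.\d{1,3}\.\d{1,3}", version):
        tags += ",latest"                            # release versions are also tagged "latest"
    return version, tags


print(derive_version_and_tags("refs/tags/v1.7.7", "push"))    # ('1.7.7', '1.7.7,latest')
print(derive_version_and_tags("refs/heads/develop", "push"))  # ('develop', 'develop')
```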
/.github/workflows/invalid_template.yml:
--------------------------------------------------------------------------------
1 | name: 'Invalid Template'
2 |
3 | on:
4 | issues:
5 | types: [labeled, unlabeled, reopened]
6 |
7 | jobs:
8 | support:
9 | runs-on: ubuntu-latest
10 | steps:
11 | - uses: dessant/support-requests@v2
12 | with:
13 | github-token: ${{ github.token }}
14 | support-label: 'invalid:template-incomplete'
15 | issue-comment: >
16 | :wave: @{issue-author}, please edit your issue and follow the template provided.
17 | close-issue: false
18 | lock-issue: false
19 | issue-lock-reason: 'resolved'
20 |
--------------------------------------------------------------------------------
/.github/workflows/support.yml:
--------------------------------------------------------------------------------
1 | name: 'Support Request'
2 |
3 | on:
4 | issues:
5 | types: [labeled, unlabeled, reopened]
6 |
7 | jobs:
8 | support:
9 | runs-on: ubuntu-latest
10 | steps:
11 | - uses: dessant/support-requests@v2
12 | with:
13 | github-token: ${{ github.token }}
14 | support-label: 'support'
15 | issue-comment: >
16 | :wave: @{issue-author}, we use the issue tracker exclusively
17 | for bug reports and feature requests. However, this issue appears
18 | to be a support request. Please use our support channels
19 | to get help with Varken!
20 |
21 | - [Discord](https://discord.gg/VjZ6qSM)
22 | - [Discord Quick Access](http://cyborg.decreator.dev/channels/518970285773422592/530424560504537105/)
23 | close-issue: true
24 | lock-issue: false
25 | issue-lock-reason: 'off-topic'
26 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 | .DS_Store?
3 | ._*
4 | .Spotlight-V100
5 | .Trashes
6 | ehthumbs.db
7 | Thumbs.db
8 | GeoLite2-City.mmdb
9 | GeoLite2-City.tar.gz
10 | data/varken.ini
11 | .idea/
12 | varken-venv/
13 | venv/
14 | logs/
15 | __pycache__
16 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # Change Log
2 |
3 | ## [v1.7.7](https://github.com/Boerderij/Varken/tree/v1.7.7) (2020-12-21)
4 | [Full Changelog](https://github.com/Boerderij/Varken/compare/1.7.6...v1.7.7)
5 |
6 | **Implemented enhancements:**
7 | - \[Enhancement\] Ombi 4.0 compatibility [\#186](https://github.com/Boerderij/Varken/issues/186)
8 | ([samwiseg0](https://github.com/samwiseg0))
9 |
10 | **Merged pull requests:**
11 |
12 | - v1.7.7 Merge [\#191](https://github.com/Boerderij/Varken/pull/191)
13 | ([DirtyCajunRice](https://github.com/DirtyCajunRice))
14 | - Type Error fix [\#177](https://github.com/Boerderij/Varken/pull/177)
15 | ([derek-miller](https://github.com/derek-miller))
16 |
17 | **Fixed bugs:**
18 |
19 | - \[BUG\] Influxdb exit code [\#174](https://github.com/Boerderij/Varken/issues/174)
20 | ([samwiseg0](https://github.com/samwiseg0))
21 |
22 | **Notes:**
23 | - Now built via github actions
24 | - Available on ghcr, quay.io, and dockerhub
25 | - Nightly builds done to accommodate dependabot MRs
26 |
27 | ## [v1.7.6](https://github.com/Boerderij/Varken/tree/v1.7.6) (2020-01-01)
28 | [Full Changelog](https://github.com/Boerderij/Varken/compare/1.7.5...v1.7.6)
29 |
30 | **Merged pull requests:**
31 |
32 | - v1.7.6 Merge [\#165](https://github.com/Boerderij/Varken/pull/165) ([samwiseg0](https://github.com/samwiseg0))
33 |
34 | **Fixed bugs:**
35 |
36 | - \[BUG\] Geolite database download failing [\#164](https://github.com/Boerderij/Varken/issues/164)
37 |
38 | **Notes:**
39 | - A MaxMind license key will be required in order to download the GeoLite2 DB. Please see the [wiki](https://wiki.cajun.pro/link/5#bkmrk-maxmind) for more details.
40 |
41 | ## [v1.7.5](https://github.com/Boerderij/Varken/tree/v1.7.5) (2019-12-11)
42 | [Full Changelog](https://github.com/Boerderij/Varken/compare/1.7.4...v1.7.5)
43 |
44 | **Merged pull requests:**
45 |
46 | - v1.7.5 Merge [\#162](https://github.com/Boerderij/Varken/pull/162) ([DirtyCajunRice](https://github.com/DirtyCajunRice))
47 | - Add helper itemgetter function for TautulliStream fields [\#157](https://github.com/Boerderij/Varken/pull/157) ([JonnyWong16](https://github.com/JonnyWong16))
48 | - Fix to only use NamedTuple fields from TautulliStream [\#156](https://github.com/Boerderij/Varken/pull/156) ([JonnyWong16](https://github.com/JonnyWong16))
49 |
50 | ## [1.7.4](https://github.com/Boerderij/Varken/tree/1.7.4) (2019-10-07)
51 | [Full Changelog](https://github.com/Boerderij/Varken/compare/1.7.3...1.7.4)
52 |
53 | **Implemented enhancements:**
54 |
55 | - \[Enhancement\] Update Tautulli structures to include grandparent\_guid and parent\_guid [\#154](https://github.com/Boerderij/Varken/issues/154)
56 | - \[Enhancement\] Update Tautulli structures to reflect recent changes [\#153](https://github.com/Boerderij/Varken/issues/153)
57 |
58 | **Merged pull requests:**
59 |
60 | - v1.7.4 Merge [\#155](https://github.com/Boerderij/Varken/pull/155) ([DirtyCajunRice](https://github.com/DirtyCajunRice))
61 |
62 | ## [1.7.3](https://github.com/Boerderij/Varken/tree/1.7.3) (2019-08-09)
63 | [Full Changelog](https://github.com/Boerderij/Varken/compare/1.7.2...1.7.3)
64 |
65 | **Implemented enhancements:**
66 |
67 | - \#141 Take monitored status into account for Missing Available Movies check [\#145](https://github.com/Boerderij/Varken/pull/145) ([mikeporterdev](https://github.com/mikeporterdev))
68 |
69 | **Fixed bugs:**
70 |
71 | - \[BUG\] Varken Crashes when ini is read only [\#146](https://github.com/Boerderij/Varken/issues/146)
72 | - \[BUG\] Missing Available Movies/TV Shows does not take Monitored status into account [\#141](https://github.com/Boerderij/Varken/issues/141)
73 |
74 | **Closed issues:**
75 |
76 | - \[Feature Request\] Medusa Support [\#148](https://github.com/Boerderij/Varken/issues/148)
77 |
78 | **Merged pull requests:**
79 |
80 | - v1.7.3 Merge [\#149](https://github.com/Boerderij/Varken/pull/149) ([DirtyCajunRice](https://github.com/DirtyCajunRice))
81 |
82 | ## [1.7.2](https://github.com/Boerderij/Varken/tree/1.7.2) (2019-06-24)
83 | [Full Changelog](https://github.com/Boerderij/Varken/compare/1.7.1...1.7.2)
84 |
85 | **Implemented enhancements:**
86 |
87 | - Allow configuration via environment variables [\#137](https://github.com/Boerderij/Varken/issues/137)
88 |
89 | **Fixed bugs:**
90 |
91 | - \[BUG\] logger invoked before initialization in dbmanager [\#138](https://github.com/Boerderij/Varken/issues/138)
92 |
93 | **Merged pull requests:**
94 |
95 | - v1.7.2 Merge [\#144](https://github.com/Boerderij/Varken/pull/144) ([DirtyCajunRice](https://github.com/DirtyCajunRice))
96 |
97 | ## [1.7.1](https://github.com/Boerderij/Varken/tree/1.7.1) (2019-06-04)
98 | [Full Changelog](https://github.com/Boerderij/Varken/compare/1.7.0...1.7.1)
99 |
100 | **Fixed bugs:**
101 |
102 | - \[BUG\] Sonarr Missing episodes column ordering is incorrect [\#133](https://github.com/Boerderij/Varken/pull/133) ([nicolerenee](https://github.com/nicolerenee))
103 |
104 | **Merged pull requests:**
105 |
106 | - v1.7.1 Merge [\#134](https://github.com/Boerderij/Varken/pull/134) ([DirtyCajunRice](https://github.com/DirtyCajunRice))
107 |
108 | ## [1.7.0](https://github.com/Boerderij/Varken/tree/1.7.0) (2019-05-06)
109 | [Full Changelog](https://github.com/Boerderij/Varken/compare/1.6.8...1.7.0)
110 |
111 | **Implemented enhancements:**
112 |
113 | - \[ENHANCEMENT\] Add album and track totals to artist library from Tautulli [\#127](https://github.com/Boerderij/Varken/issues/127)
114 | - \[Feature Request\] No way to show music album / track count [\#125](https://github.com/Boerderij/Varken/issues/125)
115 |
116 | **Fixed bugs:**
117 |
118 | - \[BUG\] Invalid retention policy name causing retention policy creation failure [\#129](https://github.com/Boerderij/Varken/issues/129)
119 | - \[BUG\] Unifi errors on unnamed devices [\#126](https://github.com/Boerderij/Varken/issues/126)
120 |
121 | **Merged pull requests:**
122 |
123 | - v1.7.0 Merge [\#131](https://github.com/Boerderij/Varken/pull/131) ([DirtyCajunRice](https://github.com/DirtyCajunRice))
124 |
125 | ## [1.6.8](https://github.com/Boerderij/Varken/tree/1.6.8) (2019-04-19)
126 | [Full Changelog](https://github.com/Boerderij/Varken/compare/1.6.7...1.6.8)
127 |
128 | **Implemented enhancements:**
129 |
130 | - \[Enhancement\] Only drop the invalid episodes from sonarr [\#121](https://github.com/Boerderij/Varken/issues/121)
131 |
132 | **Merged pull requests:**
133 |
134 | - v1.6.8 Merge [\#122](https://github.com/Boerderij/Varken/pull/122) ([DirtyCajunRice](https://github.com/DirtyCajunRice))
135 |
136 | ## [1.6.7](https://github.com/Boerderij/Varken/tree/1.6.7) (2019-04-18)
137 | [Full Changelog](https://github.com/Boerderij/Varken/compare/1.6.6...1.6.7)
138 |
139 | **Implemented enhancements:**
140 |
141 | - \[BUG\] Ombi null childRequest output [\#119](https://github.com/Boerderij/Varken/issues/119)
142 | - \[ENHANCEMENT\] Invalid entries in Sonarr's queue leaves varken unable to process the rest of the queue [\#117](https://github.com/Boerderij/Varken/issues/117)
143 |
144 | **Merged pull requests:**
145 |
146 | - v1.6.7 Merge [\#120](https://github.com/Boerderij/Varken/pull/120) ([DirtyCajunRice](https://github.com/DirtyCajunRice))
147 |
148 | ## [1.6.6](https://github.com/Boerderij/Varken/tree/1.6.6) (2019-03-12)
149 | [Full Changelog](https://github.com/Boerderij/Varken/compare/1.6.5...1.6.6)
150 |
151 | **Fixed bugs:**
152 |
153 | - \[BUG\] TZDATA issue in docker images [\#112](https://github.com/Boerderij/Varken/issues/112)
154 | - \[BUG\] Unifi job does not try again after failure [\#107](https://github.com/Boerderij/Varken/issues/107)
155 | - \[BUG\] Catch ChunkError [\#106](https://github.com/Boerderij/Varken/issues/106)
156 |
157 | **Merged pull requests:**
158 |
159 | - v1.6.6 Merge [\#116](https://github.com/Boerderij/Varken/pull/116) ([samwiseg0](https://github.com/samwiseg0))
160 |
161 | ## [1.6.5](https://github.com/Boerderij/Varken/tree/1.6.5) (2019-03-11)
162 | [Full Changelog](https://github.com/Boerderij/Varken/compare/v1.6.4...1.6.5)
163 |
164 | **Implemented enhancements:**
165 |
166 | - \[Feature Request\] Add new "relayed" and "secure" to Tautulli data pushed to influx [\#114](https://github.com/Boerderij/Varken/issues/114)
167 | - \[BUG\] Changes to Tautulli breaks Varken `TypeError` `Secure` `relayed` [\#111](https://github.com/Boerderij/Varken/issues/111)
168 |
169 | **Fixed bugs:**
170 |
171 | - \[BUG\] Handle GeoIP Downloads better [\#113](https://github.com/Boerderij/Varken/issues/113)
172 | - \[BUG\] - "None" outputted to stdout many times with no benefit? [\#105](https://github.com/Boerderij/Varken/issues/105)
173 | - \[BUG\] windows file open error [\#104](https://github.com/Boerderij/Varken/issues/104)
174 | - \[BUG\] Not catching DB url resolve [\#103](https://github.com/Boerderij/Varken/issues/103)
175 |
176 | **Merged pull requests:**
177 |
178 | - v1.6.5 Merge [\#115](https://github.com/Boerderij/Varken/pull/115) ([samwiseg0](https://github.com/samwiseg0))
179 |
180 | ## [v1.6.4](https://github.com/Boerderij/Varken/tree/v1.6.4) (2019-02-04)
181 | [Full Changelog](https://github.com/Boerderij/Varken/compare/1.6.3...v1.6.4)
182 |
183 | **Fixed bugs:**
184 |
185 | - \[BUG\] fstring in Varken.py Doesnt allow py version check [\#102](https://github.com/Boerderij/Varken/issues/102)
186 | - \[BUG\] Unifi loadavg is str instead of float [\#101](https://github.com/Boerderij/Varken/issues/101)
187 | - \[BUG\] requestedByAlias to added to Ombi structures [\#97](https://github.com/Boerderij/Varken/issues/97)
188 |
189 | **Merged pull requests:**
190 |
191 | - v1.6.4 Merge [\#100](https://github.com/Boerderij/Varken/pull/100) ([DirtyCajunRice](https://github.com/DirtyCajunRice))
192 |
193 | ## [1.6.3](https://github.com/Boerderij/Varken/tree/1.6.3) (2019-01-16)
194 | [Full Changelog](https://github.com/Boerderij/Varken/compare/v1.6.2...1.6.3)
195 |
196 | **Implemented enhancements:**
197 |
198 | - \[Feature Request\] ARM, ARMHF and ARM64 Docker Images [\#71](https://github.com/Boerderij/Varken/issues/71)
199 |
200 | **Fixed bugs:**
201 |
202 | - \[BUG\] Newer influxdb has timeouts and connection errors [\#93](https://github.com/Boerderij/Varken/issues/93)
203 |
204 | **Merged pull requests:**
205 |
206 | - double typo [\#96](https://github.com/Boerderij/Varken/pull/96) ([DirtyCajunRice](https://github.com/DirtyCajunRice))
207 | - tweaks [\#95](https://github.com/Boerderij/Varken/pull/95) ([DirtyCajunRice](https://github.com/DirtyCajunRice))
208 | - v1.6.3 Merge [\#94](https://github.com/Boerderij/Varken/pull/94) ([DirtyCajunRice](https://github.com/DirtyCajunRice))
209 |
210 | ## [v1.6.2](https://github.com/Boerderij/Varken/tree/v1.6.2) (2019-01-12)
211 | [Full Changelog](https://github.com/Boerderij/Varken/compare/v1.6.1...v1.6.2)
212 |
213 | **Fixed bugs:**
214 |
215 | - Rectify influxdb ini [\#91](https://github.com/Boerderij/Varken/issues/91)
216 |
217 | **Merged pull requests:**
218 |
219 | - v1.6.2 Merge [\#92](https://github.com/Boerderij/Varken/pull/92) ([DirtyCajunRice](https://github.com/DirtyCajunRice))
220 |
221 | ## [v1.6.1](https://github.com/Boerderij/Varken/tree/v1.6.1) (2019-01-12)
222 | [Full Changelog](https://github.com/Boerderij/Varken/compare/v1.6...v1.6.1)
223 |
224 | **Implemented enhancements:**
225 |
226 | - \[Feature Request\] Unifi Integration [\#79](https://github.com/Boerderij/Varken/issues/79)
227 |
228 | **Fixed bugs:**
229 |
230 | - \[BUG\] Unexpected keyword argument 'langCode' while creating OmbiMovieRequest structure [\#88](https://github.com/Boerderij/Varken/issues/88)
231 |
232 | **Closed issues:**
233 |
234 | - Remove Cisco ASA since Telegraf + SNMP can do the same [\#86](https://github.com/Boerderij/Varken/issues/86)
235 |
236 | **Merged pull requests:**
237 |
238 | - v1.6.1 Merge [\#90](https://github.com/Boerderij/Varken/pull/90) ([DirtyCajunRice](https://github.com/DirtyCajunRice))
239 |
240 | ## [v1.6](https://github.com/Boerderij/Varken/tree/v1.6) (2019-01-04)
241 | [Full Changelog](https://github.com/Boerderij/Varken/compare/v1.5...v1.6)
242 |
243 | **Implemented enhancements:**
244 |
245 | - \[Feature Request\] docker-compose stack install option [\#84](https://github.com/Boerderij/Varken/issues/84)
246 | - Fix missing variables in varken.ini automatically [\#81](https://github.com/Boerderij/Varken/issues/81)
247 | - Create Wiki for FAQ and help docs [\#80](https://github.com/Boerderij/Varken/issues/80)
248 |
249 | **Fixed bugs:**
250 |
251 | - \[BUG\] url:port does not filter [\#82](https://github.com/Boerderij/Varken/issues/82)
252 |
253 | **Merged pull requests:**
254 |
255 | - v1.6 Merge [\#85](https://github.com/Boerderij/Varken/pull/85) ([DirtyCajunRice](https://github.com/DirtyCajunRice))
256 |
257 | ## [v1.5](https://github.com/Boerderij/Varken/tree/v1.5) (2018-12-30)
258 | [Full Changelog](https://github.com/Boerderij/Varken/compare/v1.4...v1.5)
259 |
260 | **Implemented enhancements:**
261 |
262 | - \[Feature Request\] Add issues from Ombi [\#70](https://github.com/Boerderij/Varken/issues/70)
263 | - Replace static grafana configs with a Public Example [\#32](https://github.com/Boerderij/Varken/issues/32)
264 |
265 | **Fixed bugs:**
266 |
267 | - \[BUG\] unexpected keyword argument 'channel\_icon' [\#73](https://github.com/Boerderij/Varken/issues/73)
268 | - \[BUG\] Unexpected keyword argument 'addOptions' [\#68](https://github.com/Boerderij/Varken/issues/68)
269 |
270 | **Merged pull requests:**
271 |
272 | - v1.5 Merge [\#75](https://github.com/Boerderij/Varken/pull/75) ([DirtyCajunRice](https://github.com/DirtyCajunRice))
273 | - Add Ombi Issues [\#74](https://github.com/Boerderij/Varken/pull/74) ([anderssonoscar0](https://github.com/anderssonoscar0))
274 |
275 | ## [v1.4](https://github.com/Boerderij/Varken/tree/v1.4) (2018-12-19)
276 | [Full Changelog](https://github.com/Boerderij/Varken/compare/v1.1...v1.4)
277 |
278 | **Implemented enhancements:**
279 |
280 | - \[Feature Request\] Add tautulli request for library stats [\#64](https://github.com/Boerderij/Varken/issues/64)
281 | - Create randomized 12-24 hour checks to update GeoLite DB after the first wednesday of the month [\#60](https://github.com/Boerderij/Varken/issues/60)
282 | - \[Feature Request\]: Pull list of requests \(instead of just counts\) [\#58](https://github.com/Boerderij/Varken/issues/58)
283 | - Feature Request , Add Sickchill [\#48](https://github.com/Boerderij/Varken/issues/48)
284 |
285 | **Fixed bugs:**
286 |
287 | - \[BUG\] Ombi all requests missing half of "pending" option [\#63](https://github.com/Boerderij/Varken/issues/63)
288 | - \[BUG\] asa bug with checking for apikey [\#62](https://github.com/Boerderij/Varken/issues/62)
289 | - \[BUG\] Add Catchall to ombi requests [\#59](https://github.com/Boerderij/Varken/issues/59)
290 |
291 | **Closed issues:**
292 |
293 | - Unify naming and cleanup duplication in iniparser [\#61](https://github.com/Boerderij/Varken/issues/61)
294 |
295 | **Merged pull requests:**
296 |
297 | - v1.4 Merge [\#65](https://github.com/Boerderij/Varken/pull/65) ([DirtyCajunRice](https://github.com/DirtyCajunRice))
298 |
299 | ## [v1.1](https://github.com/Boerderij/Varken/tree/v1.1) (2018-12-11)
300 | [Full Changelog](https://github.com/Boerderij/Varken/compare/v1.0...v1.1)
301 |
302 | **Implemented enhancements:**
303 |
304 | - Convert missing available to True False [\#54](https://github.com/Boerderij/Varken/issues/54)
305 | - Handle invalid config better and log it [\#51](https://github.com/Boerderij/Varken/issues/51)
306 | - Feature Request - Include value from Radarr [\#50](https://github.com/Boerderij/Varken/issues/50)
307 | - Change true/false to 0/1 for missing movies [\#47](https://github.com/Boerderij/Varken/issues/47)
308 |
309 | **Fixed bugs:**
310 |
311 | - \[BUG\] Time does not update from "today" [\#56](https://github.com/Boerderij/Varken/issues/56)
312 | - geoip\_download does not account for moving data folder [\#46](https://github.com/Boerderij/Varken/issues/46)
313 |
314 | **Closed issues:**
315 |
316 | - Initial startup requires admin access to InfluxDB [\#53](https://github.com/Boerderij/Varken/issues/53)
317 |
318 | **Merged pull requests:**
319 |
320 | - v1.1 Merge [\#57](https://github.com/Boerderij/Varken/pull/57) ([DirtyCajunRice](https://github.com/DirtyCajunRice))
321 | - Update issue templates [\#55](https://github.com/Boerderij/Varken/pull/55) ([DirtyCajunRice](https://github.com/DirtyCajunRice))
322 |
323 | ## [v1.0](https://github.com/Boerderij/Varken/tree/v1.0) (2018-12-10)
324 | [Full Changelog](https://github.com/Boerderij/Varken/compare/v0.1...v1.0)
325 |
326 | **Implemented enhancements:**
327 |
328 | - Add cisco asa from legacy [\#44](https://github.com/Boerderij/Varken/issues/44)
329 | - Add server ID to ombi to differenciate [\#43](https://github.com/Boerderij/Varken/issues/43)
330 | - Create Changelog for nightly release [\#39](https://github.com/Boerderij/Varken/issues/39)
331 | - Create proper logging [\#34](https://github.com/Boerderij/Varken/issues/34)
332 |
333 | **Closed issues:**
334 |
335 | - Remove "dashboard" folder and subfolders [\#42](https://github.com/Boerderij/Varken/issues/42)
336 | - Remove "Legacy" folder [\#41](https://github.com/Boerderij/Varken/issues/41)
337 | - Create the DB if it does not exist. [\#38](https://github.com/Boerderij/Varken/issues/38)
338 | - create systemd examples [\#37](https://github.com/Boerderij/Varken/issues/37)
339 | - Create a GeoIP db downloader and refresher [\#36](https://github.com/Boerderij/Varken/issues/36)
340 | - Create unique IDs for all scripts to prevent duplicate data [\#35](https://github.com/Boerderij/Varken/issues/35)
341 | - use a config.ini instead of command-line flags [\#33](https://github.com/Boerderij/Varken/issues/33)
342 | - Migrate crontab to python schedule package [\#31](https://github.com/Boerderij/Varken/issues/31)
343 | - Consolidate missing and missing\_days in sonarr.py [\#30](https://github.com/Boerderij/Varken/issues/30)
344 | - Ombi something new \[Request\] [\#26](https://github.com/Boerderij/Varken/issues/26)
345 | - Support for Linux without ASA [\#21](https://github.com/Boerderij/Varken/issues/21)
346 |
347 | **Merged pull requests:**
348 |
349 | - v1.0 Merge [\#45](https://github.com/Boerderij/Varken/pull/45) ([DirtyCajunRice](https://github.com/DirtyCajunRice))
350 | - varken to nightly [\#40](https://github.com/Boerderij/Varken/pull/40) ([DirtyCajunRice](https://github.com/DirtyCajunRice))
351 |
352 | ## [v0.1](https://github.com/Boerderij/Varken/tree/v0.1) (2018-10-20)
353 | **Implemented enhancements:**
354 |
355 | - The address 172.17.0.1 is not in the database. [\#17](https://github.com/Boerderij/Varken/issues/17)
356 | - Local streams aren't showing with Tautulli [\#16](https://github.com/Boerderij/Varken/issues/16)
357 | - Worldmap panel [\#15](https://github.com/Boerderij/Varken/issues/15)
358 |
359 | **Closed issues:**
360 |
361 | - Issues with scripts [\#12](https://github.com/Boerderij/Varken/issues/12)
362 | - issue with new tautulli.py [\#10](https://github.com/Boerderij/Varken/issues/10)
363 | - ombi.py fails when attempting to update influxdb [\#9](https://github.com/Boerderij/Varken/issues/9)
364 | - GeoIP Going to Break July 1st [\#8](https://github.com/Boerderij/Varken/issues/8)
365 | - \[Request\] Documentation / How-to Guide [\#1](https://github.com/Boerderij/Varken/issues/1)
366 |
367 | **Merged pull requests:**
368 |
369 | - v0.1 [\#20](https://github.com/Boerderij/Varken/pull/20) ([samwiseg0](https://github.com/samwiseg0))
370 | - Added selfplug [\#19](https://github.com/Boerderij/Varken/pull/19) ([Roxedus](https://github.com/Roxedus))
371 | - Major rework of the scripts [\#14](https://github.com/Boerderij/Varken/pull/14) ([samwiseg0](https://github.com/samwiseg0))
372 | - fix worldmap after change to maxmind local db [\#11](https://github.com/Boerderij/Varken/pull/11) ([madbuda](https://github.com/madbuda))
373 | - Update sonarr.py [\#7](https://github.com/Boerderij/Varken/pull/7) ([ghost](https://github.com/ghost))
374 | - Create crontabs [\#6](https://github.com/Boerderij/Varken/pull/6) ([ghost](https://github.com/ghost))
375 | - update plex\_dashboard.json [\#5](https://github.com/Boerderij/Varken/pull/5) ([ghost](https://github.com/ghost))
376 | - Update README.md [\#4](https://github.com/Boerderij/Varken/pull/4) ([ghost](https://github.com/ghost))
377 | - added sickrage portion [\#3](https://github.com/Boerderij/Varken/pull/3) ([ghost](https://github.com/ghost))
378 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.9.1-alpine
2 |
3 | ENV DEBUG="True" \
4 | DATA_FOLDER="/config" \
5 | VERSION="0.0.0" \
6 | BRANCH="edge" \
7 | BUILD_DATE="1/1/1970"
8 |
9 | LABEL maintainer="dirtycajunrice,samwiseg0" \
10 | org.opencontainers.image.created=$BUILD_DATE \
11 | org.opencontainers.image.url="https://github.com/Boerderij/Varken" \
12 | org.opencontainers.image.source="https://github.com/Boerderij/Varken" \
13 | org.opencontainers.image.version=$VERSION \
14 | org.opencontainers.image.revision=$VCS_REF \
15 | org.opencontainers.image.vendor="boerderij" \
16 | org.opencontainers.image.title="varken" \
17 | org.opencontainers.image.description="Varken is a standalone application to aggregate data from the Plex ecosystem into InfluxDB using Grafana for a frontend" \
18 | org.opencontainers.image.licenses="MIT"
19 |
20 | WORKDIR /app
21 |
22 | COPY /requirements.txt /Varken.py /app/
23 |
24 | COPY /varken /app/varken
25 |
26 | COPY /data /app/data
27 |
28 | COPY /utilities /app/data/utilities
29 |
30 | RUN \
31 | apk add --no-cache tzdata \
32 | && pip install --no-cache-dir -r /app/requirements.txt \
33 | && sed -i "s/0.0.0/${VERSION}/;s/develop/${BRANCH}/;s/1\/1\/1970/${BUILD_DATE//\//\\/}/" varken/__init__.py
34 |
35 | CMD cp /app/data/varken.example.ini /config/varken.example.ini && python3 /app/Varken.py
36 |
--------------------------------------------------------------------------------
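The sed one-liner in the RUN step stamps the VERSION, BRANCH and BUILD_DATE values into varken/__init__.py, which Varken.py later logs at startup. A rough Python equivalent for readers less familiar with sed; this is an illustration, not part of the image build:

```python
from pathlib import Path


def stamp_build_info(init_py, version, branch, build_date):
    """Replace the placeholder values in varken/__init__.py, like the sed command above."""
    path = Path(init_py)
    text = (path.read_text()
                .replace("0.0.0", version)           # VERSION placeholder
                .replace("develop", branch)          # BRANCH placeholder
                .replace("1/1/1970", build_date))    # BUILD_DATE placeholder
    path.write_text(text)
```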
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2018 Boerderij
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | [](https://github.com/Boerderij/Varken/actions?query=workflow%3Avarken)
6 | [](https://discord.gg/VjZ6qSM)
7 | [](https://ko-fi.com/varken)
8 | [](https://microbadger.com/images/boerderij/varken)
9 | [](https://github.com/Boerderij/Varken/releases/latest)
10 | [](https://hub.docker.com/r/boerderij/varken/)
11 |
12 | Dutch for PIG. PIG is an acronym for Plex/InfluxDB/Grafana.
13 |
14 | Varken is a standalone application to aggregate data from the Plex
15 | ecosystem into InfluxDB using Grafana for a frontend
16 |
17 | Requirements:
18 | * [Python 3.6.7+](https://www.python.org/downloads/release/python-367/)
19 | * [Python3-pip](https://pip.pypa.io/en/stable/installing/)
20 | * [InfluxDB 1.8.x](https://www.influxdata.com/)
21 | * [Grafana](https://grafana.com/)
22 |
23 |
24 | Example Dashboard
25 |
26 |
27 |
28 |
29 | Supported Modules:
30 | * [Sonarr](https://sonarr.tv/) - Smart PVR for newsgroup and bittorrent users.
31 | * [SickChill](https://sickchill.github.io/) - SickChill is an automatic Video Library Manager for TV Shows.
32 | * [Radarr](https://radarr.video/) - A fork of Sonarr to work with movies à la Couchpotato.
33 | * [Tautulli](https://tautulli.com/) - A Python based monitoring and tracking tool for Plex Media Server.
34 | * [Ombi](https://ombi.io/) - Want a Movie or TV Show on Plex or Emby? Use Ombi!
35 | * [Lidarr](https://lidarr.audio/) - Looks and smells like Sonarr but made for music.
36 |
37 | Key features:
38 | * Multiple server support for all modules
39 | * Geolocation mapping from [GeoLite2](https://dev.maxmind.com/geoip/geoip2/geolite2/)
40 | * Grafana [Worldmap Panel](https://grafana.com/plugins/grafana-worldmap-panel/installation) support
41 |
42 |
43 | ## Installation Guides
44 | Varken Installation guides can be found in the [wiki](https://wiki.cajun.pro/books/varken/chapter/installation).
45 |
46 | ## Support
47 | Please read [Asking for Support](https://wiki.cajun.pro/books/varken/chapter/asking-for-support) before seeking support.
48 |
49 | [Click here for quick access to discord support](http://cyborg.decreator.dev/channels/518970285773422592/530424560504537105/). No app or account needed!
50 |
51 | ### InfluxDB
52 | [InfluxDB Installation Documentation](https://wiki.cajun.pro/books/varken/page/influxdb-d1f)
53 | Note: Only v1.8.x is currently supported.
54 |
55 | InfluxDB is required but is not packaged as part of Varken. Varken will create
56 | its database on its own. If you choose to give the Varken user permissions that
57 | do not include database creation, please ensure you create an InfluxDB database
58 | named `varken`.
59 |
60 | ### Grafana
61 | [Grafana Installation/Dashboard Documentation](https://wiki.cajun.pro/books/varken/page/grafana)
62 |
--------------------------------------------------------------------------------
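Per the InfluxDB note above, Varken normally creates its own database, but when the InfluxDB user lacks creation rights the `varken` database has to exist beforehand. A minimal sketch using the influxdb client pinned in requirements.txt; the host and credentials below are the placeholders from data/varken.example.ini and must be replaced with your own:

```python
from influxdb import InfluxDBClient  # influxdb==5.2.0, as pinned in requirements.txt

# Placeholder connection details copied from data/varken.example.ini.
client = InfluxDBClient(host="influxdb.domain.tld", port=8086,
                        username="root", password="root")
if "varken" not in [db["name"] for db in client.get_list_database()]:
    client.create_database("varken")
```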
/Varken.py:
--------------------------------------------------------------------------------
1 | import platform
2 | import schedule
3 | from time import sleep
4 | from queue import Queue
5 | from sys import version
6 | from threading import Thread
7 | from os import environ as env
8 | from os import access, R_OK, getenv
9 | from distro import linux_distribution
10 | from os.path import isdir, abspath, dirname, join
11 | from argparse import ArgumentParser, RawTextHelpFormatter
12 | from logging import getLogger, StreamHandler, Formatter, DEBUG
13 |
14 | # Needed to check version of python
15 | from varken import structures # noqa
16 | from varken.ombi import OmbiAPI
17 | from varken.unifi import UniFiAPI
18 | from varken import VERSION, BRANCH, BUILD_DATE
19 | from varken.sonarr import SonarrAPI
20 | from varken.radarr import RadarrAPI
21 | from varken.lidarr import LidarrAPI
22 | from varken.iniparser import INIParser
23 | from varken.dbmanager import DBManager
24 | from varken.helpers import GeoIPHandler
25 | from varken.tautulli import TautulliAPI
26 | from varken.sickchill import SickChillAPI
27 | from varken.varkenlogger import VarkenLogger
28 |
29 |
30 | PLATFORM_LINUX_DISTRO = ' '.join(x for x in linux_distribution() if x)
31 |
32 |
33 | def thread(job, **kwargs):
34 | worker = Thread(target=job, kwargs=dict(**kwargs))
35 | worker.start()
36 |
37 |
38 | if __name__ == "__main__":
39 | parser = ArgumentParser(prog='varken',
40 | description='Command-line utility to aggregate data from the plex ecosystem into InfluxDB',
41 | formatter_class=RawTextHelpFormatter)
42 |
43 | parser.add_argument("-d", "--data-folder", help='Define an alternate data folder location')
44 | parser.add_argument("-D", "--debug", action='store_true', help='Use to enable DEBUG logging. (Deprecated)')
45 | parser.add_argument("-ND", "--no_debug", action='store_true', help='Use to disable DEBUG logging')
46 |
47 | opts = parser.parse_args()
48 |
49 | templogger = getLogger('temp')
50 | templogger.setLevel(DEBUG)
51 | tempch = StreamHandler()
52 | tempformatter = Formatter('%(asctime)s : %(levelname)s : %(module)s : %(message)s', '%Y-%m-%d %H:%M:%S')
53 | tempch.setFormatter(tempformatter)
54 | templogger.addHandler(tempch)
55 |
56 | DATA_FOLDER = env.get('DATA_FOLDER', vars(opts).get('data_folder') or abspath(join(dirname(__file__), 'data')))
57 |
58 | if isdir(DATA_FOLDER):
59 | if not access(DATA_FOLDER, R_OK):
60 | templogger.error("Read permission error for %s", DATA_FOLDER)
61 | exit(1)
62 | else:
63 | templogger.error("%s does not exist", DATA_FOLDER)
64 | exit(1)
65 |
66 | # Set Debug to True if DEBUG env is set
67 | enable_opts = ['True', 'true', 'yes']
68 | debug_opts = ['debug', 'Debug', 'DEBUG']
69 |
70 | opts.debug = True
71 |
72 | if getenv('DEBUG') is not None:
73 | opts.debug = True if any([getenv(string, False) for true in enable_opts
74 | for string in debug_opts if getenv(string, False) == true]) else False
75 |
76 | elif opts.no_debug:
77 | opts.debug = False
78 |
79 | # Initiate the logger
80 | vl = VarkenLogger(data_folder=DATA_FOLDER, debug=opts.debug)
81 | vl.logger.info('Starting Varken...')
82 |
83 | vl.logger.info('Data folder is "%s"', DATA_FOLDER)
84 |
85 | vl.logger.info(u"%s %s (%s%s)", platform.system(), platform.release(), platform.version(),
86 | ' - ' + PLATFORM_LINUX_DISTRO if PLATFORM_LINUX_DISTRO else '')
87 |
88 | vl.logger.info(u"Python %s", version)
89 |
90 | vl.logger.info("Varken v%s-%s %s", VERSION, BRANCH, BUILD_DATE)
91 |
92 | CONFIG = INIParser(DATA_FOLDER)
93 | DBMANAGER = DBManager(CONFIG.influx_server)
94 | QUEUE = Queue()
95 |
96 | if CONFIG.sonarr_enabled:
97 | for server in CONFIG.sonarr_servers:
98 | SONARR = SonarrAPI(server, DBMANAGER)
99 | if server.queue:
100 | at_time = schedule.every(server.queue_run_seconds).seconds
101 | at_time.do(thread, SONARR.get_queue).tag("sonarr-{}-get_queue".format(server.id))
102 | if server.missing_days > 0:
103 | at_time = schedule.every(server.missing_days_run_seconds).seconds
104 | at_time.do(thread, SONARR.get_calendar, query="Missing").tag("sonarr-{}-get_missing".format(server.id))
105 | if server.future_days > 0:
106 | at_time = schedule.every(server.future_days_run_seconds).seconds
107 | at_time.do(thread, SONARR.get_calendar, query="Future").tag("sonarr-{}-get_future".format(server.id))
108 |
109 | if CONFIG.tautulli_enabled:
110 | GEOIPHANDLER = GeoIPHandler(DATA_FOLDER, CONFIG.tautulli_servers[0].maxmind_license_key)
111 | schedule.every(12).to(24).hours.do(thread, GEOIPHANDLER.update)
112 | for server in CONFIG.tautulli_servers:
113 | TAUTULLI = TautulliAPI(server, DBMANAGER, GEOIPHANDLER)
114 | if server.get_activity:
115 | at_time = schedule.every(server.get_activity_run_seconds).seconds
116 | at_time.do(thread, TAUTULLI.get_activity).tag("tautulli-{}-get_activity".format(server.id))
117 | if server.get_stats:
118 | at_time = schedule.every(server.get_stats_run_seconds).seconds
119 | at_time.do(thread, TAUTULLI.get_stats).tag("tautulli-{}-get_stats".format(server.id))
120 |
121 | if CONFIG.radarr_enabled:
122 | for server in CONFIG.radarr_servers:
123 | RADARR = RadarrAPI(server, DBMANAGER)
124 | if server.get_missing:
125 | at_time = schedule.every(server.get_missing_run_seconds).seconds
126 | at_time.do(thread, RADARR.get_missing).tag("radarr-{}-get_missing".format(server.id))
127 | if server.queue:
128 | at_time = schedule.every(server.queue_run_seconds).seconds
129 | at_time.do(thread, RADARR.get_queue).tag("radarr-{}-get_queue".format(server.id))
130 |
131 | if CONFIG.lidarr_enabled:
132 | for server in CONFIG.lidarr_servers:
133 | LIDARR = LidarrAPI(server, DBMANAGER)
134 | if server.queue:
135 | at_time = schedule.every(server.queue_run_seconds).seconds
136 | at_time.do(thread, LIDARR.get_queue).tag("lidarr-{}-get_queue".format(server.id))
137 | if server.missing_days > 0:
138 | at_time = schedule.every(server.missing_days_run_seconds).seconds
139 | at_time.do(thread, LIDARR.get_calendar, query="Missing").tag(
140 | "lidarr-{}-get_missing".format(server.id))
141 | if server.future_days > 0:
142 | at_time = schedule.every(server.future_days_run_seconds).seconds
143 | at_time.do(thread, LIDARR.get_calendar, query="Future").tag("lidarr-{}-get_future".format(
144 | server.id))
145 |
146 | if CONFIG.ombi_enabled:
147 | for server in CONFIG.ombi_servers:
148 | OMBI = OmbiAPI(server, DBMANAGER)
149 | if server.request_type_counts:
150 | at_time = schedule.every(server.request_type_run_seconds).seconds
151 | at_time.do(thread, OMBI.get_request_counts).tag("ombi-{}-get_request_counts".format(server.id))
152 | if server.request_total_counts:
153 | at_time = schedule.every(server.request_total_run_seconds).seconds
154 | at_time.do(thread, OMBI.get_all_requests).tag("ombi-{}-get_all_requests".format(server.id))
155 | if server.issue_status_counts:
156 | at_time = schedule.every(server.issue_status_run_seconds).seconds
157 | at_time.do(thread, OMBI.get_issue_counts).tag("ombi-{}-get_issue_counts".format(server.id))
158 |
159 | if CONFIG.sickchill_enabled:
160 | for server in CONFIG.sickchill_servers:
161 | SICKCHILL = SickChillAPI(server, DBMANAGER)
162 | if server.get_missing:
163 | at_time = schedule.every(server.get_missing_run_seconds).seconds
164 | at_time.do(thread, SICKCHILL.get_missing).tag("sickchill-{}-get_missing".format(server.id))
165 |
166 | if CONFIG.unifi_enabled:
167 | for server in CONFIG.unifi_servers:
168 | UNIFI = UniFiAPI(server, DBMANAGER)
169 | at_time = schedule.every(server.get_usg_stats_run_seconds).seconds
170 | at_time.do(thread, UNIFI.get_usg_stats).tag("unifi-{}-get_usg_stats".format(server.id))
171 |
172 | # Run all on startup
173 | SERVICES_ENABLED = [CONFIG.ombi_enabled, CONFIG.radarr_enabled, CONFIG.tautulli_enabled, CONFIG.unifi_enabled,
174 | CONFIG.sonarr_enabled, CONFIG.sickchill_enabled, CONFIG.lidarr_enabled]
175 | if not [enabled for enabled in SERVICES_ENABLED if enabled]:
176 | vl.logger.error("All services disabled. Exiting")
177 | exit(1)
178 |
179 | schedule.run_all()
180 |
181 | while schedule.jobs:
182 | schedule.run_pending()
183 | sleep(1)
184 |
--------------------------------------------------------------------------------
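The DEBUG-environment check in Varken.py (the enable_opts/debug_opts block) is fairly dense. A simplified sketch of what that condition evaluates, included only for readability and assuming the intent is "any of the debug/Debug/DEBUG variables set to True/true/yes"; it is not a proposed change to the source:

```python
from os import getenv


def debug_enabled():
    """True when any of the debug/Debug/DEBUG variables is set to True, true or yes."""
    return any(getenv(name, '') in ('True', 'true', 'yes')
               for name in ('debug', 'Debug', 'DEBUG'))
```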
/assets/varken_full_banner.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Boerderij/Varken/ec79d22df715d53cd92e1c04e215e602be3b9304/assets/varken_full_banner.jpg
--------------------------------------------------------------------------------
/assets/varken_full_banner_transparent.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Boerderij/Varken/ec79d22df715d53cd92e1c04e215e602be3b9304/assets/varken_full_banner_transparent.png
--------------------------------------------------------------------------------
/assets/varken_head_only transparent.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Boerderij/Varken/ec79d22df715d53cd92e1c04e215e602be3b9304/assets/varken_head_only transparent.png
--------------------------------------------------------------------------------
/assets/varken_head_only_primary_discord.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Boerderij/Varken/ec79d22df715d53cd92e1c04e215e602be3b9304/assets/varken_head_only_primary_discord.png
--------------------------------------------------------------------------------
/assets/varken_original.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Boerderij/Varken/ec79d22df715d53cd92e1c04e215e602be3b9304/assets/varken_original.jpg
--------------------------------------------------------------------------------
/data/varken.example.ini:
--------------------------------------------------------------------------------
1 | [global]
2 | sonarr_server_ids = 1,2
3 | radarr_server_ids = 1,2
4 | lidarr_server_ids = false
5 | tautulli_server_ids = 1
6 | ombi_server_ids = 1
7 | sickchill_server_ids = false
8 | unifi_server_ids = false
9 | maxmind_license_key = xxxxxxxxxxxxxxxx
10 |
11 | [influxdb]
12 | url = influxdb.domain.tld
13 | port = 8086
14 | ssl = false
15 | verify_ssl = false
16 | username = root
17 | password = root
18 |
19 | [tautulli-1]
20 | url = tautulli.domain.tld:8181
21 | fallback_ip = 1.1.1.1
22 | apikey = xxxxxxxxxxxxxxxx
23 | ssl = false
24 | verify_ssl = false
25 | get_activity = true
26 | get_activity_run_seconds = 30
27 | get_stats = true
28 | get_stats_run_seconds = 3600
29 |
30 | [sonarr-1]
31 | url = sonarr1.domain.tld:8989
32 | apikey = xxxxxxxxxxxxxxxx
33 | ssl = false
34 | verify_ssl = false
35 | missing_days = 7
36 | missing_days_run_seconds = 300
37 | future_days = 1
38 | future_days_run_seconds = 300
39 | queue = true
40 | queue_run_seconds = 300
41 |
42 | [sonarr-2]
43 | url = sonarr2.domain.tld:8989
44 | apikey = yyyyyyyyyyyyyyyy
45 | ssl = false
46 | verify_ssl = false
47 | missing_days = 7
48 | missing_days_run_seconds = 300
49 | future_days = 1
50 | future_days_run_seconds = 300
51 | queue = true
52 | queue_run_seconds = 300
53 |
54 | [radarr-1]
55 | url = radarr1.domain.tld
56 | apikey = xxxxxxxxxxxxxxxx
57 | ssl = false
58 | verify_ssl = false
59 | queue = true
60 | queue_run_seconds = 300
61 | get_missing = true
62 | get_missing_run_seconds = 300
63 |
64 | [radarr-2]
65 | url = radarr2.domain.tld
66 | apikey = yyyyyyyyyyyyyyyy
67 | ssl = false
68 | verify_ssl = false
69 | queue = true
70 | queue_run_seconds = 300
71 | get_missing = true
72 | get_missing_run_seconds = 300
73 |
74 | [lidarr-1]
75 | url = lidarr1.domain.tld:8686
76 | apikey = xxxxxxxxxxxxxxxx
77 | ssl = false
78 | verify_ssl = false
79 | missing_days = 30
80 | missing_days_run_seconds = 300
81 | future_days = 30
82 | future_days_run_seconds = 300
83 | queue = true
84 | queue_run_seconds = 300
85 |
86 | [ombi-1]
87 | url = ombi.domain.tld
88 | apikey = xxxxxxxxxxxxxxxx
89 | ssl = false
90 | verify_ssl = false
91 | get_request_type_counts = true
92 | request_type_run_seconds = 300
93 | get_request_total_counts = true
94 | request_total_run_seconds = 300
95 | get_issue_status_counts = true
96 | issue_status_run_seconds = 300
97 |
98 | [sickchill-1]
99 | url = sickchill.domain.tld:8081
100 | apikey = xxxxxxxxxxxxxxxx
101 | ssl = false
102 | verify_ssl = false
103 | get_missing = true
104 | get_missing_run_seconds = 300
105 |
106 | [unifi-1]
107 | url = unifi.domain.tld:8443
108 | username = ubnt
109 | password = ubnt
110 | site = default
111 | usg_name = MyRouter
112 | ssl = false
113 | verify_ssl = false
114 | get_usg_stats_run_seconds = 300
--------------------------------------------------------------------------------
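Each `<module>_server_ids` entry in [global] points at numbered sections later in the file: `sonarr_server_ids = 1,2` pairs with [sonarr-1] and [sonarr-2], while `false` disables a module entirely. A small standard-library sketch of that convention (not Varken's own iniparser):

```python
from configparser import ConfigParser

config = ConfigParser(interpolation=None)
config.read("data/varken.example.ini")

for key, value in config["global"].items():
    if not key.endswith("_server_ids") or value.lower() == "false":
        continue  # e.g. lidarr_server_ids = false disables that module
    module = key[: -len("_server_ids")]
    for server_id in value.split(","):
        section = f"{module}-{server_id.strip()}"
        print(section, "->", config.has_section(section))
```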
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3'
2 | networks:
3 | internal:
4 | driver: bridge
5 | services:
6 | influxdb:
7 | hostname: influxdb
8 | container_name: influxdb
9 | image: influxdb
10 | networks:
11 | - internal
12 | volumes:
13 | - /path/to/docker-influxdb/config-folder:/var/lib/influxdb
14 | restart: unless-stopped
15 | varken:
16 | hostname: varken
17 | container_name: varken
18 | image: boerderij/varken
19 | networks:
20 | - internal
21 | volumes:
22 | - /path/to/docker-varken/config-folder:/config
23 | environment:
24 | - TZ=America/Chicago
25 | - VRKN_GLOBAL_SONARR_SERVER_IDS=1,2
26 | - VRKN_GLOBAL_RADARR_SERVER_IDS=1,2
27 | - VRKN_GLOBAL_LIDARR_SERVER_IDS=false
28 | - VRKN_GLOBAL_TAUTULLI_SERVER_IDS=1
29 | - VRKN_GLOBAL_OMBI_SERVER_IDS=1
30 | - VRKN_GLOBAL_SICKCHILL_SERVER_IDS=false
31 | - VRKN_GLOBAL_UNIFI_SERVER_IDS=false
32 | - VRKN_GLOBAL_MAXMIND_LICENSE_KEY=xxxxxxxxxxxxxxxx
33 | - VRKN_INFLUXDB_URL=influxdb.domain.tld
34 | - VRKN_INFLUXDB_PORT=8086
35 | - VRKN_INFLUXDB_SSL=false
36 | - VRKN_INFLUXDB_VERIFY_SSL=false
37 | - VRKN_INFLUXDB_USERNAME=root
38 | - VRKN_INFLUXDB_PASSWORD=root
39 | - VRKN_TAUTULLI_1_URL=tautulli.domain.tld:8181
40 | - VRKN_TAUTULLI_1_FALLBACK_IP=1.1.1.1
41 | - VRKN_TAUTULLI_1_APIKEY=xxxxxxxxxxxxxxxx
42 | - VRKN_TAUTULLI_1_SSL=false
43 | - VRKN_TAUTULLI_1_VERIFY_SSL=false
44 | - VRKN_TAUTULLI_1_GET_ACTIVITY=true
45 | - VRKN_TAUTULLI_1_GET_ACTIVITY_RUN_SECONDS=30
46 | - VRKN_TAUTULLI_1_GET_STATS=true
47 | - VRKN_TAUTULLI_1_GET_STATS_RUN_SECONDS=3600
48 | - VRKN_SONARR_1_URL=sonarr1.domain.tld:8989
49 | - VRKN_SONARR_1_APIKEY=xxxxxxxxxxxxxxxx
50 | - VRKN_SONARR_1_SSL=false
51 | - VRKN_SONARR_1_VERIFY_SSL=false
52 | - VRKN_SONARR_1_MISSING_DAYS=7
53 | - VRKN_SONARR_1_MISSING_DAYS_RUN_SECONDS=300
54 | - VRKN_SONARR_1_FUTURE_DAYS=1
55 | - VRKN_SONARR_1_FUTURE_DAYS_RUN_SECONDS=300
56 | - VRKN_SONARR_1_QUEUE=true
57 | - VRKN_SONARR_1_QUEUE_RUN_SECONDS=300
58 | - VRKN_SONARR_2_URL=sonarr2.domain.tld:8989
59 | - VRKN_SONARR_2_APIKEY=yyyyyyyyyyyyyyyy
60 | - VRKN_SONARR_2_SSL=false
61 | - VRKN_SONARR_2_VERIFY_SSL=false
62 | - VRKN_SONARR_2_MISSING_DAYS=7
63 | - VRKN_SONARR_2_MISSING_DAYS_RUN_SECONDS=300
64 | - VRKN_SONARR_2_FUTURE_DAYS=1
65 | - VRKN_SONARR_2_FUTURE_DAYS_RUN_SECONDS=300
66 | - VRKN_SONARR_2_QUEUE=true
67 | - VRKN_SONARR_2_QUEUE_RUN_SECONDS=300
68 | - VRKN_RADARR_1_URL=radarr1.domain.tld
69 | - VRKN_RADARR_1_APIKEY=xxxxxxxxxxxxxxxx
70 | - VRKN_RADARR_1_SSL=false
71 | - VRKN_RADARR_1_VERIFY_SSL=false
72 | - VRKN_RADARR_1_QUEUE=true
73 | - VRKN_RADARR_1_QUEUE_RUN_SECONDS=300
74 | - VRKN_RADARR_1_GET_MISSING=true
75 | - VRKN_RADARR_1_GET_MISSING_RUN_SECONDS=300
76 | - VRKN_RADARR_2_URL=radarr2.domain.tld
77 | - VRKN_RADARR_2_APIKEY=yyyyyyyyyyyyyyyy
78 | - VRKN_RADARR_2_SSL=false
79 | - VRKN_RADARR_2_VERIFY_SSL=false
80 | - VRKN_RADARR_2_QUEUE=true
81 | - VRKN_RADARR_2_QUEUE_RUN_SECONDS=300
82 | - VRKN_RADARR_2_GET_MISSING=true
83 | - VRKN_RADARR_2_GET_MISSING_RUN_SECONDS=300
84 | - VRKN_LIDARR_1_URL=lidarr1.domain.tld:8686
85 | - VRKN_LIDARR_1_APIKEY=xxxxxxxxxxxxxxxx
86 | - VRKN_LIDARR_1_SSL=false
87 | - VRKN_LIDARR_1_VERIFY_SSL=false
88 | - VRKN_LIDARR_1_MISSING_DAYS=30
89 | - VRKN_LIDARR_1_MISSING_DAYS_RUN_SECONDS=300
90 | - VRKN_LIDARR_1_FUTURE_DAYS=30
91 | - VRKN_LIDARR_1_FUTURE_DAYS_RUN_SECONDS=300
92 | - VRKN_LIDARR_1_QUEUE=true
93 | - VRKN_LIDARR_1_QUEUE_RUN_SECONDS=300
94 | - VRKN_OMBI_1_URL=ombi.domain.tld
95 | - VRKN_OMBI_1_APIKEY=xxxxxxxxxxxxxxxx
96 | - VRKN_OMBI_1_SSL=false
97 | - VRKN_OMBI_1_VERIFY_SSL=false
98 | - VRKN_OMBI_1_GET_REQUEST_TYPE_COUNTS=true
99 | - VRKN_OMBI_1_REQUEST_TYPE_RUN_SECONDS=300
100 | - VRKN_OMBI_1_GET_REQUEST_TOTAL_COUNTS=true
101 | - VRKN_OMBI_1_REQUEST_TOTAL_RUN_SECONDS=300
102 | - VRKN_OMBI_1_GET_ISSUE_STATUS_COUNTS=true
103 | - VRKN_OMBI_1_ISSUE_STATUS_RUN_SECONDS=300
104 | - VRKN_SICKCHILL_1_URL=sickchill.domain.tld:8081
105 | - VRKN_SICKCHILL_1_APIKEY=xxxxxxxxxxxxxxxx
106 | - VRKN_SICKCHILL_1_SSL=false
107 | - VRKN_SICKCHILL_1_VERIFY_SSL=false
108 | - VRKN_SICKCHILL_1_GET_MISSING=true
109 | - VRKN_SICKCHILL_1_GET_MISSING_RUN_SECONDS=300
110 | depends_on:
111 | - influxdb
112 | restart: unless-stopped
113 | grafana:
114 | hostname: grafana
115 | container_name: grafana
116 | image: grafana/grafana
117 | networks:
118 | - internal
119 | ports:
120 | - 3000:3000
121 | volumes:
122 | - /path/to/docker-grafana/config-folder:/config
123 | environment:
124 | - GF_PATHS_DATA=/config/data
125 | - GF_PATHS_LOGS=/config/logs
126 | - GF_PATHS_PLUGINS=/config/plugins
127 | - GF_INSTALL_PLUGINS=grafana-piechart-panel,grafana-worldmap-panel
128 | depends_on:
129 | - influxdb
130 | - varken
131 | restart: unless-stopped
--------------------------------------------------------------------------------
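The VRKN_* variables in the varken service mirror the keys in data/varken.example.ini: VRKN_<SECTION>_<KEY>, with numbered sections such as [tautulli-1] written as TAUTULLI_1. A rough sketch of that apparent naming convention, drawn only from comparing the two files (an assumption, not Varken's actual parser):

```python
import re
from typing import Optional, Tuple


def env_to_ini(name: str) -> Optional[Tuple[str, str]]:
    """VRKN_TAUTULLI_1_GET_ACTIVITY -> ('tautulli-1', 'get_activity')."""
    match = re.fullmatch(r"VRKN_([A-Z]+)(?:_(\d+))?_(.+)", name)
    if not match:
        return None
    module, server_id, key = match.groups()
    section = module.lower() if server_id is None else f"{module.lower()}-{server_id}"
    return section, key.lower()


print(env_to_ini("VRKN_INFLUXDB_URL"))                # ('influxdb', 'url')
print(env_to_ini("VRKN_SONARR_2_QUEUE_RUN_SECONDS"))  # ('sonarr-2', 'queue_run_seconds')
```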
/requirements.txt:
--------------------------------------------------------------------------------
1 | #---------------------------------------------------------
2 | # Potential requirements.
3 | # pip3 install -r requirements.txt
4 | #---------------------------------------------------------
5 | requests==2.21
6 | geoip2==2.9.0
7 | influxdb==5.2.0
8 | schedule==0.6.0
9 | distro==1.4.0
10 | urllib3==1.24.2
--------------------------------------------------------------------------------
/utilities/grafana_build.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # To use:
3 | # docker exec -it varken cp /app/data/utilities/grafana_build.py /config/grafana_build.py
4 | # nano /opt/dockerconfigs/varken/grafana_build.py # Edit vars. This assumes you have your persistent data there
5 | # docker exec -it varken python3 /config/grafana_build.py
6 | from sys import exit
7 | from requests import Session
8 | from json.decoder import JSONDecodeError
9 |
10 | docker = True # True if using a docker container, False if not
11 | host_ip = '127.0.0.1' # Only relevant if docker = False
12 | username = 'admin' # Grafana username
13 | password = 'admin' # Grafana password
14 | grafana_url = 'http://grafana:3000'
15 | verify = False # Verify SSL
16 |
17 | # Do not remove any of these, just change the ones you use
18 | movies_library = 'Movies'
19 | fourk_movies_library = 'Movies 4K'
20 | tv_shows_library = 'TV Shows'
21 | fourk_tv_shows_library = 'TV Shows 4K'
22 | music_library = 'Music'
23 | usg_name = 'Gateway'
24 | ombi_url = 'https://yourdomain.com/ombi'
25 | tautulli_url = 'https://yourdomain.com/tautulli'
26 | sonarr_url = 'https://yourdomain.com/sonarr'
27 | radarr_url = 'https://yourdomain.com/radarr'
28 | sickchill_url = 'https://yourdomain.com/sickchill'
29 | lidarr_url = 'https://yourdomain.com/lidarr'
30 |
31 | # Do not edit past this line #
32 | session = Session()
33 | auth = (username, password)
34 | url_base = f"{grafana_url.rstrip('/')}/api"
35 |
36 | varken_datasource = []
37 | datasource_name = "Varken-Script"
38 | try:
39 | datasources = session.get(url_base + '/datasources', auth=auth, verify=verify).json()
40 | varken_datasource = [source for source in datasources if source['database'] == 'varken']
41 | if varken_datasource:
42 | print(f'varken datasource already exists with the name "{varken_datasource[0]["name"]}"')
43 | datasource_name = varken_datasource[0]["name"]
44 | except JSONDecodeError:
45 | exit(f"Could not talk to grafana at {grafana_url}. Check URL/Username/Password")
46 |
47 | if not varken_datasource:
48 | datasource_data = {
49 | "name": datasource_name,
50 | "type": "influxdb",
51 | "url": f"http://{'influxdb' if docker else host_ip}:8086",
52 | "access": "proxy",
53 | "basicAuth": False,
54 | "database": 'varken'
55 | }
56 | post = session.post(url_base + '/datasources', auth=auth, verify=verify, json=datasource_data).json()
57 | print(f'Created {datasource_name} datasource (id:{post["datasource"]["id"]})')
58 |
59 | our_dashboard = session.get(url_base + '/gnet/dashboards/9585', auth=auth, verify=verify).json()['json']
60 | dashboard_data = {
61 | "dashboard": our_dashboard,
62 | "overwrite": True,
63 | "inputs": [
64 | {
65 | "name": "DS_VARKEN",
66 | "label": "varken",
67 | "description": "",
68 | "type": "datasource",
69 | "pluginId": "influxdb",
70 | "pluginName": "InfluxDB",
71 | "value": datasource_name
72 | },
73 | {
74 | "name": "VAR_MOVIESLIBRARY",
75 | "type": "constant",
76 | "label": "Movies Library Name",
77 | "value": movies_library,
78 | "description": ""
79 | },
80 | {
81 | "name": "VAR_MOVIES4KLIBRARY",
82 | "type": "constant",
83 | "label": "4K Movies Library Name",
84 | "value": fourk_movies_library,
85 | "description": ""
86 | },
87 | {
88 | "name": "VAR_TVLIBRARY",
89 | "type": "constant",
90 | "label": "TV Library Name",
91 | "value": tv_shows_library,
92 | "description": ""
93 | },
94 | {
95 | "name": "VAR_TV4KLIBRARY",
96 | "type": "constant",
97 | "label": "TV 4K Library Name",
98 | "value": fourk_tv_shows_library,
99 | "description": ""
100 | },
101 | {
102 | "name": "VAR_MUSICLIBRARY",
103 | "type": "constant",
104 | "label": "Music Library Name",
105 | "value": music_library,
106 | "description": ""
107 | },
108 | {
109 | "name": "VAR_USGNAME",
110 | "type": "constant",
111 | "label": "Unifi USG Name",
112 | "value": usg_name,
113 | "description": ""
114 | },
115 | {
116 | "name": "VAR_OMBIURL",
117 | "type": "constant",
118 | "label": "Ombi URL",
119 | "value": ombi_url,
120 | "description": ""
121 | },
122 | {
123 | "name": "VAR_TAUTULLIURL",
124 | "type": "constant",
125 | "label": "Tautulli URL",
126 | "value": tautulli_url,
127 | "description": ""
128 | },
129 | {
130 | "name": "VAR_SONARRURL",
131 | "type": "constant",
132 | "label": "Sonarr URL",
133 | "value": sonarr_url,
134 | "description": ""
135 | },
136 | {
137 | "name": "VAR_RADARRURL",
138 | "type": "constant",
139 | "label": "Radarr URL",
140 | "value": radarr_url,
141 | "description": ""
142 | },
143 | {
144 | "name": "VAR_SICKCHILLURL",
145 | "type": "constant",
146 | "label": "Sickchill URL",
147 | "value": sickchill_url,
148 | "description": ""
149 | },
150 | {
151 | "name": "VAR_LIDARRURL",
152 | "type": "constant",
153 | "label": "lidarr URL",
154 | "value": lidarr_url,
155 | "description": ""
156 | }
157 | ]
158 | }
159 | try:
160 | make_dashboard = session.post(url_base + '/dashboards/import', json=dashboard_data, auth=auth, verify=verify)
161 | if make_dashboard.status_code == 200 and make_dashboard.json().get('imported'):
162 | print(f'Created dashboard "{our_dashboard["title"]}"')
163 | except Exception as e:
164 |     print(f'Could not import the dashboard: {e}')
165 |
--------------------------------------------------------------------------------
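A minimal preflight sketch (not part of the repository) for confirming Grafana is reachable before running grafana_build.py; the URL and credentials below are placeholders matching the script's defaults:

    from requests import Session

    grafana_url = 'http://grafana:3000'      # or http://<host_ip>:3000 when docker = False
    auth = ('admin', 'admin')                # same credentials the script will use

    # Grafana's /api/health endpoint needs no auth and returns JSON including "database": "ok"
    health = Session().get(f"{grafana_url.rstrip('/')}/api/health", verify=False)
    print(health.status_code, health.json())

    # The script queries /api/datasources first, so a 200 here means the credentials will work
    datasources = Session().get(f"{grafana_url.rstrip('/')}/api/datasources", auth=auth, verify=False)
    print(datasources.status_code)
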
/utilities/historical_tautulli_import.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from argparse import ArgumentParser
3 | from os import access, R_OK
4 | from os.path import isdir, abspath, dirname, join
5 | from logging import getLogger, StreamHandler, Formatter, DEBUG
6 |
7 | from varken.iniparser import INIParser
8 | from varken.dbmanager import DBManager
9 | from varken.helpers import GeoIPHandler
10 | from varken.tautulli import TautulliAPI
11 |
12 | if __name__ == "__main__":
13 | parser = ArgumentParser(prog='varken',
14 | description='Tautulli historical import tool')
15 | parser.add_argument("-d", "--data-folder", help='Define an alternate data folder location')
16 | parser.add_argument("-D", "--days", default=30, type=int, help='Specify length of historical import')
17 | opts = parser.parse_args()
18 |
19 | DATA_FOLDER = abspath(join(dirname(__file__), '..', 'data'))
20 |
21 | templogger = getLogger('temp')
22 | templogger.setLevel(DEBUG)
23 | tempch = StreamHandler()
24 | tempformatter = Formatter('%(asctime)s : %(levelname)s : %(module)s : %(message)s', '%Y-%m-%d %H:%M:%S')
25 | tempch.setFormatter(tempformatter)
26 | templogger.addHandler(tempch)
27 |
28 | if opts.data_folder:
29 | ARG_FOLDER = opts.data_folder
30 |
31 | if isdir(ARG_FOLDER):
32 | DATA_FOLDER = ARG_FOLDER
33 | if not access(DATA_FOLDER, R_OK):
34 | templogger.error("Read permission error for %s", DATA_FOLDER)
35 | exit(1)
36 | else:
37 | templogger.error("%s does not exist", ARG_FOLDER)
38 | exit(1)
39 |
40 | CONFIG = INIParser(DATA_FOLDER)
41 | DBMANAGER = DBManager(CONFIG.influx_server)
42 |
43 | if CONFIG.tautulli_enabled:
44 |         GEOIPHANDLER = GeoIPHandler(DATA_FOLDER, CONFIG.tautulli_servers[0].maxmind_license_key)
45 | for server in CONFIG.tautulli_servers:
46 | TAUTULLI = TautulliAPI(server, DBMANAGER, GEOIPHANDLER)
47 | TAUTULLI.get_historical(days=opts.days)
48 |
--------------------------------------------------------------------------------
/varken.systemd:
--------------------------------------------------------------------------------
1 | # Varken - Command-line utility to aggregate data from the Plex ecosystem into InfluxDB.
2 | #
3 | # Service Unit file for systemd system manager
4 | #
5 | # INSTALLATION NOTES
6 | #
7 | # 1. Copy this file into your systemd service unit directory (often '/lib/systemd/system')
8 | # and name it 'varken.service' with the following command:
9 | # cp /opt/Varken/varken.systemd /lib/systemd/system/varken.service
10 | #
11 | # 2. Edit the new varken.service file with configuration settings as required.
12 | # More details in the "CONFIGURATION NOTES" section shown below.
13 | #
14 | # 3. Enable boot-time autostart with the following commands:
15 | # systemctl daemon-reload
16 | # systemctl enable varken.service
17 | #
18 | # 4. Start now with the following command:
19 | # systemctl start varken.service
20 | #
21 | # CONFIGURATION NOTES
22 | #
23 | # - The example settings in this file assume that you will run varken as user: varken
24 | # - The example settings in this file assume that varken is installed to: /opt/Varken
25 | #
26 | # - To create this user and give it ownership of the Varken directory:
27 | # Ubuntu/Debian: sudo addgroup varken && sudo adduser --system --no-create-home varken --ingroup varken
28 | # CentOS/Fedora: sudo adduser --system --no-create-home varken
29 | # sudo chown varken:varken -R /opt/Varken
30 | #
31 | # - Adjust User= and Group= to the user/group you want Varken to run as.
32 | #
33 | # - WantedBy= specifies which target (i.e. runlevel) to start Varken for.
34 | # multi-user.target equates to runlevel 3 (multi-user text mode)
35 | # graphical.target equates to runlevel 5 (multi-user X11 graphical mode)
36 |
37 | [Unit]
38 | Description=Varken - Command-line utility to aggregate data from the Plex ecosystem into InfluxDB.
39 | After=network-online.target
40 | StartLimitInterval=200
41 | StartLimitBurst=3
42 |
43 | [Service]
44 | Type=simple
45 | User=varken
46 | Group=varken
47 | WorkingDirectory=/opt/Varken
48 | ExecStart=/opt/Varken/varken-venv/bin/python /opt/Varken/Varken.py
49 | Restart=always
50 | RestartSec=30
51 |
52 | [Install]
53 | WantedBy=multi-user.target
54 |
--------------------------------------------------------------------------------
/varken.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | Varken
5 | boerderij/varken
6 | https://hub.docker.com/r/boerderij/varken/~/dockerfile/
7 | bridge
8 |
9 | sh
10 | false
11 | https://discord.gg/VjZ6qSM
12 |
13 |
14 | Varken is a standalone command-line utility to aggregate data from the Plex ecosystem into InfluxDB. Examples use Grafana for a frontend
15 |
16 |
17 |
18 |
19 | Pig.png
20 |
21 |
22 |
23 |
24 |
25 |
26 | Varken is a standalone command-line utility to aggregate data from the Plex ecosystem into InfluxDB. Examples use Grafana for a frontend
27 |
28 |
29 | bridge
30 |
31 |
32 |
33 |
34 | /mnt/user/appdata/varken
35 | /config
36 | rw
37 |
38 |
39 |
40 |
41 | 99
42 | PGID
43 |
44 |
45 |
46 | 100
47 | PUID
48 |
49 |
50 |
51 |
52 | 99
53 | 100
54 | /mnt/user/appdata/varken
55 |
--------------------------------------------------------------------------------
/varken/__init__.py:
--------------------------------------------------------------------------------
1 | VERSION = "0.0.0"
2 | BRANCH = 'develop'
3 | BUILD_DATE = '1/1/1970'
4 |
--------------------------------------------------------------------------------
/varken/dbmanager.py:
--------------------------------------------------------------------------------
1 | from sys import exit
2 | from logging import getLogger
3 | from influxdb import InfluxDBClient
4 | from requests.exceptions import ConnectionError
5 | from influxdb.exceptions import InfluxDBServerError
6 |
7 |
8 | class DBManager(object):
9 | def __init__(self, server):
10 | self.server = server
11 | self.logger = getLogger()
12 | if self.server.url == "influxdb.domain.tld":
13 | self.logger.critical("You have not configured your varken.ini. Please read Wiki page for configuration")
14 | exit()
15 | self.influx = InfluxDBClient(host=self.server.url, port=self.server.port, username=self.server.username,
16 | password=self.server.password, ssl=self.server.ssl, database='varken',
17 | verify_ssl=self.server.verify_ssl)
18 | try:
19 | version = self.influx.request('ping', expected_response_code=204).headers['X-Influxdb-Version']
20 | self.logger.info('Influxdb version: %s', version)
21 | except ConnectionError:
22 | self.logger.critical("Error testing connection to InfluxDB. Please check your url/hostname")
23 | exit(1)
24 |
25 | databases = [db['name'] for db in self.influx.get_list_database()]
26 |
27 | if 'varken' not in databases:
28 | self.logger.info("Creating varken database")
29 | self.influx.create_database('varken')
30 |
31 | retention_policies = [policy['name'] for policy in
32 | self.influx.get_list_retention_policies(database='varken')]
33 | if 'varken 30d-1h' not in retention_policies:
34 | self.logger.info("Creating varken retention policy (30d-1h)")
35 | self.influx.create_retention_policy(name='varken 30d-1h', duration='30d', replication='1',
36 | database='varken', default=True, shard_duration='1h')
37 |
38 | def write_points(self, data):
39 | d = data
40 | self.logger.debug('Writing Data to InfluxDB %s', d)
41 | try:
42 | self.influx.write_points(d)
43 | except (InfluxDBServerError, ConnectionError) as e:
44 | self.logger.error('Error writing data to influxdb. Dropping this set of data. '
45 | 'Check your database! Error: %s', e)
46 |
--------------------------------------------------------------------------------
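A minimal usage sketch (hostname and credentials are assumptions, not shipped defaults) of how the collector modules feed this class: build an InfluxServer tuple, hand it to DBManager, and pass write_points a list of point dictionaries in the shape used throughout the service modules:

    from varken.structures import InfluxServer
    from varken.dbmanager import DBManager

    # Assumed connection details; in Varken these come from varken.ini or VRKN_INFLUXDB_* variables.
    influx = InfluxServer(url='influxdb.local', port=8086, username='root', password='root',
                          ssl=False, verify_ssl=False)
    dbmanager = DBManager(influx)   # pings InfluxDB and creates the 'varken' database if missing

    # Points follow the same measurement/tags/time/fields layout the collectors build.
    dbmanager.write_points([{
        "measurement": "Example",
        "tags": {"server": 1, "type": "Demo"},
        "time": "2019-01-01T00:00:00+00:00",
        "fields": {"hash": "d41d8cd98f00b204e9800998ecf8427e"}
    }])
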
/varken/helpers.py:
--------------------------------------------------------------------------------
1 | from hashlib import md5
2 | from datetime import date, timedelta
3 | from time import sleep
4 | from logging import getLogger
5 | from ipaddress import IPv4Address
6 | from urllib.error import HTTPError, URLError
7 | from geoip2.database import Reader
8 | from tarfile import open as taropen
9 | from urllib3 import disable_warnings
10 | from os import stat, remove, makedirs
11 | from urllib.request import urlretrieve
12 | from json.decoder import JSONDecodeError
13 | from os.path import abspath, join, basename, isdir
14 | from urllib3.exceptions import InsecureRequestWarning
15 | from requests.exceptions import InvalidSchema, SSLError, ConnectionError, ChunkedEncodingError
16 |
17 | logger = getLogger()
18 |
19 |
20 | class GeoIPHandler(object):
21 | def __init__(self, data_folder, maxmind_license_key):
22 | self.data_folder = data_folder
23 | self.maxmind_license_key = maxmind_license_key
24 | self.dbfile = abspath(join(self.data_folder, 'GeoLite2-City.mmdb'))
25 | self.logger = getLogger()
26 | self.reader = None
27 | self.reader_manager(action='open')
28 |
29 | self.logger.info('Opening persistent connection to the MaxMind DB...')
30 |
31 | def reader_manager(self, action=None):
32 | if action == 'open':
33 | try:
34 | self.reader = Reader(self.dbfile)
35 | except FileNotFoundError:
36 | self.logger.error("Could not find MaxMind DB! Downloading!")
37 | result_status = self.download()
38 | if result_status:
39 | self.logger.error("Could not download MaxMind DB! You may need to manually install it.")
40 | exit(1)
41 | else:
42 | self.reader = Reader(self.dbfile)
43 | else:
44 | self.reader.close()
45 |
46 | def lookup(self, ipaddress):
47 | ip = ipaddress
48 | self.logger.debug('Getting lat/long for Tautulli stream using ip with last octet ending in %s',
49 | ip.split('.')[-1:][0])
50 | return self.reader.city(ip)
51 |
52 | def update(self):
53 | today = date.today()
54 |
55 | try:
56 | dbdate = date.fromtimestamp(stat(self.dbfile).st_mtime)
57 | db_next_update = date.fromtimestamp(stat(self.dbfile).st_mtime) + timedelta(days=30)
58 |
59 | except FileNotFoundError:
60 | self.logger.error("Could not find MaxMind DB as: %s", self.dbfile)
61 | self.download()
62 | dbdate = date.fromtimestamp(stat(self.dbfile).st_mtime)
63 | db_next_update = date.fromtimestamp(stat(self.dbfile).st_mtime) + timedelta(days=30)
64 |
65 | if db_next_update < today:
66 | self.logger.info("Newer MaxMind DB available, Updating...")
67 | self.logger.debug("MaxMind DB date %s, DB updates after: %s, Today: %s",
68 | dbdate, db_next_update, today)
69 | self.reader_manager(action='close')
70 | self.download()
71 | self.reader_manager(action='open')
72 | else:
73 | db_days_update = db_next_update - today
74 | self.logger.debug("MaxMind DB will update in %s days", abs(db_days_update.days))
75 | self.logger.debug("MaxMind DB date %s, DB updates after: %s, Today: %s",
76 | dbdate, db_next_update, today)
77 |
78 | def download(self):
79 | tar_dbfile = abspath(join(self.data_folder, 'GeoLite2-City.tar.gz'))
80 | maxmind_url = ('https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-City'
81 | f'&suffix=tar.gz&license_key={self.maxmind_license_key}')
82 | downloaded = False
83 |
84 | retry_counter = 0
85 |
86 | while not downloaded:
87 | self.logger.info('Downloading GeoLite2 DB from MaxMind...')
88 | try:
89 | urlretrieve(maxmind_url, tar_dbfile)
90 | downloaded = True
91 | except URLError as e:
92 | self.logger.error("Problem downloading new MaxMind DB: %s", e)
93 | result_status = 1
94 | return result_status
95 | except HTTPError as e:
96 | if e.code == 401:
97 |                     self.logger.error("Your MaxMind license key is incorrect! Check your config: %s", e)
98 | result_status = 1
99 | return result_status
100 | else:
101 | self.logger.error("Problem downloading new MaxMind DB... Trying again: %s", e)
102 | sleep(2)
103 | retry_counter = (retry_counter + 1)
104 |
105 | if retry_counter >= 3:
106 | self.logger.error("Retried downloading the new MaxMind DB 3 times and failed... Aborting!")
107 | result_status = 1
108 | return result_status
109 | try:
110 | remove(self.dbfile)
111 | except FileNotFoundError:
112 | self.logger.warning("Cannot remove MaxMind DB as it does not exist!")
113 |
114 | self.logger.debug("Opening MaxMind tar file : %s", tar_dbfile)
115 |
116 | tar = taropen(tar_dbfile, 'r:gz')
117 |
118 | for files in tar.getmembers():
119 | if 'GeoLite2-City.mmdb' in files.name:
120 | self.logger.debug('"GeoLite2-City.mmdb" FOUND in tar file')
121 | files.name = basename(files.name)
122 | tar.extract(files, self.data_folder)
123 | self.logger.debug('%s has been extracted to %s', files, self.data_folder)
124 | tar.close()
125 | try:
126 | remove(tar_dbfile)
127 | self.logger.debug('Removed the MaxMind DB tar file.')
128 | except FileNotFoundError:
129 | self.logger.warning("Cannot remove MaxMind DB TAR file as it does not exist!")
130 |
131 |
132 | def hashit(string):
133 | encoded = string.encode()
134 | hashed = md5(encoded).hexdigest()
135 |
136 | return hashed
137 |
138 |
139 | def rfc1918_ip_check(ip):
140 | rfc1918_ip = IPv4Address(ip).is_private
141 |
142 | return rfc1918_ip
143 |
144 |
145 | def connection_handler(session, request, verify, as_is_reply=False):
146 | air = as_is_reply
147 | s = session
148 | r = request
149 | v = verify
150 | return_json = False
151 |
152 | disable_warnings(InsecureRequestWarning)
153 |
154 | try:
155 | get = s.send(r, verify=v)
156 | if get.status_code == 401:
157 | if 'NoSiteContext' in str(get.content):
158 | logger.info('Your Site is incorrect for %s', r.url)
159 | elif 'LoginRequired' in str(get.content):
160 | logger.info('Your login credentials are incorrect for %s', r.url)
161 | else:
162 | logger.info('Your api key is incorrect for %s', r.url)
163 | elif get.status_code == 404:
164 |             logger.info('This url does not resolve: %s', r.url)
165 | elif get.status_code == 200:
166 | try:
167 | return_json = get.json()
168 | except JSONDecodeError:
169 | logger.error('No JSON response. Response is: %s', get.text)
170 | if air:
171 | return get
172 | except InvalidSchema:
173 | logger.error("You added http(s):// in the config file. Don't do that.")
174 | except SSLError as e:
175 | logger.error('Either your host is unreachable or you have an SSL issue. : %s', e)
176 | except ConnectionError as e:
177 | logger.error('Cannot resolve the url/ip/port. Check connectivity. Error: %s', e)
178 | except ChunkedEncodingError as e:
179 | logger.error('Broken connection during request... oops? Error: %s', e)
180 |
181 | return return_json
182 |
183 |
184 | def mkdir_p(path):
185 | templogger = getLogger('temp')
186 | try:
187 | if not isdir(path):
188 | templogger.info('Creating folder %s ', path)
189 | makedirs(path, exist_ok=True)
190 | except Exception as e:
191 | templogger.error('Could not create folder %s : %s ', path, e)
192 |
193 |
194 | def clean_sid_check(server_id_list, server_type=None):
195 | t = server_type
196 | sid_list = server_id_list
197 | cleaned_list = sid_list.replace(' ', '').split(',')
198 | valid_sids = []
199 | for sid in cleaned_list:
200 | try:
201 | valid_sids.append(int(sid))
202 | except ValueError:
203 | logger.error("%s is not a valid server id number", sid)
204 | if valid_sids:
205 | logger.info('%s : %s', t.upper(), valid_sids)
206 | return valid_sids
207 | else:
208 | logger.error('No valid %s', t.upper())
209 | return False
210 |
211 |
212 | def boolcheck(var):
213 | if var.lower() in ['true', 'yes']:
214 | return True
215 | else:
216 | return False
217 |
218 |
219 | def itemgetter_with_default(**defaults):
220 | return lambda obj: tuple(obj.get(k, v) for k, v in defaults.items())
221 |
--------------------------------------------------------------------------------
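A small sketch (not from the repository) of the helper functions above, handy when reading the service modules:

    from varken.helpers import hashit, boolcheck, itemgetter_with_default

    boolcheck('true')             # -> True; only 'true'/'yes' (any case) count as truthy
    boolcheck('no')               # -> False

    hashit('1The Show S01E01')    # -> md5 hex digest, used as the InfluxDB "hash" field

    getter = itemgetter_with_default(title='unknown', year=0)
    getter({'title': 'Alien'})    # -> ('Alien', 0); missing keys fall back to the given defaults
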
/varken/iniparser.py:
--------------------------------------------------------------------------------
1 | from os import W_OK, access
2 | from shutil import copyfile
3 | from os import environ as env
4 | from logging import getLogger
5 | from os.path import join, exists
6 | from re import match, compile, IGNORECASE
7 | from configparser import ConfigParser, NoOptionError, NoSectionError
8 |
9 | from varken.varkenlogger import BlacklistFilter
10 | from varken.structures import SickChillServer, UniFiServer
11 | from varken.helpers import clean_sid_check, rfc1918_ip_check, boolcheck
12 | from varken.structures import SonarrServer, RadarrServer, OmbiServer, TautulliServer, InfluxServer
13 |
14 |
15 | class INIParser(object):
16 | def __init__(self, data_folder):
17 | self.config = None
18 | self.data_folder = data_folder
19 | self.filtered_strings = None
20 | self.services = ['sonarr', 'radarr', 'lidarr', 'ombi', 'tautulli', 'sickchill', 'unifi']
21 |
22 | self.logger = getLogger()
23 | self.influx_server = InfluxServer()
24 |
25 | try:
26 | self.parse_opts(read_file=True)
27 | except NoSectionError as e:
28 | self.logger.error('Missing section in (varken.ini): %s', e)
29 | self.rectify_ini()
30 |
31 | def config_blacklist(self):
32 | filtered_strings = [section.get(k) for key, section in self.config.items()
33 | for k in section if k in BlacklistFilter.blacklisted_strings]
34 | self.filtered_strings = list(filter(None, filtered_strings))
35 | # Added matching for domains that use /locations. ConnectionPool ignores the location in logs
36 | domains_only = [string.split('/')[0] for string in filtered_strings if '/' in string]
37 | self.filtered_strings.extend(domains_only)
38 | # Added matching for domains that use :port. ConnectionPool splits the domain/ip from the port
39 | without_port = [string.split(':')[0] for string in filtered_strings if ':' in string]
40 | self.filtered_strings.extend(without_port)
41 |
42 | for handler in self.logger.handlers:
43 | handler.addFilter(BlacklistFilter(set(self.filtered_strings)))
44 |
45 | def enable_check(self, server_type=None):
46 | t = server_type
47 | global_server_ids = env.get(f'VRKN_GLOBAL_{t.upper()}', self.config.get('global', t))
48 | if global_server_ids.lower() in ['false', 'no']:
49 | self.logger.info('%s disabled.', t.upper())
50 | else:
51 | sids = clean_sid_check(global_server_ids, t)
52 | return sids
53 |
54 | def read_file(self, inifile):
55 | config = ConfigParser(interpolation=None)
56 | ini = inifile
57 | file_path = join(self.data_folder, ini)
58 |
59 | if not exists(file_path):
60 | self.logger.error('File missing (%s) in %s', ini, self.data_folder)
61 | if inifile == 'varken.ini':
62 | try:
63 | self.logger.debug('Creating varken.ini from varken.example.ini')
64 | copyfile(join(self.data_folder, 'varken.example.ini'), file_path)
65 | except IOError as e:
66 |                     self.logger.error("Varken does not have permission to write to %s. Error: %s - Exiting.",
67 |                                       self.data_folder, e)
68 | exit(1)
69 |
70 | self.logger.debug('Reading from %s', inifile)
71 | with open(file_path) as config_ini:
72 | config.read_file(config_ini)
73 |
74 | return config
75 |
76 | def write_file(self, inifile):
77 | ini = inifile
78 | file_path = join(self.data_folder, ini)
79 | if exists(file_path):
80 | self.logger.debug('Writing to %s', inifile)
81 | if not access(file_path, W_OK):
82 | self.logger.error("Config file is incomplete and read-only. Exiting.")
83 | exit(1)
84 | with open(file_path, 'w') as config_ini:
85 | self.config.write(config_ini)
86 | else:
87 | self.logger.error('File missing (%s) in %s', ini, self.data_folder)
88 | exit(1)
89 |
90 | def url_check(self, url=None, include_port=True, section=None):
91 | url_check = url
92 | module = section
93 | inc_port = include_port
94 |
95 | search = (r'(?:([a-z0-9]+(-[a-z0-9]+)*\.)+[a-z]{2,}|' # domain...
96 | r'localhost|' # localhost...
97 | r'^[a-zA-Z0-9_-]*|' # hostname only. My soul dies a little every time this is used...
98 | r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
99 | )
100 | # Include search for port if it is needed.
101 | if inc_port:
102 | search = (search + r'(?::\d+)?' + r'(?:/?|[/?]\S+)$')
103 | else:
104 | search = (search + r'(?:/?|[/?]\S+)$')
105 |
106 | regex = compile('{}'.format(search), IGNORECASE)
107 |
108 | valid = match(regex, url_check) is not None
109 | if not valid:
110 | if inc_port:
111 |                 self.logger.error('%s is invalid in module [%s]! URL must be a host/IP and '
112 |                                   'port if not 80 or 443. e.g. localhost:8080',
113 | url_check, module)
114 | exit(1)
115 | else:
116 |                 self.logger.error('%s is invalid in module [%s]! URL must be a host/IP. e.g. localhost', url_check, module)
117 | exit(1)
118 | else:
119 | self.logger.debug('%s is a valid URL in module [%s].', url_check, module)
120 | return url_check
121 |
122 | def rectify_ini(self):
123 | self.logger.debug('Rectifying varken.ini with varken.example.ini')
124 | current_ini = self.config
125 | example_ini = self.read_file('varken.example.ini')
126 |
127 | for name, section in example_ini.items():
128 | if name not in current_ini:
129 | self.logger.debug('Section %s missing. Adding...', name)
130 | current_ini[name] = {}
131 | for key, value in section.items():
132 | if not current_ini[name].get(key):
133 | self.logger.debug('%s is missing in %s. Adding defaults...', key, name)
134 | current_ini[name][key] = value
135 |
136 | self.config = current_ini
137 | self.write_file('varken.ini')
138 | self.parse_opts()
139 |
140 | def parse_opts(self, read_file=False):
141 | for service in self.services:
142 | setattr(self, f'{service}_servers', [])
143 |
144 | if read_file:
145 | self.config = self.read_file('varken.ini')
146 | self.config_blacklist()
147 | # Parse InfluxDB options
148 | try:
149 | url = self.url_check(env.get('VRKN_INFLUXDB_URL', self.config.get('influxdb', 'url')),
150 | include_port=False, section='influxdb')
151 | port = int(env.get('VRKN_INFLUXDB_PORT', self.config.getint('influxdb', 'port')))
152 | ssl = boolcheck(env.get('VRKN_INFLUXDB_SSL', self.config.get('influxdb', 'ssl')))
153 | verify_ssl = boolcheck(env.get('VRKN_INFLUXDB_VERIFY_SSL', self.config.get('influxdb', 'verify_ssl')))
154 |
155 | username = env.get('VRKN_INFLUXDB_USERNAME', self.config.get('influxdb', 'username'))
156 | password = env.get('VRKN_INFLUXDB_PASSWORD', self.config.get('influxdb', 'password'))
157 | except NoOptionError as e:
158 | self.logger.error('Missing key in %s. Error: %s', "influxdb", e)
159 | self.rectify_ini()
160 | return
161 |
162 | self.influx_server = InfluxServer(url=url, port=port, username=username, password=password, ssl=ssl,
163 | verify_ssl=verify_ssl)
164 |
165 | # Check for all enabled services
166 | for service in self.services:
167 | try:
168 | setattr(self, f'{service}_enabled', self.enable_check(f'{service}_server_ids'))
169 | except NoOptionError as e:
170 | self.logger.error('Missing global %s. Error: %s', f'{service}_server_ids', e)
171 | self.rectify_ini()
172 | return
173 | service_enabled = getattr(self, f'{service}_enabled')
174 |
175 | if service_enabled:
176 | for server_id in service_enabled:
177 | server = None
178 | section = f"{service}-{server_id}"
179 | envsection = f"{service}_{server_id}".upper()
180 | try:
181 | url = self.url_check(env.get(f'VRKN_{envsection}_URL', self.config.get(section, 'url')),
182 | section=section)
183 |
184 | apikey = None
185 | if service != 'unifi':
186 | apikey = env.get(f'VRKN_{envsection}_APIKEY', self.config.get(section, 'apikey'))
187 | ssl_scheme = boolcheck(env.get(f'VRKN_{envsection}_SSL', self.config.get(section, 'ssl')))
188 | scheme = 'https://' if ssl_scheme else 'http://'
189 | verify_ssl = boolcheck(env.get(f'VRKN_{envsection}_VERIFY_SSL',
190 | self.config.get(section, 'verify_ssl')))
191 |
192 | if scheme != 'https://':
193 | verify_ssl = False
194 |
195 | if service in ['sonarr', 'radarr', 'lidarr']:
196 | queue = boolcheck(env.get(f'VRKN_{envsection}_QUEUE',
197 | self.config.get(section, 'queue')))
198 | queue_run_seconds = int(env.get(f'VRKN_{envsection}_QUEUE_RUN_SECONDS',
199 | self.config.getint(section, 'queue_run_seconds')))
200 |
201 | if service in ['sonarr', 'lidarr']:
202 | missing_days = int(env.get(f'VRKN_{envsection}_MISSING_DAYS',
203 | self.config.getint(section, 'missing_days')))
204 | future_days = int(env.get(f'VRKN_{envsection}_FUTURE_DAYS',
205 | self.config.getint(section, 'future_days')))
206 |
207 | missing_days_run_seconds = int(env.get(
208 | f'VRKN_{envsection}_MISSING_DAYS_RUN_SECONDS',
209 | self.config.getint(section, 'missing_days_run_seconds')))
210 |
211 | future_days_run_seconds = int(env.get(
212 | f'VRKN_{envsection}_FUTURE_DAYS_RUN_SECONDS',
213 | self.config.getint(section, 'future_days_run_seconds')))
214 |
215 | server = SonarrServer(id=server_id, url=scheme + url, api_key=apikey, verify_ssl=verify_ssl,
216 | missing_days=missing_days, future_days=future_days,
217 | missing_days_run_seconds=missing_days_run_seconds,
218 | future_days_run_seconds=future_days_run_seconds,
219 | queue=queue, queue_run_seconds=queue_run_seconds)
220 |
221 | if service == 'radarr':
222 | get_missing = boolcheck(env.get(f'VRKN_{envsection}_GET_MISSING',
223 | self.config.get(section, 'get_missing')))
224 | get_missing_run_seconds = int(env.get(
225 | f'VRKN_{envsection}_GET_MISSING_RUN_SECONDS',
226 | self.config.getint(section, 'get_missing_run_seconds')))
227 |
228 | server = RadarrServer(id=server_id, url=scheme + url, api_key=apikey, verify_ssl=verify_ssl,
229 | queue_run_seconds=queue_run_seconds, get_missing=get_missing,
230 | queue=queue, get_missing_run_seconds=get_missing_run_seconds)
231 |
232 | if service == 'tautulli':
233 | fallback_ip = env.get(f'VRKN_{envsection}_FALLBACK_IP',
234 | self.config.get(section, 'fallback_ip'))
235 |
236 | get_stats = boolcheck(env.get(f'VRKN_{envsection}_GET_STATS',
237 | self.config.get(section, 'get_stats')))
238 |
239 | get_activity = boolcheck(env.get(f'VRKN_{envsection}_GET_ACTIVITY',
240 | self.config.get(section, 'get_activity')))
241 |
242 | get_activity_run_seconds = int(env.get(
243 | f'VRKN_{envsection}_GET_ACTIVITY_RUN_SECONDS',
244 | self.config.getint(section, 'get_activity_run_seconds')))
245 |
246 | get_stats_run_seconds = int(env.get(
247 | f'VRKN_{envsection}_GET_STATS_RUN_SECONDS',
248 | self.config.getint(section, 'get_stats_run_seconds')))
249 |
250 | invalid_wan_ip = rfc1918_ip_check(fallback_ip)
251 |
252 | if invalid_wan_ip:
253 | self.logger.error('Invalid fallback_ip [%s] set for %s-%s!', fallback_ip, service,
254 | server_id)
255 | exit(1)
256 |
257 | maxmind_license_key = env.get('VRKN_GLOBAL_MAXMIND_LICENSE_KEY',
258 | self.config.get('global', 'maxmind_license_key'))
259 |
260 | server = TautulliServer(id=server_id, url=scheme + url, api_key=apikey,
261 | verify_ssl=verify_ssl, get_activity=get_activity,
262 | fallback_ip=fallback_ip, get_stats=get_stats,
263 | get_activity_run_seconds=get_activity_run_seconds,
264 | get_stats_run_seconds=get_stats_run_seconds,
265 | maxmind_license_key=maxmind_license_key)
266 |
267 | if service == 'ombi':
268 | issue_status_counts = boolcheck(env.get(
269 | f'VRKN_{envsection}_GET_ISSUE_STATUS_COUNTS',
270 | self.config.get(section, 'get_issue_status_counts')))
271 | request_type_counts = boolcheck(env.get(
272 | f'VRKN_{envsection}_GET_REQUEST_TYPE_COUNTS',
273 | self.config.get(section, 'get_request_type_counts')))
274 | request_total_counts = boolcheck(env.get(
275 | f'VRKN_{envsection}_GET_REQUEST_TOTAL_COUNTS',
276 | self.config.get(section, 'get_request_total_counts')))
277 |
278 | issue_status_run_seconds = int(env.get(
279 | f'VRKN_{envsection}_ISSUE_STATUS_RUN_SECONDS',
280 | self.config.getint(section, 'issue_status_run_seconds')))
281 | request_type_run_seconds = int(env.get(
282 | f'VRKN_{envsection}_REQUEST_TYPE_RUN_SECONDS',
283 | self.config.getint(section, 'request_type_run_seconds')))
284 | request_total_run_seconds = int(env.get(
285 | f'VRKN_{envsection}_REQUEST_TOTAL_RUN_SECONDS',
286 | self.config.getint(section, 'request_total_run_seconds')))
287 |
288 | server = OmbiServer(id=server_id, url=scheme + url, api_key=apikey, verify_ssl=verify_ssl,
289 | request_type_counts=request_type_counts,
290 | request_type_run_seconds=request_type_run_seconds,
291 | request_total_counts=request_total_counts,
292 | request_total_run_seconds=request_total_run_seconds,
293 | issue_status_counts=issue_status_counts,
294 | issue_status_run_seconds=issue_status_run_seconds)
295 |
296 | if service == 'sickchill':
297 | get_missing = boolcheck(env.get(f'VRKN_{envsection}_GET_MISSING',
298 | self.config.get(section, 'get_missing')))
299 | get_missing_run_seconds = int(env.get(
300 | f'VRKN_{envsection}_GET_MISSING_RUN_SECONDS',
301 | self.config.getint(section, 'get_missing_run_seconds')))
302 |
303 | server = SickChillServer(id=server_id, url=scheme + url, api_key=apikey,
304 | verify_ssl=verify_ssl, get_missing=get_missing,
305 | get_missing_run_seconds=get_missing_run_seconds)
306 |
307 | if service == 'unifi':
308 | username = env.get(f'VRKN_{envsection}_USERNAME', self.config.get(section, 'username'))
309 | password = env.get(f'VRKN_{envsection}_PASSWORD', self.config.get(section, 'password'))
310 | site = env.get(f'VRKN_{envsection}_SITE', self.config.get(section, 'site')).lower()
311 | usg_name = env.get(f'VRKN_{envsection}_USG_NAME', self.config.get(section, 'usg_name'))
312 | get_usg_stats_run_seconds = int(env.get(
313 | f'VRKN_{envsection}_GET_USG_STATS_RUN_SECONDS',
314 | self.config.getint(section, 'get_usg_stats_run_seconds')))
315 |
316 | server = UniFiServer(id=server_id, url=scheme + url, verify_ssl=verify_ssl, site=site,
317 | username=username, password=password, usg_name=usg_name,
318 | get_usg_stats_run_seconds=get_usg_stats_run_seconds)
319 |
320 | getattr(self, f'{service}_servers').append(server)
321 | except NoOptionError as e:
322 | self.logger.error('Missing key in %s. Error: %s', section, e)
323 | self.rectify_ini()
324 | return
325 | except ValueError as e:
326 | self.logger.error("Invalid configuration value in %s. Error: %s", section, e)
327 |
--------------------------------------------------------------------------------
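A minimal sketch of the precedence rule used throughout parse_opts(): every option is resolved as env.get('VRKN_&lt;SECTION&gt;_&lt;KEY&gt;', &lt;ini value&gt;), so an environment variable always overrides varken.ini. The section contents and variable below are examples, not shipped defaults:

    from os import environ as env
    from configparser import ConfigParser

    config = ConfigParser(interpolation=None)
    config.read_string('[influxdb]\nurl = influxdb.domain.tld\n')     # stand-in for varken.ini

    env['VRKN_INFLUXDB_URL'] = 'influxdb.local'                       # e.g. set via docker-compose
    url = env.get('VRKN_INFLUXDB_URL', config.get('influxdb', 'url'))
    print(url)   # -> 'influxdb.local'; without the variable it falls back to the ini value
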
/varken/lidarr.py:
--------------------------------------------------------------------------------
1 | from logging import getLogger
2 | from requests import Session, Request
3 | from datetime import datetime, timezone, date, timedelta
4 |
5 | from varken.structures import LidarrQueue, LidarrAlbum
6 | from varken.helpers import hashit, connection_handler
7 |
8 |
9 | class LidarrAPI(object):
10 | def __init__(self, server, dbmanager):
11 | self.dbmanager = dbmanager
12 | self.server = server
13 | # Create session to reduce server web thread load, and globally define pageSize for all requests
14 | self.session = Session()
15 | self.session.headers = {'X-Api-Key': self.server.api_key}
16 | self.logger = getLogger()
17 |
18 | def __repr__(self):
19 |         return f"<lidarr-{self.server.id}>"
20 |
21 | def get_calendar(self, query="Missing"):
22 | endpoint = '/api/v1/calendar'
23 | today = str(date.today())
24 | last_days = str(date.today() - timedelta(days=self.server.missing_days))
25 | future = str(date.today() + timedelta(days=self.server.future_days))
26 | now = datetime.now(timezone.utc).astimezone().isoformat()
27 | if query == "Missing":
28 | params = {'start': last_days, 'end': today}
29 | else:
30 | params = {'start': today, 'end': future}
31 | influx_payload = []
32 | influx_albums = []
33 |
34 | req = self.session.prepare_request(Request('GET', self.server.url + endpoint, params=params))
35 | get = connection_handler(self.session, req, self.server.verify_ssl)
36 |
37 | if not get:
38 | return
39 |
40 | # Iteratively create a list of LidarrAlbum Objects from response json
41 | albums = []
42 | for album in get:
43 | try:
44 | albums.append(LidarrAlbum(**album))
45 | except TypeError as e:
46 | self.logger.error('TypeError has occurred : %s while creating LidarrAlbum structure for album. Data '
47 | 'attempted is: %s', e, album)
48 |
49 | # Add Album to missing list if album is not complete
50 | for album in albums:
51 | percent_of_tracks = album.statistics.get('percentOfTracks', 0)
52 | if percent_of_tracks != 100:
53 | influx_albums.append(
54 | (album.title, album.releaseDate, album.artist['artistName'], album.id, percent_of_tracks,
55 | f"{album.statistics.get('trackFileCount', 0)}/{album.statistics.get('trackCount', 0)}")
56 | )
57 |
58 | for title, release_date, artist_name, album_id, percent_complete, complete_count in influx_albums:
59 | hash_id = hashit(f'{self.server.id}{title}{album_id}')
60 | influx_payload.append(
61 | {
62 | "measurement": "Lidarr",
63 | "tags": {
64 | "type": query,
65 | "sonarrId": album_id,
66 | "server": self.server.id,
67 | "albumName": title,
68 | "artistName": artist_name,
69 | "percentComplete": percent_complete,
70 | "completeCount": complete_count,
71 | "releaseDate": release_date
72 | },
73 | "time": now,
74 | "fields": {
75 | "hash": hash_id
76 |
77 | }
78 | }
79 | )
80 |
81 | self.dbmanager.write_points(influx_payload)
82 |
83 | def get_queue(self):
84 | endpoint = '/api/v1/queue'
85 | now = datetime.now(timezone.utc).astimezone().isoformat()
86 | influx_payload = []
87 | params = {'pageSize': 1000}
88 |
89 | req = self.session.prepare_request(Request('GET', self.server.url + endpoint, params=params))
90 | get = connection_handler(self.session, req, self.server.verify_ssl)
91 |
92 | if not get:
93 | return
94 |
95 | queue = []
96 | for song in get['records']:
97 | try:
98 | queue.append(LidarrQueue(**song))
99 | except TypeError as e:
100 | self.logger.error('TypeError has occurred : %s while creating LidarrQueue structure for show. Data '
101 | 'attempted is: %s', e, song)
102 |
103 | if not queue:
104 | return
105 |
106 | for song in queue:
107 | if song.protocol.upper() == 'USENET':
108 | protocol_id = 1
109 | else:
110 | protocol_id = 0
111 | hash_id = hashit(f'{self.server.id}{song.title}{song.artistId}')
112 | influx_payload.append(
113 | {
114 | "measurement": "Lidarr",
115 | "tags": {
116 | "type": "Queue",
117 | "id": song.id,
118 | "server": self.server.id,
119 | "title": song.title,
120 | "quality": song.quality['quality']['name'],
121 | "protocol": song.protocol,
122 | "protocol_id": protocol_id,
123 | "indexer": song.indexer
124 | },
125 | "time": now,
126 | "fields": {
127 | "hash": hash_id
128 | }
129 | }
130 | )
131 |
132 | self.dbmanager.write_points(influx_payload)
133 |
--------------------------------------------------------------------------------
/varken/ombi.py:
--------------------------------------------------------------------------------
1 | from logging import getLogger
2 | from requests import Session, Request
3 | from datetime import datetime, timezone
4 |
5 | from varken.helpers import connection_handler, hashit
6 | from varken.structures import OmbiRequestCounts, OmbiIssuesCounts, OmbiMovieRequest, OmbiTVRequest
7 |
8 |
9 | class OmbiAPI(object):
10 | def __init__(self, server, dbmanager):
11 | self.dbmanager = dbmanager
12 | self.server = server
13 | # Create session to reduce server web thread load, and globally define pageSize for all requests
14 | self.session = Session()
15 | self.session.headers = {'Apikey': self.server.api_key}
16 | self.logger = getLogger()
17 |
18 | def __repr__(self):
19 |         return f"<ombi-{self.server.id}>"
20 |
21 | def get_all_requests(self):
22 | now = datetime.now(timezone.utc).astimezone().isoformat()
23 | tv_endpoint = '/api/v1/Request/tv'
24 | movie_endpoint = "/api/v1/Request/movie"
25 |
26 | tv_req = self.session.prepare_request(Request('GET', self.server.url + tv_endpoint))
27 | movie_req = self.session.prepare_request(Request('GET', self.server.url + movie_endpoint))
28 | get_tv = connection_handler(self.session, tv_req, self.server.verify_ssl) or []
29 | get_movie = connection_handler(self.session, movie_req, self.server.verify_ssl) or []
30 |
31 | if not any([get_tv, get_movie]):
32 | self.logger.error('No json replies. Discarding job')
33 | return
34 |
35 | if get_movie:
36 | movie_request_count = len(get_movie)
37 | else:
38 | movie_request_count = 0
39 |
40 | if get_tv:
41 | tv_request_count = len(get_tv)
42 | else:
43 | tv_request_count = 0
44 |
45 | tv_show_requests = []
46 | for show in get_tv:
47 | try:
48 | tv_show_requests.append(OmbiTVRequest(**show))
49 | except TypeError as e:
50 | self.logger.error('TypeError has occurred : %s while creating OmbiTVRequest structure for show. '
51 | 'data attempted is: %s', e, show)
52 |
53 | movie_requests = []
54 | for movie in get_movie:
55 | try:
56 | movie_requests.append(OmbiMovieRequest(**movie))
57 | except TypeError as e:
58 | self.logger.error('TypeError has occurred : %s while creating OmbiMovieRequest structure for movie. '
59 | 'data attempted is: %s', e, movie)
60 |
61 | influx_payload = [
62 | {
63 | "measurement": "Ombi",
64 | "tags": {
65 | "type": "Request_Total",
66 | "server": self.server.id
67 | },
68 | "time": now,
69 | "fields": {
70 | "total": movie_request_count + tv_request_count,
71 | "movies": movie_request_count,
72 | "tv_shows": tv_request_count
73 | }
74 | }
75 | ]
76 | # Request Type: Movie = 1, TV Show = 0
77 | for movie in movie_requests:
78 | hash_id = hashit(f'{movie.id}{movie.theMovieDbId}{movie.title}')
79 |
80 | # Denied = 0, Approved = 1, Completed = 2, Pending = 3
81 | if movie.denied:
82 | status = 0
83 |
84 | elif movie.approved and movie.available:
85 | status = 2
86 |
87 | elif movie.approved:
88 | status = 1
89 |
90 | else:
91 | status = 3
92 |
93 | influx_payload.append(
94 | {
95 | "measurement": "Ombi",
96 | "tags": {
97 | "type": "Requests",
98 | "server": self.server.id,
99 | "request_type": 1,
100 | "status": status,
101 | "title": movie.title,
102 | "requested_user": movie.requestedUser['userAlias'],
103 | "requested_date": movie.requestedDate
104 | },
105 | "time": now,
106 | "fields": {
107 | "hash": hash_id
108 | }
109 | }
110 | )
111 |
112 | for show in tv_show_requests:
113 | hash_id = hashit(f'{show.id}{show.tvDbId}{show.title}')
114 |
115 | # Denied = 0, Approved = 1, Completed = 2, Pending = 3
116 | if show.childRequests[0].get('denied'):
117 | status = 0
118 |
119 | elif show.childRequests[0].get('approved') and show.childRequests[0].get('available'):
120 | status = 2
121 |
122 | elif show.childRequests[0].get('approved'):
123 | status = 1
124 |
125 | else:
126 | status = 3
127 |
128 | influx_payload.append(
129 | {
130 | "measurement": "Ombi",
131 | "tags": {
132 | "type": "Requests",
133 | "server": self.server.id,
134 | "request_type": 0,
135 | "status": status,
136 | "title": show.title,
137 | "requested_user": show.childRequests[0]['requestedUser']['userAlias'],
138 | "requested_date": show.childRequests[0]['requestedDate']
139 | },
140 | "time": now,
141 | "fields": {
142 | "hash": hash_id
143 | }
144 | }
145 | )
146 |
147 | if influx_payload:
148 | self.dbmanager.write_points(influx_payload)
149 | else:
150 | self.logger.debug("Empty dataset for ombi module. Discarding...")
151 |
152 | def get_request_counts(self):
153 | now = datetime.now(timezone.utc).astimezone().isoformat()
154 | endpoint = '/api/v1/Request/count'
155 |
156 | req = self.session.prepare_request(Request('GET', self.server.url + endpoint))
157 | get = connection_handler(self.session, req, self.server.verify_ssl)
158 |
159 | if not get:
160 | return
161 |
162 | requests = OmbiRequestCounts(**get)
163 | influx_payload = [
164 | {
165 | "measurement": "Ombi",
166 | "tags": {
167 | "type": "Request_Counts"
168 | },
169 | "time": now,
170 | "fields": {
171 | "pending": requests.pending,
172 | "approved": requests.approved,
173 | "available": requests.available
174 | }
175 | }
176 | ]
177 |
178 | self.dbmanager.write_points(influx_payload)
179 |
180 | def get_issue_counts(self):
181 | now = datetime.now(timezone.utc).astimezone().isoformat()
182 | endpoint = '/api/v1/Issues/count'
183 |
184 | req = self.session.prepare_request(Request('GET', self.server.url + endpoint))
185 | get = connection_handler(self.session, req, self.server.verify_ssl)
186 |
187 | if not get:
188 | return
189 |
190 | requests = OmbiIssuesCounts(**get)
191 | influx_payload = [
192 | {
193 | "measurement": "Ombi",
194 | "tags": {
195 | "type": "Issues_Counts"
196 | },
197 | "time": now,
198 | "fields": {
199 | "pending": requests.pending,
200 | "in_progress": requests.inProgress,
201 | "resolved": requests.resolved
202 | }
203 | }
204 | ]
205 |
206 | self.dbmanager.write_points(influx_payload)
207 |
--------------------------------------------------------------------------------
/varken/radarr.py:
--------------------------------------------------------------------------------
1 | from logging import getLogger
2 | from requests import Session, Request
3 | from datetime import datetime, timezone
4 |
5 | from varken.structures import RadarrMovie, Queue
6 | from varken.helpers import hashit, connection_handler
7 |
8 |
9 | class RadarrAPI(object):
10 | def __init__(self, server, dbmanager):
11 | self.dbmanager = dbmanager
12 | self.server = server
13 | # Create session to reduce server web thread load, and globally define pageSize for all requests
14 | self.session = Session()
15 | self.session.headers = {'X-Api-Key': self.server.api_key}
16 | self.logger = getLogger()
17 |
18 | def __repr__(self):
19 |         return f"<radarr-{self.server.id}>"
20 |
21 | def get_missing(self):
22 | endpoint = '/api/movie'
23 | now = datetime.now(timezone.utc).astimezone().isoformat()
24 | influx_payload = []
25 | missing = []
26 |
27 | req = self.session.prepare_request(Request('GET', self.server.url + endpoint))
28 | get = connection_handler(self.session, req, self.server.verify_ssl)
29 |
30 | if not get:
31 | return
32 |
33 | try:
34 | movies = [RadarrMovie(**movie) for movie in get]
35 | except TypeError as e:
36 | self.logger.error('TypeError has occurred : %s while creating RadarrMovie structure', e)
37 | return
38 |
39 | for movie in movies:
40 | if movie.monitored and not movie.downloaded:
41 | if movie.isAvailable:
42 | ma = 0
43 | else:
44 | ma = 1
45 |
46 | movie_name = f'{movie.title} ({movie.year})'
47 | missing.append((movie_name, ma, movie.tmdbId, movie.titleSlug))
48 |
49 | for title, ma, mid, title_slug in missing:
50 | hash_id = hashit(f'{self.server.id}{title}{mid}')
51 | influx_payload.append(
52 | {
53 | "measurement": "Radarr",
54 | "tags": {
55 | "Missing": True,
56 | "Missing_Available": ma,
57 | "tmdbId": mid,
58 | "server": self.server.id,
59 | "name": title,
60 | "titleSlug": title_slug
61 | },
62 | "time": now,
63 | "fields": {
64 | "hash": hash_id
65 | }
66 | }
67 | )
68 |
69 | self.dbmanager.write_points(influx_payload)
70 |
71 | def get_queue(self):
72 | endpoint = '/api/queue'
73 | now = datetime.now(timezone.utc).astimezone().isoformat()
74 | influx_payload = []
75 | queue = []
76 |
77 | req = self.session.prepare_request(Request('GET', self.server.url + endpoint))
78 | get = connection_handler(self.session, req, self.server.verify_ssl)
79 |
80 | if not get:
81 | return
82 |
83 | for movie in get:
84 | try:
85 | movie['movie'] = RadarrMovie(**movie['movie'])
86 | except TypeError as e:
87 | self.logger.error('TypeError has occurred : %s while creating RadarrMovie structure', e)
88 | return
89 |
90 | try:
91 | download_queue = [Queue(**movie) for movie in get]
92 | except TypeError as e:
93 | self.logger.error('TypeError has occurred : %s while creating Queue structure', e)
94 | return
95 |
96 | for queue_item in download_queue:
97 | movie = queue_item.movie
98 |
99 | name = f'{movie.title} ({movie.year})'
100 |
101 | if queue_item.protocol.upper() == 'USENET':
102 | protocol_id = 1
103 | else:
104 | protocol_id = 0
105 |
106 | queue.append((name, queue_item.quality['quality']['name'], queue_item.protocol.upper(),
107 | protocol_id, queue_item.id, movie.titleSlug))
108 |
109 | for name, quality, protocol, protocol_id, qid, title_slug in queue:
110 | hash_id = hashit(f'{self.server.id}{name}{quality}')
111 | influx_payload.append(
112 | {
113 | "measurement": "Radarr",
114 | "tags": {
115 | "type": "Queue",
116 | "tmdbId": qid,
117 | "server": self.server.id,
118 | "name": name,
119 | "quality": quality,
120 | "protocol": protocol,
121 | "protocol_id": protocol_id,
122 | "titleSlug": title_slug
123 | },
124 | "time": now,
125 | "fields": {
126 | "hash": hash_id
127 | }
128 | }
129 | )
130 |
131 | self.dbmanager.write_points(influx_payload)
132 |
--------------------------------------------------------------------------------
/varken/sickchill.py:
--------------------------------------------------------------------------------
1 | from logging import getLogger
2 | from requests import Session, Request
3 | from datetime import datetime, timezone
4 |
5 | from varken.structures import SickChillTVShow
6 | from varken.helpers import hashit, connection_handler
7 |
8 |
9 | class SickChillAPI(object):
10 | def __init__(self, server, dbmanager):
11 | self.dbmanager = dbmanager
12 | self.server = server
13 | # Create session to reduce server web thread load, and globally define pageSize for all requests
14 | self.session = Session()
15 | self.session.params = {'limit': 1000}
16 | self.endpoint = f"/api/{self.server.api_key}"
17 | self.logger = getLogger()
18 |
19 | def __repr__(self):
20 |         return f"<sickchill-{self.server.id}>"
21 |
22 | def get_missing(self):
23 | now = datetime.now(timezone.utc).astimezone().isoformat()
24 | influx_payload = []
25 | params = {'cmd': 'future', 'paused': 1, 'type': 'missed|today|soon|later|snatched'}
26 |
27 | req = self.session.prepare_request(Request('GET', self.server.url + self.endpoint, params=params))
28 | get = connection_handler(self.session, req, self.server.verify_ssl)
29 |
30 | if not get:
31 | return
32 |
33 | try:
34 | for key, section in get['data'].items():
35 | get['data'][key] = [SickChillTVShow(**show) for show in section]
36 | except TypeError as e:
37 | self.logger.error('TypeError has occurred : %s while creating SickChillTVShow structure', e)
38 | return
39 |
40 | for key, section in get['data'].items():
41 | for show in section:
42 | sxe = f'S{show.season:0>2}E{show.episode:0>2}'
43 | hash_id = hashit(f'{self.server.id}{show.show_name}{sxe}')
44 | missing_types = [(0, 'future'), (1, 'later'), (2, 'soon'), (3, 'today'), (4, 'missed')]
45 | try:
46 | influx_payload.append(
47 | {
48 | "measurement": "SickChill",
49 | "tags": {
50 | "type": [item[0] for item in missing_types if key in item][0],
51 | "indexerid": show.indexerid,
52 | "server": self.server.id,
53 | "name": show.show_name,
54 | "epname": show.ep_name,
55 | "sxe": sxe,
56 | "airdate": show.airdate,
57 | },
58 | "time": now,
59 | "fields": {
60 | "hash": hash_id
61 | }
62 | }
63 | )
64 | except IndexError as e:
65 | self.logger.error('Error building payload for sickchill. Discarding. Error: %s', e)
66 |
67 | if influx_payload:
68 | self.dbmanager.write_points(influx_payload)
69 |
--------------------------------------------------------------------------------
/varken/sonarr.py:
--------------------------------------------------------------------------------
1 | from logging import getLogger
2 | from requests import Session, Request
3 | from datetime import datetime, timezone, date, timedelta
4 |
5 | from varken.structures import Queue, SonarrTVShow
6 | from varken.helpers import hashit, connection_handler
7 |
8 |
9 | class SonarrAPI(object):
10 | def __init__(self, server, dbmanager):
11 | self.dbmanager = dbmanager
12 | self.server = server
13 | # Create session to reduce server web thread load, and globally define pageSize for all requests
14 | self.session = Session()
15 | self.session.headers = {'X-Api-Key': self.server.api_key}
16 | self.session.params = {'pageSize': 1000}
17 | self.logger = getLogger()
18 |
19 | def __repr__(self):
20 |         return f"<sonarr-{self.server.id}>"
21 |
22 | def get_calendar(self, query="Missing"):
23 | endpoint = '/api/calendar/'
24 | today = str(date.today())
25 | last_days = str(date.today() - timedelta(days=self.server.missing_days))
26 | future = str(date.today() + timedelta(days=self.server.future_days))
27 | now = datetime.now(timezone.utc).astimezone().isoformat()
28 | if query == "Missing":
29 | params = {'start': last_days, 'end': today}
30 | else:
31 | params = {'start': today, 'end': future}
32 | influx_payload = []
33 | air_days = []
34 | missing = []
35 |
36 | req = self.session.prepare_request(Request('GET', self.server.url + endpoint, params=params))
37 | get = connection_handler(self.session, req, self.server.verify_ssl)
38 |
39 | if not get:
40 | return
41 |
42 | tv_shows = []
43 | for show in get:
44 | try:
45 | tv_shows.append(SonarrTVShow(**show))
46 | except TypeError as e:
47 | self.logger.error('TypeError has occurred : %s while creating SonarrTVShow structure for show. Data '
48 | 'attempted is: %s', e, show)
49 |
50 | for show in tv_shows:
51 | sxe = f'S{show.seasonNumber:0>2}E{show.episodeNumber:0>2}'
52 | if show.hasFile:
53 | downloaded = 1
54 | else:
55 | downloaded = 0
56 | if query == "Missing":
57 | if show.monitored and not downloaded:
58 | missing.append((show.series['title'], downloaded, sxe, show.title, show.airDateUtc, show.id))
59 | else:
60 | air_days.append((show.series['title'], downloaded, sxe, show.title, show.airDateUtc, show.id))
61 |
62 | for series_title, dl_status, sxe, episode_title, air_date_utc, sonarr_id in (air_days or missing):
63 | hash_id = hashit(f'{self.server.id}{series_title}{sxe}')
64 | influx_payload.append(
65 | {
66 | "measurement": "Sonarr",
67 | "tags": {
68 | "type": query,
69 | "sonarrId": sonarr_id,
70 | "server": self.server.id,
71 | "name": series_title,
72 | "epname": episode_title,
73 | "sxe": sxe,
74 | "airsUTC": air_date_utc,
75 | "downloaded": dl_status
76 | },
77 | "time": now,
78 | "fields": {
79 | "hash": hash_id
80 | }
81 | }
82 | )
83 |
84 | self.dbmanager.write_points(influx_payload)
85 |
86 | def get_queue(self):
87 | influx_payload = []
88 | endpoint = '/api/queue'
89 | now = datetime.now(timezone.utc).astimezone().isoformat()
90 | queue = []
91 |
92 | req = self.session.prepare_request(Request('GET', self.server.url + endpoint))
93 | get = connection_handler(self.session, req, self.server.verify_ssl)
94 |
95 | if not get:
96 | return
97 |
98 | download_queue = []
99 | for show in get:
100 | try:
101 | download_queue.append(Queue(**show))
102 | except TypeError as e:
103 | self.logger.error('TypeError has occurred : %s while creating Queue structure. Data attempted is: '
104 | '%s', e, show)
105 | if not download_queue:
106 | return
107 |
108 | for show in download_queue:
109 | try:
110 | sxe = f"S{show.episode['seasonNumber']:0>2}E{show.episode['episodeNumber']:0>2}"
111 | except TypeError as e:
112 |                 self.logger.error('TypeError has occurred : %s while processing the sonarr queue. '
113 |                                   'Remove invalid queue entry. Data attempted is: %s', e, show)
114 | continue
115 |
116 | if show.protocol.upper() == 'USENET':
117 | protocol_id = 1
118 | else:
119 | protocol_id = 0
120 |
121 | queue.append((show.series['title'], show.episode['title'], show.protocol.upper(),
122 | protocol_id, sxe, show.id, show.quality['quality']['name']))
123 |
124 | for series_title, episode_title, protocol, protocol_id, sxe, sonarr_id, quality in queue:
125 | hash_id = hashit(f'{self.server.id}{series_title}{sxe}')
126 | influx_payload.append(
127 | {
128 | "measurement": "Sonarr",
129 | "tags": {
130 | "type": "Queue",
131 | "sonarrId": sonarr_id,
132 | "server": self.server.id,
133 | "name": series_title,
134 | "epname": episode_title,
135 | "sxe": sxe,
136 | "protocol": protocol,
137 | "protocol_id": protocol_id,
138 | "quality": quality
139 | },
140 | "time": now,
141 | "fields": {
142 | "hash": hash_id
143 | }
144 | }
145 | )
146 | if influx_payload:
147 | self.dbmanager.write_points(influx_payload)
148 | else:
149 | self.logger.debug("No data to send to influx for sonarr instance, discarding.")
150 |
--------------------------------------------------------------------------------
/varken/structures.py:
--------------------------------------------------------------------------------
1 | from sys import version_info
2 | from typing import NamedTuple
3 | from logging import getLogger
4 |
5 | logger = getLogger('temp')
6 | # Check for python 3.6.2 or newer to resolve erroneous typing.NamedTuple issues
7 | if version_info < (3, 6, 2):
8 | logger.error('Varken requires python3.6.2 or newer. You are on python%s.%s.%s - Exiting...',
9 | version_info.major, version_info.minor, version_info.micro)
10 | exit(1)
11 |
12 |
13 | # Server Structures
14 | class InfluxServer(NamedTuple):
15 | password: str = 'root'
16 | port: int = 8086
17 | ssl: bool = False
18 | url: str = 'localhost'
19 | username: str = 'root'
20 | verify_ssl: bool = False
21 |
22 |
23 | class SonarrServer(NamedTuple):
24 | api_key: str = None
25 | future_days: int = 0
26 | future_days_run_seconds: int = 30
27 | id: int = None
28 | missing_days: int = 0
29 | missing_days_run_seconds: int = 30
30 | queue: bool = False
31 | queue_run_seconds: int = 30
32 | url: str = None
33 | verify_ssl: bool = False
34 |
35 |
36 | class RadarrServer(NamedTuple):
37 | api_key: str = None
38 | get_missing: bool = False
39 | get_missing_run_seconds: int = 30
40 | id: int = None
41 | queue: bool = False
42 | queue_run_seconds: int = 30
43 | url: str = None
44 | verify_ssl: bool = False
45 |
46 |
47 | class OmbiServer(NamedTuple):
48 | api_key: str = None
49 | id: int = None
50 | issue_status_counts: bool = False
51 | issue_status_run_seconds: int = 30
52 | request_total_counts: bool = False
53 | request_total_run_seconds: int = 30
54 | request_type_counts: bool = False
55 | request_type_run_seconds: int = 30
56 | url: str = None
57 | verify_ssl: bool = False
58 |
59 |
60 | class TautulliServer(NamedTuple):
61 | api_key: str = None
62 | fallback_ip: str = None
63 | get_activity: bool = False
64 | get_activity_run_seconds: int = 30
65 | get_stats: bool = False
66 | get_stats_run_seconds: int = 30
67 | id: int = None
68 | url: str = None
69 | verify_ssl: bool = None
70 | maxmind_license_key: str = None
71 |
72 |
73 | class SickChillServer(NamedTuple):
74 | api_key: str = None
75 | get_missing: bool = False
76 | get_missing_run_seconds: int = 30
77 | id: int = None
78 | url: str = None
79 | verify_ssl: bool = False
80 |
81 |
82 | class UniFiServer(NamedTuple):
83 | get_usg_stats_run_seconds: int = 30
84 | id: int = None
85 | password: str = 'ubnt'
86 | site: str = None
87 | url: str = 'unifi.domain.tld:8443'
88 | username: str = 'ubnt'
89 | usg_name: str = None
90 | verify_ssl: bool = False
91 |
92 |
93 | # Shared
94 | class Queue(NamedTuple):
95 | downloadId: str = None
96 | episode: dict = None
97 | estimatedCompletionTime: str = None
98 | id: int = None
99 | movie: dict = None
100 | protocol: str = None
101 | quality: dict = None
102 | series: dict = None
103 | size: float = None
104 | sizeleft: float = None
105 | status: str = None
106 | statusMessages: list = None
107 | timeleft: str = None
108 | title: str = None
109 | trackedDownloadStatus: str = None
110 |
111 |
112 | # Ombi Structures
113 | class OmbiRequestCounts(NamedTuple):
114 | approved: int = 0
115 | available: int = 0
116 | pending: int = 0
117 |
118 |
119 | class OmbiIssuesCounts(NamedTuple):
120 | inProgress: int = 0
121 | pending: int = 0
122 | resolved: int = 0
123 |
124 |
125 | class OmbiTVRequest(NamedTuple):
126 | background: str = None
127 | childRequests: list = None
128 | denied: bool = None
129 | deniedReason: None = None
130 | id: int = None
131 | imdbId: str = None
132 | markedAsDenied: str = None
133 | overview: str = None
134 | posterPath: str = None
135 | qualityOverride: None = None
136 | releaseDate: str = None
137 | rootFolder: None = None
138 | status: str = None
139 | title: str = None
140 | totalSeasons: int = None
141 | tvDbId: int = None
142 | requestedByAlias: str = None
143 | requestStatus: str = None
144 |
145 |
146 | class OmbiMovieRequest(NamedTuple):
147 | approved: bool = None
148 | available: bool = None
149 | background: str = None
150 | canApprove: bool = None
151 | denied: bool = None
152 | deniedReason: None = None
153 | digitalRelease: bool = None
154 | digitalReleaseDate: None = None
155 | id: int = None
156 | imdbId: str = None
157 | issueId: None = None
158 | issues: None = None
159 | markedAsApproved: str = None
160 | markedAsAvailable: None = None
161 | markedAsDenied: str = None
162 | overview: str = None
163 | posterPath: str = None
164 | qualityOverride: int = None
165 | released: bool = None
166 | releaseDate: str = None
167 | requestedDate: str = None
168 | requestedUser: dict = None
169 | requestedUserId: str = None
170 | requestType: int = None
171 | rootPathOverride: int = None
172 | showSubscribe: bool = None
173 | status: str = None
174 | subscribed: bool = None
175 | theMovieDbId: int = None
176 | title: str = None
177 | langCode: str = None
178 | languageCode: str = None
179 | requestedByAlias: str = None
180 | requestStatus: str = None
181 |
182 |
183 | # Sonarr
184 | class SonarrTVShow(NamedTuple):
185 | absoluteEpisodeNumber: int = None
186 | airDate: str = None
187 | airDateUtc: str = None
188 | episodeFile: dict = None
189 | episodeFileId: int = None
190 | episodeNumber: int = None
191 | hasFile: bool = None
192 | id: int = None
193 | lastSearchTime: str = None
194 | monitored: bool = None
195 | overview: str = None
196 | sceneAbsoluteEpisodeNumber: int = None
197 | sceneEpisodeNumber: int = None
198 | sceneSeasonNumber: int = None
199 | seasonNumber: int = None
200 | series: dict = None
201 | seriesId: int = None
202 | title: str = None
203 | unverifiedSceneNumbering: bool = None
204 |
205 |
206 | # Radarr
207 | class RadarrMovie(NamedTuple):
208 | added: str = None
209 | addOptions: str = None
210 | alternativeTitles: list = None
211 | certification: str = None
212 | cleanTitle: str = None
213 | downloaded: bool = None
214 | folderName: str = None
215 | genres: list = None
216 | hasFile: bool = None
217 | id: int = None
218 | images: list = None
219 | imdbId: str = None
220 | inCinemas: str = None
221 | isAvailable: bool = None
222 | lastInfoSync: str = None
223 | minimumAvailability: str = None
224 | monitored: bool = None
225 | movieFile: dict = None
226 | overview: str = None
227 | path: str = None
228 | pathState: str = None
229 | physicalRelease: str = None
230 | physicalReleaseNote: str = None
231 | profileId: int = None
232 | qualityProfileId: int = None
233 | ratings: dict = None
234 | runtime: int = None
235 | secondaryYear: str = None
236 | secondaryYearSourceId: int = None
237 | sizeOnDisk: int = None
238 | sortTitle: str = None
239 | status: str = None
240 | studio: str = None
241 | tags: list = None
242 | title: str = None
243 | titleSlug: str = None
244 | tmdbId: int = None
245 | website: str = None
246 | year: int = None
247 | youTubeTrailerId: str = None
248 |
249 |
250 | # Sickchill
251 | class SickChillTVShow(NamedTuple):
252 | airdate: str = None
253 | airs: str = None
254 | episode: int = None
255 | ep_name: str = None
256 | ep_plot: str = None
257 | indexerid: int = None
258 | network: str = None
259 | paused: int = None
260 | quality: str = None
261 | season: int = None
262 | show_name: str = None
263 | show_status: str = None
264 | tvdbid: int = None
265 | weekday: int = None
266 |
267 |
268 | # Tautulli
269 | class TautulliStream(NamedTuple):
270 | actors: list = None
271 | added_at: str = None
272 | allow_guest: int = None
273 | art: str = None
274 | aspect_ratio: str = None
275 | audience_rating: str = None
276 | audience_rating_image: str = None
277 | audio_bitrate: str = None
278 | audio_bitrate_mode: str = None
279 | audio_channel_layout: str = None
280 | audio_channels: str = None
281 | audio_codec: str = None
282 | audio_decision: str = None
283 | audio_language: str = None
284 | audio_language_code: str = None
285 | audio_profile: str = None
286 | audio_sample_rate: str = None
287 | bandwidth: str = None
288 | banner: str = None
289 | bif_thumb: str = None
290 | bitrate: str = None
291 | channel_icon: str = None
292 | channel_stream: int = None
293 | channel_title: str = None
294 | children_count: str = None
295 | collections: list = None
296 | container: str = None
297 | content_rating: str = None
298 | current_session: str = None
299 | date: str = None
300 | deleted_user: int = None
301 | device: str = None
302 | directors: list = None
303 | do_notify: int = None
304 | duration: str = None
305 | email: str = None
306 | extra_type: str = None
307 | file: str = None
308 | file_size: str = None
309 | friendly_name: str = None
310 | full_title: str = None
311 | genres: list = None
312 | grandparent_guid: str = None
313 | grandparent_rating_key: str = None
314 | grandparent_thumb: str = None
315 | grandparent_title: str = None
316 | group_count: int = None
317 | group_ids: str = None
318 | guid: str = None
319 | height: str = None
320 | id: str = None
321 | indexes: int = None
322 | ip_address: str = None
323 | ip_address_public: str = None
324 | is_admin: int = None
325 | is_allow_sync: int = None
326 | is_home_user: int = None
327 | is_restricted: int = None
328 | keep_history: int = None
329 | labels: list = None
330 | last_viewed_at: str = None
331 | library_name: str = None
332 | live: int = None
333 | live_uuid: str = None
334 | local: str = None
335 | location: str = None
336 | machine_id: str = None
337 | media_index: str = None
338 | media_type: str = None
339 | optimized_version: int = None
340 | optimized_version_profile: str = None
341 | optimized_version_title: str = None
342 | original_title: str = None
343 | originally_available_at: str = None
344 | parent_guid: str = None
345 | parent_media_index: str = None
346 | parent_rating_key: str = None
347 | parent_thumb: str = None
348 | parent_title: str = None
349 | paused_counter: int = None
350 | percent_complete: int = None
351 | platform: str = None
352 | platform_name: str = None
353 | platform_version: str = None
354 | player: str = None
355 | pre_tautulli: str = None
356 | product: str = None
357 | product_version: str = None
358 | profile: str = None
359 | progress_percent: str = None
360 | quality_profile: str = None
361 | rating: str = None
362 | rating_image: str = None
363 | rating_key: str = None
364 | reference_id: int = None
365 | relay: int = None
366 | relayed: int = None
367 | section_id: str = None
368 | secure: str = None
369 | selected: int = None
370 | session_id: str = None
371 | session_key: str = None
372 | shared_libraries: list = None
373 | sort_title: str = None
374 | started: int = None
375 | state: str = None
376 | stopped: int = None
377 | stream_aspect_ratio: str = None
378 | stream_audio_bitrate: str = None
379 | stream_audio_bitrate_mode: str = None
380 | stream_audio_channel_layout: str = None
381 | stream_audio_channel_layout_: str = None
382 | stream_audio_channels: str = None
383 | stream_audio_codec: str = None
384 | stream_audio_decision: str = None
385 | stream_audio_language: str = None
386 | stream_audio_language_code: str = None
387 | stream_audio_sample_rate: str = None
388 | stream_bitrate: str = None
389 | stream_container: str = None
390 | stream_container_decision: str = None
391 | stream_duration: str = None
392 | stream_subtitle_codec: str = None
393 | stream_subtitle_container: str = None
394 | stream_subtitle_decision: str = None
395 | stream_subtitle_forced: int = None
396 | stream_subtitle_format: str = None
397 | stream_subtitle_language: str = None
398 | stream_subtitle_language_code: str = None
399 | stream_subtitle_location: str = None
400 | stream_video_bit_depth: str = None
401 | stream_video_bitrate: str = None
402 | stream_video_codec: str = None
403 | stream_video_codec_level: str = None
404 | stream_video_decision: str = None
405 | stream_video_framerate: str = None
406 | stream_video_full_resolution: str = None
407 | stream_video_height: str = None
408 | stream_video_language: str = None
409 | stream_video_language_code: str = None
410 | stream_video_ref_frames: str = None
411 | stream_video_resolution: str = None
412 | stream_video_scan_type: str = None
413 | stream_video_width: str = None
414 | studio: str = None
415 | sub_type: str = None
416 | subtitle_codec: str = None
417 | subtitle_container: str = None
418 | subtitle_decision: str = None
419 | subtitle_forced: int = None
420 | subtitle_format: str = None
421 | subtitle_language: str = None
422 | subtitle_language_code: str = None
423 | subtitle_location: str = None
424 | subtitles: int = None
425 | summary: str = None
426 | synced_version: int = None
427 | synced_version_profile: str = None
428 | tagline: str = None
429 | throttled: str = None
430 | thumb: str = None
431 | title: str = None
432 | transcode_audio_channels: str = None
433 | transcode_audio_codec: str = None
434 | transcode_container: str = None
435 | transcode_decision: str = None
436 | transcode_height: str = None
437 | transcode_hw_decode: str = None
438 | transcode_hw_decode_title: str = None
439 | transcode_hw_decoding: int = None
440 | transcode_hw_encode: str = None
441 | transcode_hw_encode_title: str = None
442 | transcode_hw_encoding: int = None
443 | transcode_hw_full_pipeline: int = None
444 | transcode_hw_requested: int = None
445 | transcode_key: str = None
446 | transcode_progress: int = None
447 | transcode_protocol: str = None
448 | transcode_speed: str = None
449 | transcode_throttled: int = None
450 | transcode_video_codec: str = None
451 | transcode_width: str = None
452 | type: str = None
453 | updated_at: str = None
454 | user: str = None
455 | user_id: int = None
456 | user_rating: str = None
457 | user_thumb: str = None
458 | username: str = None
459 | video_bit_depth: str = None
460 | video_bitrate: str = None
461 | video_codec: str = None
462 | video_codec_level: str = None
463 | video_decision: str = None
464 | video_frame_rate: str = None
465 | video_framerate: str = None
466 | video_full_resolution: str = None
467 | video_height: str = None
468 | video_language: str = None
469 | video_language_code: str = None
470 | video_profile: str = None
471 | video_ref_frames: str = None
472 | video_resolution: str = None
473 | video_scan_type: str = None
474 | video_width: str = None
475 | view_offset: str = None
476 | watched_status: int = None
477 | width: str = None
478 | writers: list = None
479 | year: str = None
480 |
481 |
482 | # Lidarr
483 | class LidarrQueue(NamedTuple):
484 | artistId: int = None
485 | albumId: int = None
486 | language: dict = None
487 | quality: dict = None
488 | size: float = None
489 | title: str = None
490 | timeleft: str = None
491 | sizeleft: float = None
492 | status: str = None
493 | trackedDownloadStatus: str = None
494 | statusMessages: list = None
495 | downloadId: str = None
496 | protocol: str = None
497 | downloadClient: str = None
498 | indexer: str = None
499 | outputPath: str = None
500 | downloadForced: bool = None
501 | id: int = None
502 |
503 |
504 | class LidarrAlbum(NamedTuple):
505 | title: str = None
506 | disambiguation: str = None
507 | overview: str = None
508 | artistId: int = None
509 | foreignAlbumId: str = None
510 | monitored: bool = None
511 | anyReleaseOk: bool = None
512 | profileId: int = None
513 | duration: int = None
514 | albumType: str = None
515 | secondaryTypes: list = None
516 | mediumCount: int = None
517 | ratings: dict = None
518 | releaseDate: str = None
519 | releases: list = None
520 | genres: list = None
521 | media: list = None
522 | artist: dict = None
523 | images: list = None
524 | links: list = None
525 | statistics: dict = {}
526 | id: int = None
527 |
--------------------------------------------------------------------------------
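The collectors build these NamedTuples straight from API JSON (for example Queue(**show) in sonarr.py) and treat a TypeError as a schema mismatch. As a rough, hypothetical alternative for anyone experimenting with this module, the payload can be filtered down to the declared fields before construction; raw below is a made-up response fragment:

from varken.structures import Queue

raw = {
    'id': 42,
    'title': 'Some.Show.S01E01',
    'protocol': 'usenet',
    'brand_new_field': True,  # e.g. a field added in a newer Sonarr release
}

# Keep only keys the NamedTuple actually declares, so unexpected upstream
# additions do not raise TypeError on construction
known = {k: v for k, v in raw.items() if k in Queue._fields}
item = Queue(**known)
print(item.title, item.protocol)  # -> Some.Show.S01E01 usenet
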
/varken/tautulli.py:
--------------------------------------------------------------------------------
1 | from logging import getLogger
2 | from requests import Session, Request
3 | from geoip2.errors import AddressNotFoundError
4 | from datetime import datetime, timezone, date, timedelta
5 | from influxdb.exceptions import InfluxDBClientError
6 |
7 | from varken.structures import TautulliStream
8 | from varken.helpers import hashit, connection_handler, itemgetter_with_default
9 |
10 |
11 | class TautulliAPI(object):
12 | def __init__(self, server, dbmanager, geoiphandler):
13 | self.dbmanager = dbmanager
14 | self.server = server
15 | self.geoiphandler = geoiphandler
16 | self.session = Session()
17 | self.session.params = {'apikey': self.server.api_key}
18 | self.endpoint = '/api/v2'
19 | self.logger = getLogger()
20 | self.my_ip = None
21 |
22 | def __repr__(self):
23 |         return f"<tautulli-{self.server.id}-api>"
24 |
25 | def get_activity(self):
26 | now = datetime.now(timezone.utc).astimezone().isoformat()
27 | influx_payload = []
28 | params = {'cmd': 'get_activity'}
29 |
30 | req = self.session.prepare_request(Request('GET', self.server.url + self.endpoint, params=params))
31 | g = connection_handler(self.session, req, self.server.verify_ssl)
32 |
33 | if not g:
34 | return
35 |
36 | get = g['response']['data']
37 | fields = itemgetter_with_default(**TautulliStream._field_defaults)
38 |
39 | try:
40 | sessions = [TautulliStream(*fields(session)) for session in get['sessions']]
41 | except TypeError as e:
42 | self.logger.error('TypeError has occurred : %s while creating TautulliStream structure', e)
43 | return
44 |
45 | for session in sessions:
46 | # Check to see if ip_address_public attribute exists as it was introduced in v2
47 | try:
48 | getattr(session, 'ip_address_public')
49 | except AttributeError:
50 | self.logger.error('Public IP attribute missing!!! Do you have an old version of Tautulli (v1)?')
51 | exit(1)
52 |
53 | try:
54 | geodata = self.geoiphandler.lookup(session.ip_address_public)
55 | except (ValueError, AddressNotFoundError):
56 | self.logger.debug('Public IP missing for Tautulli session...')
57 | if not self.my_ip:
58 | # Try the fallback ip in the config file
59 | try:
60 | self.logger.debug('Attempting to use the fallback IP...')
61 | geodata = self.geoiphandler.lookup(self.server.fallback_ip)
62 | except AddressNotFoundError as e:
63 | self.logger.error('%s', e)
64 |
65 | self.my_ip = self.session.get('http://ip.42.pl/raw').text
66 |                         self.logger.debug('Looked up the public IP and set it to %s', self.my_ip)
67 |
68 | geodata = self.geoiphandler.lookup(self.my_ip)
69 |
70 | else:
71 | geodata = self.geoiphandler.lookup(self.my_ip)
72 |
73 | if not all([geodata.location.latitude, geodata.location.longitude]):
74 | latitude = 37.234332396
75 | longitude = -115.80666344
76 | else:
77 | latitude = geodata.location.latitude
78 | longitude = geodata.location.longitude
79 |
80 | if not geodata.city.name:
81 | location = '👽'
82 | else:
83 | location = geodata.city.name
84 |
85 | decision = session.transcode_decision
86 | if decision == 'copy':
87 | decision = 'direct stream'
88 |
89 | video_decision = session.stream_video_decision
90 | if video_decision == 'copy':
91 | video_decision = 'direct stream'
92 | elif video_decision == '':
93 | video_decision = 'Music'
94 |
95 | quality = session.stream_video_resolution
96 | if not quality:
97 | quality = session.container.upper()
98 | elif quality in ('SD', 'sd', '4k'):
99 | quality = session.stream_video_resolution.upper()
100 | elif session.stream_video_full_resolution:
101 | quality = session.stream_video_full_resolution
102 | else:
103 | quality = session.stream_video_resolution + 'p'
104 |
105 | player_state = session.state.lower()
106 | if player_state == 'playing':
107 | player_state = 0
108 | elif player_state == 'paused':
109 | player_state = 1
110 | elif player_state == 'buffering':
111 | player_state = 3
112 |
113 | # Platform Version Overrides
114 | product_version = session.product_version
115 | if session.platform in ('Roku', 'osx', 'windows'):
116 | product_version = session.product_version.split('-')[0]
117 |
118 | # Platform Overrides
119 | platform_name = session.platform
120 | if platform_name in 'osx':
121 | platform_name = 'macOS'
122 | if platform_name in 'windows':
123 | platform_name = 'Windows'
124 |
125 | hash_id = hashit(f'{session.session_id}{session.session_key}{session.username}{session.full_title}')
126 | influx_payload.append(
127 | {
128 | "measurement": "Tautulli",
129 | "tags": {
130 | "type": "Session",
131 | "session_id": session.session_id,
132 | "friendly_name": session.friendly_name,
133 | "username": session.username,
134 | "title": session.full_title,
135 | "product": session.product,
136 | "platform": platform_name,
137 | "product_version": product_version,
138 | "quality": quality,
139 | "video_decision": video_decision.title(),
140 | "transcode_decision": decision.title(),
141 | "transcode_hw_decoding": session.transcode_hw_decoding,
142 | "transcode_hw_encoding": session.transcode_hw_encoding,
143 | "media_type": session.media_type.title(),
144 | "audio_codec": session.audio_codec.upper(),
145 | "audio_profile": session.audio_profile.upper(),
146 | "stream_audio_codec": session.stream_audio_codec.upper(),
147 | "quality_profile": session.quality_profile,
148 | "progress_percent": session.progress_percent,
149 | "region_code": geodata.subdivisions.most_specific.iso_code,
150 | "location": location,
151 | "full_location": f'{geodata.subdivisions.most_specific.name} - {geodata.city.name}',
152 | "latitude": latitude,
153 | "longitude": longitude,
154 | "player_state": player_state,
155 | "device_type": platform_name,
156 | "relayed": session.relayed,
157 | "secure": session.secure,
158 | "server": self.server.id
159 | },
160 | "time": now,
161 | "fields": {
162 | "hash": hash_id
163 | }
164 | }
165 | )
166 |
167 | influx_payload.append(
168 | {
169 | "measurement": "Tautulli",
170 | "tags": {
171 | "type": "current_stream_stats",
172 | "server": self.server.id
173 | },
174 | "time": now,
175 | "fields": {
176 | "stream_count": int(get['stream_count']),
177 | "total_bandwidth": int(get['total_bandwidth']),
178 | "wan_bandwidth": int(get['wan_bandwidth']),
179 | "lan_bandwidth": int(get['lan_bandwidth']),
180 | "transcode_streams": int(get['stream_count_transcode']),
181 | "direct_play_streams": int(get['stream_count_direct_play']),
182 | "direct_streams": int(get['stream_count_direct_stream'])
183 | }
184 | }
185 | )
186 |
187 | self.dbmanager.write_points(influx_payload)
188 |
189 | def get_stats(self):
190 | now = datetime.now(timezone.utc).astimezone().isoformat()
191 | influx_payload = []
192 | params = {'cmd': 'get_libraries'}
193 |
194 | req = self.session.prepare_request(Request('GET', self.server.url + self.endpoint, params=params))
195 | g = connection_handler(self.session, req, self.server.verify_ssl)
196 |
197 | if not g:
198 | return
199 |
200 | get = g['response']['data']
201 |
202 | for library in get:
203 | data = {
204 | "measurement": "Tautulli",
205 | "tags": {
206 | "type": "library_stats",
207 | "server": self.server.id,
208 | "section_name": library['section_name'],
209 | "section_type": library['section_type']
210 | },
211 | "time": now,
212 | "fields": {
213 | "total": int(library['count'])
214 | }
215 | }
216 | if library['section_type'] == 'show':
217 | data['fields']['seasons'] = int(library['parent_count'])
218 | data['fields']['episodes'] = int(library['child_count'])
219 |
220 | elif library['section_type'] == 'artist':
221 | data['fields']['artists'] = int(library['count'])
222 | data['fields']['albums'] = int(library['parent_count'])
223 | data['fields']['tracks'] = int(library['child_count'])
224 | influx_payload.append(data)
225 |
226 | self.dbmanager.write_points(influx_payload)
227 |
228 | def get_historical(self, days=30):
229 | influx_payload = []
230 | start_date = date.today() - timedelta(days=days)
231 | params = {'cmd': 'get_history', 'grouping': 1, 'length': 1000000}
232 | req = self.session.prepare_request(Request('GET', self.server.url + self.endpoint, params=params))
233 | g = connection_handler(self.session, req, self.server.verify_ssl)
234 |
235 | if not g:
236 | return
237 |
238 | get = g['response']['data']['data']
239 |
240 | params = {'cmd': 'get_stream_data', 'row_id': 0}
241 | sessions = []
242 | for history_item in get:
243 | if not history_item['id']:
244 | self.logger.debug('Skipping entry with no ID. (%s)', history_item['full_title'])
245 | continue
246 | if date.fromtimestamp(history_item['started']) < start_date:
247 | continue
248 | params['row_id'] = history_item['id']
249 | req = self.session.prepare_request(Request('GET', self.server.url + self.endpoint, params=params))
250 | g = connection_handler(self.session, req, self.server.verify_ssl)
251 | if not g:
252 | self.logger.debug('Could not get historical stream data for %s. Skipping.', history_item['full_title'])
253 | try:
254 | self.logger.debug('Adding %s to history', history_item['full_title'])
255 | history_item.update(g['response']['data'])
256 | sessions.append(TautulliStream(**history_item))
257 | except TypeError as e:
258 | self.logger.error('TypeError has occurred : %s while creating TautulliStream structure', e)
259 | continue
260 |
261 | for session in sessions:
262 | try:
263 | geodata = self.geoiphandler.lookup(session.ip_address)
264 | except (ValueError, AddressNotFoundError):
265 | self.logger.debug('Public IP missing for Tautulli session...')
266 | if not self.my_ip:
267 | # Try the fallback ip in the config file
268 | try:
269 | self.logger.debug('Attempting to use the fallback IP...')
270 | geodata = self.geoiphandler.lookup(self.server.fallback_ip)
271 | except AddressNotFoundError as e:
272 | self.logger.error('%s', e)
273 |
274 | self.my_ip = self.session.get('http://ip.42.pl/raw').text
275 |                         self.logger.debug('Looked up the public IP and set it to %s', self.my_ip)
276 |
277 | geodata = self.geoiphandler.lookup(self.my_ip)
278 |
279 | else:
280 | geodata = self.geoiphandler.lookup(self.my_ip)
281 |
282 | if not all([geodata.location.latitude, geodata.location.longitude]):
283 | latitude = 37.234332396
284 | longitude = -115.80666344
285 | else:
286 | latitude = geodata.location.latitude
287 | longitude = geodata.location.longitude
288 |
289 | if not geodata.city.name:
290 | location = '👽'
291 | else:
292 | location = geodata.city.name
293 |
294 | decision = session.transcode_decision
295 | if decision == 'copy':
296 | decision = 'direct stream'
297 |
298 | video_decision = session.stream_video_decision
299 | if video_decision == 'copy':
300 | video_decision = 'direct stream'
301 | elif video_decision == '':
302 | video_decision = 'Music'
303 |
304 | quality = session.stream_video_resolution
305 | if not quality:
306 | quality = session.container.upper()
307 | elif quality in ('SD', 'sd', '4k'):
308 | quality = session.stream_video_resolution.upper()
309 | elif session.stream_video_full_resolution:
310 | quality = session.stream_video_full_resolution
311 | else:
312 | quality = session.stream_video_resolution + 'p'
313 |
314 | # Platform Overrides
315 | platform_name = session.platform
316 | if platform_name in 'osx':
317 | platform_name = 'Plex Mac OS'
318 | if platform_name in 'windows':
319 | platform_name = 'Plex Windows'
320 |
321 | player_state = 100
322 |
323 | hash_id = hashit(f'{session.id}{session.session_key}{session.user}{session.full_title}')
324 | influx_payload.append(
325 | {
326 | "measurement": "Tautulli",
327 | "tags": {
328 | "type": "Session",
329 | "session_id": session.session_id,
330 | "friendly_name": session.friendly_name,
331 | "username": session.user,
332 | "title": session.full_title,
333 | "product": session.product,
334 | "platform": platform_name,
335 | "quality": quality,
336 | "video_decision": video_decision.title(),
337 | "transcode_decision": decision.title(),
338 | "transcode_hw_decoding": session.transcode_hw_decoding,
339 | "transcode_hw_encoding": session.transcode_hw_encoding,
340 | "media_type": session.media_type.title(),
341 | "audio_codec": session.audio_codec.upper(),
342 | "stream_audio_codec": session.stream_audio_codec.upper(),
343 | "quality_profile": session.quality_profile,
344 | "progress_percent": session.progress_percent,
345 | "region_code": geodata.subdivisions.most_specific.iso_code,
346 | "location": location,
347 | "full_location": f'{geodata.subdivisions.most_specific.name} - {geodata.city.name}',
348 | "latitude": latitude,
349 | "longitude": longitude,
350 | "player_state": player_state,
351 | "device_type": platform_name,
352 | "relayed": session.relayed,
353 | "secure": session.secure,
354 | "server": self.server.id
355 | },
356 | "time": datetime.fromtimestamp(session.stopped).astimezone().isoformat(),
357 | "fields": {
358 | "hash": hash_id
359 | }
360 | }
361 | )
362 | try:
363 | self.dbmanager.write_points(influx_payload)
364 | except InfluxDBClientError as e:
365 | if "beyond retention policy" in str(e):
366 | self.logger.debug('Only imported 30 days of data per retention policy')
367 | else:
368 | self.logger.error('Something went wrong... post this output in discord: %s', e)
369 |
--------------------------------------------------------------------------------
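get_activity above builds each TautulliStream positionally from itemgetter_with_default(**TautulliStream._field_defaults), a helper defined in varken/helpers.py and not shown here. The sketch below is only an assumption about that helper's behaviour, useful for reasoning about the call site, and is not the actual implementation:

from varken.structures import TautulliStream

def itemgetter_with_default(**defaults):
    """Return a callable that pulls the given keys out of a mapping,
    falling back to the supplied default when a key is absent."""
    keys = list(defaults)

    def getter(mapping):
        return tuple(mapping.get(key, defaults[key]) for key in keys)

    return getter

# Every TautulliStream field has a default, so _field_defaults preserves
# the full field order and the positional unpacking below lines up
fields = itemgetter_with_default(**TautulliStream._field_defaults)
session = TautulliStream(*fields({'title': 'Some Movie', 'state': 'playing'}))
print(session.title, session.state, session.relayed)  # -> Some Movie playing None
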
/varken/unifi.py:
--------------------------------------------------------------------------------
1 | from logging import getLogger
2 | from requests import Session, Request
3 | from datetime import datetime, timezone
4 |
5 | from varken.helpers import connection_handler
6 |
7 |
8 | class UniFiAPI(object):
9 | def __init__(self, server, dbmanager):
10 | self.dbmanager = dbmanager
11 | self.server = server
12 | self.site = self.server.site
13 |         # Create a session to reduce server web thread load
14 | self.session = Session()
15 | self.logger = getLogger()
16 | self.get_retry = True
17 | self.get_cookie()
18 | self.get_site()
19 |
20 | def __repr__(self):
21 |         return f"<unifi-{self.server.id}-api>"
22 |
23 | def get_cookie(self):
24 | endpoint = '/api/login'
25 | pre_cookies = {'username': self.server.username, 'password': self.server.password, 'remember': True}
26 | req = self.session.prepare_request(Request('POST', self.server.url + endpoint, json=pre_cookies))
27 | post = connection_handler(self.session, req, self.server.verify_ssl, as_is_reply=True)
28 |
29 | if not post or not post.cookies.get('unifises'):
30 | self.logger.error("Could not retrieve session cookie from UniFi Controller")
31 | return
32 |
33 | cookies = {'unifises': post.cookies.get('unifises')}
34 | self.session.cookies.update(cookies)
35 |
36 | def get_site(self):
37 | endpoint = '/api/self/sites'
38 | req = self.session.prepare_request(Request('GET', self.server.url + endpoint))
39 | get = connection_handler(self.session, req, self.server.verify_ssl)
40 |
41 | if not get:
42 | self.logger.error("Could not get list of sites from UniFi Controller")
43 | return
44 | site = [site['name'] for site in get['data'] if site['name'].lower() == self.server.site.lower()
45 | or site['desc'].lower() == self.server.site.lower()]
46 | if site:
47 | self.site = site[0]
48 | else:
49 | self.logger.error(f"Could not map site {self.server.site} to a site id/alias")
50 |
51 | def get_usg_stats(self):
52 | now = datetime.now(timezone.utc).astimezone().isoformat()
53 | endpoint = f'/api/s/{self.site}/stat/device'
54 | req = self.session.prepare_request(Request('GET', self.server.url + endpoint))
55 | get = connection_handler(self.session, req, self.server.verify_ssl)
56 |
57 | if not get:
58 | if self.get_retry:
59 | self.get_retry = False
60 | self.logger.error("Attempting to reauthenticate for unifi-%s", self.server.id)
61 | self.get_cookie()
62 | self.get_usg_stats()
63 | else:
64 | self.get_retry = True
65 | self.logger.error("Disregarding Job get_usg_stats for unifi-%s", self.server.id)
66 | return
67 |
68 | if not self.get_retry:
69 | self.get_retry = True
70 |
71 | devices = {device['name']: device for device in get['data'] if device.get('name')}
72 |
73 | if devices.get(self.server.usg_name):
74 | device = devices[self.server.usg_name]
75 | else:
76 | self.logger.error("Could not find a USG named %s from your UniFi Controller", self.server.usg_name)
77 | return
78 |
79 | try:
80 | influx_payload = [
81 | {
82 | "measurement": "UniFi",
83 | "tags": {
84 | "model": device['model'],
85 | "name": device['name']
86 | },
87 | "time": now,
88 | "fields": {
89 | "bytes_current": device['wan1']['bytes-r'],
90 | "rx_bytes_total": device['wan1']['rx_bytes'],
91 | "rx_bytes_current": device['wan1']['rx_bytes-r'],
92 | "tx_bytes_total": device['wan1']['tx_bytes'],
93 | "tx_bytes_current": device['wan1']['tx_bytes-r'],
94 | "cpu_loadavg_1": float(device['sys_stats']['loadavg_1']),
95 | "cpu_loadavg_5": float(device['sys_stats']['loadavg_5']),
96 | "cpu_loadavg_15": float(device['sys_stats']['loadavg_15']),
97 | "cpu_util": float(device['system-stats']['cpu']),
98 | "mem_util": float(device['system-stats']['mem']),
99 | }
100 | }
101 | ]
102 | self.dbmanager.write_points(influx_payload)
103 | except KeyError as e:
104 | self.logger.error('Error building payload for unifi. Discarding. Error: %s', e)
105 |
--------------------------------------------------------------------------------
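The cookie handshake UniFiAPI performs can be reproduced with a bare requests session against a classic (non-UniFi-OS) controller. The URL, credentials, and site name below are placeholder assumptions mirroring the defaults in UniFiServer:

from requests import Session

url = 'https://unifi.domain.tld:8443'
creds = {'username': 'ubnt', 'password': 'ubnt', 'remember': True}

session = Session()
resp = session.post(f'{url}/api/login', json=creds, verify=False)

# The classic controller answers with a 'unifises' session cookie, which the
# Session then replays automatically on the stat endpoints
if resp.ok and session.cookies.get('unifises'):
    stats = session.get(f'{url}/api/s/default/stat/device', verify=False).json()
    print([device.get('name') for device in stats.get('data', [])])
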
/varken/varkenlogger.py:
--------------------------------------------------------------------------------
1 | from logging.handlers import RotatingFileHandler
2 | from logging import Filter, DEBUG, INFO, getLogger, Formatter, StreamHandler
3 |
4 | from varken.helpers import mkdir_p
5 |
6 |
7 | class BlacklistFilter(Filter):
8 | """
9 | Log filter for blacklisted tokens and passwords
10 | """
11 | filename = "varken.log"
12 | max_size = 5000000 # 5 MB
13 | max_files = 5
14 | log_folder = 'logs'
15 |
16 | blacklisted_strings = ['apikey', 'username', 'password', 'url']
17 |
18 | def __init__(self, filteredstrings):
19 | super().__init__()
20 | self.filtered_strings = filteredstrings
21 |
22 | def filter(self, record):
23 | for item in self.filtered_strings:
24 | try:
25 | if item in record.msg:
26 | record.msg = record.msg.replace(item, 8 * '*' + item[-5:])
27 | if any(item in str(arg) for arg in record.args):
28 | record.args = tuple(arg.replace(item, 8 * '*' + item[-5:]) if isinstance(arg, str) else arg
29 | for arg in record.args)
30 | except TypeError:
31 | pass
32 | return True
33 |
34 |
35 | class VarkenLogger(object):
36 | def __init__(self, debug=None, data_folder=None):
37 | self.data_folder = data_folder
38 | self.log_level = debug
39 |
40 | # Set log level
41 | if self.log_level:
42 | self.log_level = DEBUG
43 |
44 | else:
45 | self.log_level = INFO
46 |
47 | # Make the log directory if it does not exist
48 | mkdir_p(f'{self.data_folder}/{BlacklistFilter.log_folder}')
49 |
50 | # Create the Logger
51 | self.logger = getLogger()
52 | self.logger.setLevel(DEBUG)
53 |
54 | # Create a Formatter for formatting the log messages
55 | logger_formatter = Formatter('%(asctime)s : %(levelname)s : %(module)s : %(message)s', '%Y-%m-%d %H:%M:%S')
56 |
57 | # Create the Handler for logging data to a file
58 | file_logger = RotatingFileHandler(f'{self.data_folder}/{BlacklistFilter.log_folder}/{BlacklistFilter.filename}',
59 | mode='a', maxBytes=BlacklistFilter.max_size, encoding=None, delay=0,
60 | backupCount=BlacklistFilter.max_files)
61 |
62 | file_logger.setLevel(self.log_level)
63 |
64 | # Add the Formatter to the Handler
65 | file_logger.setFormatter(logger_formatter)
66 |
67 | # Add the console logger
68 | console_logger = StreamHandler()
69 | console_logger.setFormatter(logger_formatter)
70 | console_logger.setLevel(self.log_level)
71 |
72 | # Add the Handler to the Logger
73 | self.logger.addHandler(file_logger)
74 | self.logger.addHandler(console_logger)
75 |
--------------------------------------------------------------------------------
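A short usage sketch of the logger above; the data folder and the secret being redacted are made-up values. VarkenLogger does not attach a BlacklistFilter itself in the code shown, so the sketch adds one manually for demonstration:

from logging import getLogger
from varken.varkenlogger import VarkenLogger, BlacklistFilter

vl = VarkenLogger(debug=True, data_folder='/tmp/varken-demo')  # assumed folder

# Redact an example secret from everything logged through the root logger
for handler in vl.logger.handlers:
    handler.addFilter(BlacklistFilter(['supersecretapikey']))

getLogger().info('Connecting with apikey supersecretapikey')
# -> "Connecting with apikey ********pikey" on the console and in logs/varken.log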