├── .codeclimate.yml
├── .dockerignore
├── .gitattributes
├── .github
├── FUNDING.yml
├── dependabot.yml
└── workflows
│ ├── docker-build.yaml
│ ├── linters.yaml
│ └── tests.yaml
├── .gitignore
├── .gitlab-ci.yml
├── .gitlint
├── .pre-commit-config.yaml
├── .python-version
├── Dockerfile
├── LICENSE
├── Procfile
├── README.md
├── bin
└── entrypoint.sh
├── frontend
├── index.html
├── public
│ └── favicon.png
└── src
│ ├── App.vue
│ ├── api
│ └── index.js
│ ├── components
│ ├── AddFeedFromUpload.vue
│ ├── AddFeedProcessedUrl.vue
│ ├── AddFeeds.vue
│ ├── ApplePodcastsSearch.vue
│ ├── ApplePodcastsSearchItem.vue
│ ├── ApplePodcastsToplist.vue
│ ├── Auth.vue
│ ├── Backdrop.vue
│ ├── EpisodeDetail.vue
│ ├── EpisodeDetailModal.vue
│ ├── EpisodeList.vue
│ ├── EpisodeListItem.vue
│ ├── Home.vue
│ ├── InfiniteNoMore.vue
│ ├── Login.vue
│ ├── PodcastDetail.vue
│ ├── PodcastListItem.vue
│ ├── PrivacySafeImage.vue
│ ├── ResetPassword.vue
│ └── Spinner.vue
│ ├── filters.js
│ ├── images
│ ├── icon.png
│ ├── icon@2x.png
│ ├── icon@3x.png
│ ├── icon_icon.svg
│ ├── icon_icon_margin.svg
│ ├── icon_margin.png
│ ├── icon_margin@2x.png
│ ├── icon_margin@3x.png
│ └── michael-mroczek-195362-unsplash.jpg
│ ├── main.js
│ ├── mixins
│ └── index.js
│ ├── router
│ └── index.js
│ └── scss
│ ├── _borders-responsive.scss
│ ├── _dark-mode.scss
│ ├── _fonts.scss
│ └── main.scss
├── hack
├── assets
│ ├── if_Cassette_669942.svg
│ └── michael-mroczek-195362-unsplash.jpg
├── attachments
│ ├── ApplePodcastsSpecUpdatesiOS11.pdf
│ ├── overcast20171205.opml
│ └── screenshots
│ │ ├── login-animated.gif
│ │ ├── podcast-detail.png
│ │ ├── podcasts-list.png
│ │ ├── podcasts-new.png
│ │ └── welcome.png
└── docker
│ ├── docker-compose-separate-worker.yml
│ └── docker-compose.yml
├── listeners
├── __init__.py
├── admin.py
├── migrations
│ ├── 0001_initial.py
│ └── __init__.py
├── models.py
└── serializers.py
├── manage.py
├── package-lock.json
├── package.json
├── podcasts
├── __init__.py
├── admin.py
├── api
│ ├── __init__.py
│ ├── serializers.py
│ └── views.py
├── apps.py
├── conf.py
├── enums.py
├── management
│ ├── __init__.py
│ └── commands
│ │ ├── __init__.py
│ │ ├── dbconnection.py
│ │ ├── initadmin.py
│ │ └── refresh_feeds.py
├── migrations
│ ├── 0001_initial.py
│ ├── 0002_episode_image.py
│ ├── 0003_auto_20190901_1334.py
│ ├── 0004_auto_20190902_1938.py
│ ├── 0005_auto_20190902_1957.py
│ ├── 0006_auto_20190902_2002.py
│ └── __init__.py
├── models
│ ├── __init__.py
│ ├── common.py
│ ├── episode.py
│ ├── episode_chapter.py
│ └── podcast.py
├── old_api.py
├── serializers.py
├── tasks.py
├── tests
│ ├── __init__.py
│ ├── cassettes
│ │ ├── test_episode_model.yaml
│ │ ├── test_invalid_feed[0-None-Not Found].yaml
│ │ ├── test_invalid_feed[1-None-Feed is malformatted].yaml
│ │ ├── test_long_subtitle_feed.yaml
│ │ ├── test_paged_feed.yaml
│ │ ├── test_podcast_model.yaml
│ │ ├── test_podcast_with_paged_feed.yaml
│ │ └── test_valid_feed.yaml
│ ├── fixtures
│ │ ├── invalid.xml
│ │ ├── paged_p1.xml
│ │ ├── paged_p2.xml
│ │ ├── subtitle_too_long.xml
│ │ └── valid.xml
│ ├── test_models.py
│ └── test_utils.py
├── utils
│ ├── __init__.py
│ ├── filters.py
│ ├── parsers
│ │ ├── __init__.py
│ │ └── feed_content.py
│ ├── properties.py
│ ├── sanitizers.py
│ └── serializers.py
├── validators.py
└── views.py
├── poetry.lock
├── pyproject.toml
├── requirements.txt
├── setup.cfg
├── tapedrive
├── __init__.py
├── settings.py
├── urls.py
└── wsgi.py
└── vite.config.js
/.codeclimate.yml:
--------------------------------------------------------------------------------
1 | version: "2"
2 | checks:
3 | file-lines:
4 | config:
5 | threshold: 250
6 | method-complexity:
7 | config:
8 | threshold: 18
9 | method-count:
10 | config:
11 | threshold: 20
12 | method-lines:
13 | config:
14 | threshold: 40
15 | exclude_patterns:
16 | - "**/migrations/"
17 | - "**/tests/"
18 | - "**/templates/"
19 |
--------------------------------------------------------------------------------
/.dockerignore:
--------------------------------------------------------------------------------
1 | __research_material
2 | hack
3 | .cache
4 | .coverage
5 | .env
6 | .git
7 | .pytest_cache
8 | .python-version
9 | .tx
10 | *.mo
11 | *.pyc
12 | *.sqlite3
13 | **/__pycache__
14 | **/.DS_Store
15 | **/tests
16 | assets
17 | htmlcov
18 | media
19 | mediafiles
20 | secret.txt
21 | staticfiles
22 |
23 | # Ignore files already generated during Docker build
24 | **/node_modules
25 | **/dist
26 |
27 | # Ignore all files from iPython Notebooks (used for PoCs)
28 | .ipynb_checkpoints
29 | .ipynotebooks
30 | *ipynb
31 |
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | *.pdf filter=lfs diff=lfs merge=lfs -text
2 | *.png filter=lfs diff=lfs merge=lfs -text
3 | *.jpg filter=lfs diff=lfs merge=lfs -text
4 |
--------------------------------------------------------------------------------
/.github/FUNDING.yml:
--------------------------------------------------------------------------------
1 | liberapay: janw
2 | ko_fi: janwxyz
3 |
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 | updates:
3 | - package-ecosystem: pip
4 | directory: /
5 | schedule:
6 | interval: weekly
7 | labels:
8 | - dependencies
9 | commit-message:
10 | prefix: "build(deps)"
11 |
--------------------------------------------------------------------------------
/.github/workflows/docker-build.yaml:
--------------------------------------------------------------------------------
1 | name: Docker Build
2 |
3 | on:
4 | pull_request:
5 | push:
6 | branches:
7 | - "main"
8 | tags:
9 | - "*"
10 |
11 | jobs:
12 | docker-build:
13 | runs-on: ubuntu-latest
14 | steps:
15 | - name: Checkout
16 | uses: actions/checkout@v3
17 |
18 | - name: Get python version from file
19 | run: echo "PYTHON_VERSION=$(cat .python-version)" | tee -a "$GITHUB_ENV"
20 |
21 | - name: Docker meta
22 | id: meta
23 | uses: docker/metadata-action@v5
24 | with:
25 | images: "ghcr.io/${{ github.repository }}"
26 | tags: |
27 | type=ref,event=pr
28 | type=semver,pattern=v{{major}}
29 | type=semver,pattern=v{{major}}.{{minor}}
30 | type=semver,pattern=v{{version}}
31 | type=raw,value=edge,enable=${{ github.ref == format('refs/heads/{0}', 'main') }}
32 |
33 | - name: Set up Docker Buildx
34 | uses: docker/setup-buildx-action@v3
35 |
36 | - name: Login to Github Container Registry
37 | if: github.event_name != 'pull_request'
38 | uses: docker/login-action@v2
39 | with:
40 | registry: ghcr.io
41 | username: ${{ github.actor }}
42 | password: ${{ secrets.GITHUB_TOKEN }}
43 |
44 | - name: Build and push
45 | id: docker_build
46 | uses: docker/build-push-action@v5
47 | with:
48 | context: .
49 | push: ${{ github.event_name != 'pull_request' }}
50 | tags: ${{ steps.meta.outputs.tags }}
51 | labels: ${{ steps.meta.outputs.labels }}
52 | platforms: linux/amd64 # TODO: add `linux/arm64/v8`
53 | build-args: |
54 | PYTHON_VERSION=${{ env.PYTHON_VERSION }}
55 |
56 | - name: Image digest
57 | run: echo ${{ steps.docker_build.outputs.digest }}
58 |
--------------------------------------------------------------------------------
/.github/workflows/linters.yaml:
--------------------------------------------------------------------------------
1 | name: Linters
2 |
3 | on:
4 | pull_request:
5 |
6 | jobs:
7 | commitizen:
8 | runs-on: ubuntu-latest
9 | steps:
10 | - name: Check out
11 | uses: actions/checkout@v3
12 | with:
13 | fetch-depth: 0
14 | sparse-checkout: |
15 | pyproject.toml
16 | sparse-checkout-cone-mode: false
17 |
18 | - name: Install commitizen
19 | run: pipx install commitizen
20 |
21 | - run: cz check --rev-range origin/main..HEAD
22 | shell: bash
23 |
24 | # pre-commit-extras:
25 | # runs-on: ubuntu-latest
26 | # steps:
27 | # - name: Check out
28 | # uses: actions/checkout@v3
29 |
30 | # - name: Get python version from file
31 | # run: echo "PYTHON_VERSION=$(cat .python-version)" | tee -a "$GITHUB_ENV"
32 |
33 | # - name: Install commitizen
34 | # run: pipx install poetry
35 |
36 | # - name: Set up python
37 | # uses: actions/setup-python@v4
38 | # with:
39 | # python-version: ${{ env.PYTHON_VERSION }}
40 | # cache: 'poetry'
41 |
42 | # - run: poetry install --with=dev
43 |
44 | # - uses: pre-commit/action@v3.0.0
45 | # with:
46 | # extra_args: >
47 | # rich-codex
48 | # --all-files
49 |
--------------------------------------------------------------------------------
/.github/workflows/tests.yaml:
--------------------------------------------------------------------------------
1 | name: Tests
2 |
3 | on:
4 | pull_request:
5 | push:
6 | branches:
7 | - "main"
8 | tags:
9 | - "*"
10 |
11 | jobs:
12 | pytest:
13 | runs-on: ubuntu-latest
14 | steps:
15 | - name: Check out
16 | uses: actions/checkout@v3
17 |
18 | - name: Get python version from file
19 | run: echo "PYTHON_VERSION=$(cat .python-version)" | tee -a "$GITHUB_ENV"
20 |
21 | - name: Install poetry
22 | run: pipx install poetry
23 |
24 | - name: Set up python environment
25 | uses: actions/setup-python@v4
26 | with:
27 | python-version: ${{ env.PYTHON_VERSION }}
28 | cache: 'poetry'
29 |
30 | - run: poetry install --no-root --with=tests --sync
31 |
32 | - run: poetry run pytest --cov --cov-report=xml --cov-report=term
33 |
34 | - name: Upload coverage reports to Codeclimate
35 | uses: paambaati/codeclimate-action@v5
36 | env:
37 | CC_TEST_REPORTER_ID: bfeb9df569119f2dfbf094be7ebff7f1c40ab30660d1cb949fc43c3c68618ee6
38 | with:
39 | coverageLocations: |
40 | coverage.xml:coverage.py
41 | if: always()
42 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Created by https://www.gitignore.io
2 |
3 | ### OSX ###
4 | .DS_Store
5 | .AppleDouble
6 | .LSOverride
7 |
8 | # Icon must end with two \r
9 | Icon
10 |
11 |
12 | # Thumbnails
13 | ._*
14 |
15 | # Files that might appear on external disk
16 | .Spotlight-V100
17 | .Trashes
18 |
19 | # Directories potentially created on remote AFP share
20 | .AppleDB
21 | .AppleDesktop
22 | Network Trash Folder
23 | Temporary Items
24 | .apdisk
25 |
26 |
27 | ### Python ###
28 | # Byte-compiled / optimized / DLL files
29 | .pytest_cache/
30 | __pycache__/
31 | *.py[cod]
32 | *.ipynb
33 | # C extensions
34 | *.so
35 |
36 | # Distribution / packaging
37 | .Python
38 | env/
39 | build/
40 | develop-eggs/
41 | dist/
42 | downloads/
43 | eggs/
44 | lib/
45 | lib64/
46 | parts/
47 | sdist/
48 | var/
49 | *.egg-info/
50 | .installed.cfg
51 | *.egg
52 |
53 | # PyInstaller
54 | # Usually these files are written by a python script from a template
55 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
56 | *.manifest
57 | *.spec
58 |
59 | # Pyenv
60 | .python-version
61 |
62 | # Installer logs
63 | pip-log.txt
64 | pip-delete-this-directory.txt
65 |
66 | # Unit test / coverage reports
67 | htmlcov/
68 | .tox/
69 | .coverage
70 | .cache
71 | nosetests.xml
72 | coverage.xml
73 |
74 | # Translations
75 | *.mo
76 | *.pot
77 |
78 | # Sphinx documentation
79 | docs/_build/
80 |
81 | # PyBuilder
82 | target/
83 |
84 |
85 | ### Django ###
86 | *.log
87 | *.pot
88 | *.pyc
89 | __pycache__/
90 | local_settings.py
91 | *secret.txt
92 |
93 | .env
94 | *.sqlite3
95 |
96 | # compiled assets
97 | assets/dist/
98 | .sass-cache/
99 |
100 | # downloaded test feeds
101 | *.xml
102 |
103 | # Files uploaded / saved from podcasts
104 | media/
105 | mediafiles/
106 | static/
107 | staticfiles/
108 | $HOME/
109 |
110 | ### Sublime Text ###
111 | *.sublime-*
112 |
113 |
114 |
115 | # Created by https://www.gitignore.io/api/node
116 |
117 | ### Node ###
118 | # Logs
119 | logs
120 | *.log
121 | npm-debug.log*
122 | yarn-debug.log*
123 | yarn-error.log*
124 |
125 | # Runtime data
126 | pids
127 | *.pid
128 | *.seed
129 | *.pid.lock
130 |
131 | # Directory for instrumented libs generated by jscoverage/JSCover
132 | lib-cov
133 |
134 | # Coverage directory used by tools like istanbul
135 | coverage
136 |
137 | # nyc test coverage
138 | .nyc_output
139 |
140 | # Grunt intermediate storage (http://gruntjs.com/creating-plugins#storing-task-files)
141 | .grunt
142 |
143 | # Bower dependency directory (https://bower.io/)
144 | bower_components
145 |
146 | # node-waf configuration
147 | .lock-wscript
148 |
149 | # Compiled binary addons (http://nodejs.org/api/addons.html)
150 | build/Release
151 |
152 | # Dependency directories
153 | node_modules/
154 | jspm_packages/
155 |
156 | # Typescript v1 declaration files
157 | typings/
158 |
159 | # Optional npm cache directory
160 | .npm
161 |
162 | # Optional eslint cache
163 | .eslintcache
164 |
165 | # Optional REPL history
166 | .node_repl_history
167 |
168 | # Output of 'npm pack'
169 | *.tgz
170 |
171 | # Yarn Integrity file
172 | .yarn-integrity
173 |
174 | # End of https://www.gitignore.io/api/node
175 |
176 | webpack-stats.json
177 | __old_templates/
178 |
--------------------------------------------------------------------------------
/.gitlab-ci.yml:
--------------------------------------------------------------------------------
1 | stages:
2 | - test
3 | - build
4 | - tag
5 |
6 | .python:
7 | image: registry.gitlab.com/janw/python-poetry:3.7
8 | stage: test
9 | cache:
10 | key: pip-${CI_JOB_NAME}
11 | paths:
12 | - .pytest_cache
13 | - "$CI_PROJECT_DIR/.cache/pip"
14 | - "$CI_PROJECT_DIR/.cache/pre-commit"
15 | variables:
16 | PIP_CACHE_DIR: "$CI_PROJECT_DIR/.cache/pip"
17 | PRE_COMMIT_HOME: "$CI_PROJECT_DIR/.cache/pre-commit"
18 | before_script:
19 | - poetry debug
20 | - poetry config virtualenvs.create false
21 | - poetry install --no-interaction
22 |
23 | pytest:
24 | extends: .python
25 | services:
26 | - postgres:latest
27 | variables:
28 | POSTGRES_DB: tapedrive_testing
29 | POSTGRES_USER: gitlabci
30 | POSTGRES_PASSWORD: tapedrive
31 | DATABASE_URL: "postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@postgres/$POSTGRES_DB"
32 |
33 | # Required for coverage to pick up correct branch name
34 | GIT_BRANCH: "${CI_COMMIT_REF_NAME}"
35 | script:
36 | - wget -O ./cc-test-reporter https://codeclimate.com/downloads/test-reporter/test-reporter-latest-linux-amd64
37 | - chmod +x cc-test-reporter
38 | - ./cc-test-reporter before-build
39 | - pytest --junitxml=pytest.xml --vcr-record=none --cov --cov-report term --cov-report xml
40 | - ./cc-test-reporter after-build
41 | artifacts:
42 | reports:
43 | junit: pytest.xml
44 | coverage: '/^TOTAL\s+\d+\s+\d+\s+([\d\.]+\%)$/'
45 |
46 | pre-commit:
47 | extends: .python
48 | image: python:3.7
49 | before_script:
50 | - pip install -U pre-commit
51 | script:
52 | - pre-commit run --all-files
53 |
54 | .docker:
55 | image: docker:stable
56 | stage: build
57 | services:
58 | - docker:dind
59 | variables:
60 | DOCKER_TLS_CERTDIR: ""
61 | DOCKER_DRIVER: overlay2
62 | before_script:
63 | - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
64 |
65 | build:
66 | extends: .docker
67 | script:
68 | - docker pull $CI_REGISTRY_IMAGE:latest || true
69 | - docker build
70 | --cache-from $CI_REGISTRY_IMAGE:latest
71 | --tag $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG .
72 | - if [[ "$CI_COMMIT_REF_SLUG" == master ]]; then docker tag $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG $CI_REGISTRY_IMAGE:master-$CI_COMMIT_SHORT_SHA; fi
73 | - docker push $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG
74 |
75 | tag latest:
76 | extends: .docker
77 | stage: tag
78 | script:
79 | - docker pull $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG
80 | - docker tag $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG $CI_REGISTRY_IMAGE:latest
81 | - docker push $CI_REGISTRY_IMAGE:latest
82 | only:
83 | - master
84 |
--------------------------------------------------------------------------------
/.gitlint:
--------------------------------------------------------------------------------
1 | [general]
2 | ignore = body-is-missing
3 | contrib = contrib-title-conventional-commits
4 | verbosity = 2
5 |
6 | [title-max-length]
7 | line-length = 88
8 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | ci:
2 | autofix_prs: false
3 | autoupdate_schedule: quarterly
4 | autoupdate_commit_msg: 'build(deps): [pre-commit.ci] pre-commit autoupdate'
5 |
6 | exclude: >
7 | (?x)^(
8 | .+\.svg|
9 | .+/migrations/.+|
10 | .+/fixtures/.+|
11 | .+/cassettes/.+
12 | )$
13 |
14 | repos:
15 | - repo: https://github.com/astral-sh/ruff-pre-commit
16 | rev: 'v0.1.14'
17 | hooks:
18 | - id: ruff
19 | args: [ --fix, --exit-non-zero-on-fix ]
20 | - id: ruff-format
21 |
22 | - repo: https://github.com/pre-commit/pre-commit-hooks
23 | rev: 'v4.5.0'
24 | hooks:
25 | - id: check-yaml
26 | - id: check-builtin-literals
27 | - id: check-executables-have-shebangs
28 | - id: check-shebang-scripts-are-executable
29 |
30 | - repo: https://github.com/python-poetry/poetry
31 | rev: '1.7.1'
32 | hooks:
33 | - id: poetry-check
34 | - id: poetry-export
35 |
--------------------------------------------------------------------------------
/.python-version:
--------------------------------------------------------------------------------
1 | 3.12
2 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | # syntax=docker/dockerfile:1
2 | ARG PYTHON_VERSION
3 |
4 | FROM node:16 as frontend
5 |
6 | WORKDIR /frontend
7 | COPY package-lock.json package.json ./
8 | RUN npm install
9 |
10 | COPY vite.config.js ./
11 | COPY frontend ./frontend
12 | RUN npm run build
13 |
14 | FROM python:${PYTHON_VERSION}-alpine
15 | ENV PIP_NO_CACHE_DIR off
16 | ENV PYTHONUNBUFFERED 1
17 |
18 | WORKDIR /app
19 | COPY requirements.txt ./
20 |
21 | # hadolint ignore=DL3018
22 | RUN \
23 | set -ex; \
24 | apk add --no-cache tini postgresql-libs jpeg-dev && \
25 | apk add --no-cache --virtual build-dependencies curl postgresql-dev libstdc++ zlib-dev build-base && \
26 | pip install --no-cache-dir -r requirements.txt && \
27 | apk del build-dependencies && \
28 | find /usr/local -depth -type f -a \( -name '*.pyc' -o -name '*.pyo' \) -exec rm -rf '{}' +;
29 |
30 |
31 | # User-accessible environment
32 | ENV ENVIRONMENT=PRODUCTION
33 | ENV DJANGO_ALLOWED_HOSTS=127.0.0.1
34 |
35 | COPY Procfile ./
36 | COPY manage.py ./
37 | COPY bin ./bin
38 | COPY --from=frontend /frontend/frontend/dist ./frontend/dist
39 | COPY tapedrive ./tapedrive
40 | COPY listeners ./listeners
41 | COPY podcasts ./podcasts
42 |
43 | RUN python manage.py collectstatic --no-input
44 |
45 | EXPOSE 8273
46 | VOLUME /app /data
47 | ENTRYPOINT [ "tini", "--", "./bin/entrypoint.sh" ]
48 | CMD ["honcho", "start"]
49 |
--------------------------------------------------------------------------------
/Procfile:
--------------------------------------------------------------------------------
1 | web: gunicorn -b "0.0.0.0:8273" -w 3 tapedrive.wsgi
2 | worker: python manage.py process_tasks
3 |
--------------------------------------------------------------------------------
/bin/entrypoint.sh:
--------------------------------------------------------------------------------
#!/bin/sh
# Container entrypoint: prepare the database, then hand off to the CMD.
set -ex

# Wait for a usable database connection, then apply migrations.
python manage.py dbconnection
python manage.py migrate

# Initialize admin account
python manage.py initadmin

echo "Starting main process."
# Quote "$@" so CMD arguments containing whitespace are passed through
# intact; unquoted $@ would word-split and glob-expand each argument.
exec "$@"
13 |
--------------------------------------------------------------------------------
/frontend/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 | ",
8 | "license": "Apache-2.0",
9 | "type": "module",
10 | "scripts": {
11 | "serve": "vite serve",
12 | "build": "vite build"
13 | },
14 | "dependencies": {
15 | "axios": "^1",
16 | "axios-auth-refresh": "^3",
17 | "bootstrap-vue": "^2.23",
18 | "dayjs": "^1.8.28",
19 | "typeface-fira-sans-condensed": "^0.0.72",
20 | "vue": "^2.7",
21 | "vue-infinite-loading": "^2.4.4",
22 | "vue-router": "^3.1.2",
23 | "vue-spinners": "^1.0.2"
24 | },
25 | "devDependencies": {
26 | "@vitejs/plugin-vue2": "^2.2.0",
27 | "sass": "^1.62.1",
28 | "vite": "^4.3.5",
29 | "vue-template-compiler": "^2.6.10"
30 | }
31 | }
32 |
--------------------------------------------------------------------------------
/podcasts/__init__.py:
--------------------------------------------------------------------------------
# Legacy pointer to the app's AppConfig. NOTE(review): `default_app_config`
# is deprecated since Django 3.2 (AppConfig discovery is automatic) — harmless
# to keep, but removable once the minimum supported Django version allows.
default_app_config = "podcasts.apps.PodcastsConfig"
2 |
--------------------------------------------------------------------------------
/podcasts/admin.py:
--------------------------------------------------------------------------------
1 | from django.contrib import admin
2 |
3 | from podcasts.models.episode import Episode, EpisodePlaybackState
4 | from podcasts.models.podcast import Podcast
5 |
6 |
7 | # Register your models here.
@admin.register(Podcast)
class PodcastAdmin(admin.ModelAdmin):
    """Admin change-list configuration for podcasts."""

    # Removed a stray `pass` that was dead code after the class body.
    list_display = ("title", "author", "fetched", "updated")
12 |
13 |
class PlaybackStateInline(admin.TabularInline):
    # Tabular inline showing EpisodePlaybackState rows on the episode admin page.
    model = EpisodePlaybackState
16 |
17 |
@admin.register(Episode)
class EpisodeAdmin(admin.ModelAdmin):
    """Admin configuration for episodes, with playback state edited inline."""

    list_display = ("title", "podcast", "published", "guid")
    # Feed-derived identifiers are read-only so they cannot be hand-edited.
    readonly_fields = ("media_url", "link", "guid", "slug")

    inlines = [PlaybackStateInline]
24 |
--------------------------------------------------------------------------------
/podcasts/api/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/janw/tapedrive/4df92020add1a2c2f1dc21495ea01b16ea140ae4/podcasts/api/__init__.py
--------------------------------------------------------------------------------
/podcasts/api/serializers.py:
--------------------------------------------------------------------------------
1 | # from background_task.models import Task
2 | from rest_framework import serializers
3 |
4 | from podcasts.models.episode import Episode
5 | from podcasts.models.podcast import Podcast
6 |
7 |
class EpisodeInlineSerializer(serializers.ModelSerializer):
    """Compact episode representation for per-podcast episode listings."""

    class Meta:
        model = Episode
        fields = ("id", "title", "subtitle", "published", "downloaded")
12 |
13 |
class PodcastInlineSerializer(serializers.ModelSerializer):
    """Podcast representation for nesting inside episode payloads.

    Excludes the user relations (subscribers/followers), which are not
    relevant when the podcast is embedded in another object.
    """

    class Meta:
        model = Podcast
        exclude = ("subscribers", "followers")
18 |
19 |
class PodcastSerializer(serializers.HyperlinkedModelSerializer):
    """Full hyperlinked podcast representation, looked up by slug."""

    # Read-only aggregate fields; presumably supplied as queryset
    # annotations by the view — TODO confirm against the view's queryset.
    num_episodes = serializers.IntegerField(read_only=True)
    last_published = serializers.DateTimeField(read_only=True)

    class Meta:
        model = Podcast
        lookup_field = "slug"
        exclude = ("subscribers", "followers")
        # The hyperlinked `url` field must also resolve via slug, not pk.
        extra_kwargs = {"url": {"lookup_field": "slug"}}
29 |
30 |
class PodcastListSerializer(serializers.ModelSerializer):
    """Trimmed podcast representation for list endpoints."""

    # Read-only aggregate fields; presumably supplied as queryset
    # annotations by the view — TODO confirm against the view's queryset.
    num_episodes = serializers.IntegerField(read_only=True)
    last_published = serializers.DateTimeField(read_only=True)

    class Meta:
        model = Podcast
        lookup_field = "slug"
        fields = (
            "title",
            "slug",
            "id",
            "subtitle",
            "image",
            "num_episodes",
            "last_published",
        )
47 |
48 |
class PodcastFromUrlSerializer(serializers.Serializer):
    """Validates the feed-URL payload for the podcast `add` action."""

    feed_url = serializers.URLField()
51 |
52 |
53 | # class EpisodeDownloadTaskSerializer(serializers.ModelSerializer):
54 | # class Meta:
55 | # model = Task
56 | # fields = ("task_hash", "run_at", "attempts", "failed_at")
57 |
58 |
class EpisodeSerializer(serializers.ModelSerializer):
    """Detail representation of an episode with its podcast nested inline.

    Excludes `media_url`, `user` and the potentially large `shownotes`
    body; show notes are served by a dedicated endpoint.
    """

    # download_task = EpisodeDownloadTaskSerializer(read_only=True)
    podcast = PodcastInlineSerializer(read_only=True)

    class Meta:
        model = Episode
        exclude = ("media_url", "user", "shownotes")
66 |
67 |
class EpisodeListSerializer(serializers.ModelSerializer):
    """Minimal episode representation for flat episode listings."""

    class Meta:
        model = Episode
        fields = ("title", "id", "podcast", "published")
72 |
--------------------------------------------------------------------------------
/podcasts/api/views.py:
--------------------------------------------------------------------------------
1 | from django.db.models.functions import Lower
2 | from requests import HTTPError
3 | from rest_framework import generics, renderers, status, viewsets
4 | from rest_framework.decorators import action
5 | from rest_framework.response import Response
6 |
7 | from podcasts.api import serializers
8 | from podcasts.models.episode import Episode
9 | from podcasts.models.podcast import Podcast
10 |
11 |
class PodcastViewSet(viewsets.ModelViewSet):
    """CRUD API for podcasts, looked up by slug.

    The list endpoint is restricted to the requesting user's subscribed
    podcasts and uses a trimmed serializer; detail routes expose any podcast.
    """

    queryset = Podcast.objects.order_by(Lower("title"))
    serializer_class = serializers.PodcastSerializer
    list_serializer_class = serializers.PodcastListSerializer
    lookup_field = "slug"

    def get_serializer_class(self):
        # Lightweight serializer for listings, full serializer elsewhere.
        if self.action == "list":
            return self.list_serializer_class
        return self.serializer_class

    def get_queryset(self, *args, **kwargs):
        # Only the user's subscriptions appear in the list endpoint.
        if self.action == "list":
            return self.queryset.filter(subscribers=self.request.user)
        return self.queryset

    @action(detail=True, renderer_classes=[renderers.StaticHTMLRenderer])
    def summary(self, request, *args, **kwargs):
        """Return the podcast's summary as raw HTML."""
        podcast = self.get_object()
        return Response(podcast.summary_p)

    def perform_create(self, serializer):
        # Creating a podcast implicitly subscribes the creating user.
        instance = serializer.save()
        self.request.user.subscribed_podcasts.add(instance)

    @action(detail=False, methods=["post"])
    def add(self, request):
        """Create/fetch a podcast from a feed URL and subscribe the user.

        Returns the serialized podcast with a ``created_now`` flag on
        success; mirrors the upstream HTTP status when fetching the feed
        failed, or 400 for an invalid payload.
        """
        serializer = serializers.PodcastFromUrlSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

        try:
            podcast, created = Podcast.objects.get_or_create_from_feed_url(
                serializer.validated_data["feed_url"], subscriber=request.user
            )
        except HTTPError as exc:
            # requests.HTTPError.response may be None (no HTTP response was
            # received); previously this crashed with AttributeError. Report
            # a 502 in that case instead.
            if exc.response is not None:
                upstream_status = exc.response.status_code
            else:
                upstream_status = status.HTTP_502_BAD_GATEWAY
            return Response(serializer.data, status=upstream_status)

        data = self.serializer_class(podcast, context={"request": request}).data
        data["created_now"] = created
        return Response(data, status=status.HTTP_200_OK)
52 |
53 |
class EpisodeViewSet(viewsets.ModelViewSet):
    """CRUD API for episodes."""

    queryset = Episode.objects.all()
    serializer_class = serializers.EpisodeSerializer
    list_serializer_class = serializers.EpisodeListSerializer

    def get_queryset(self, *args, **kwargs):
        # Listings are ordered newest-first, with title as a tie-breaker.
        if self.action != "list":
            return self.queryset
        return self.queryset.order_by("-published", "title")

    def get_serializer_class(self):
        # Compact serializer for listings, full serializer otherwise.
        return self.list_serializer_class if self.action == "list" else self.serializer_class

    @action(detail=True, renderer_classes=[renderers.StaticHTMLRenderer])
    def shownotes(self, request, *args, **kwargs):
        """Return the episode's show notes as raw HTML."""
        return Response(self.get_object().shownotes)
73 |
74 |
class PodcastEpisodesList(generics.ListAPIView):
    """List one podcast's episodes (by podcast slug), newest first."""

    serializer_class = serializers.EpisodeInlineSerializer

    def get_queryset(self):
        podcast_slug = self.kwargs["slug"]
        episodes = Episode.objects.filter(podcast__slug=podcast_slug)
        return episodes.order_by("-published")
81 |
--------------------------------------------------------------------------------
/podcasts/apps.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | from django.apps import AppConfig
4 | from django.apps import apps as global_apps
5 | from django.db import DEFAULT_DB_ALIAS, router
6 | from django.db.models.signals import post_migrate
7 |
8 | logger = logging.getLogger(__name__)
9 |
10 |
11 | # def create_background_refresh_task(
12 | # app_config,
13 | # verbosity=2,
14 | # interactive=True,
15 | # using=DEFAULT_DB_ALIAS,
16 | # apps=global_apps,
17 | # **kwargs,
18 | # ):
19 | # task_name = "podcasts.tasks.regular_feed_refresh"
20 |
21 | # try:
22 | # Task = apps.get_model("background_task", "Task")
23 | # except LookupError:
24 | # return
25 |
26 | # if not router.allow_migrate_model(using, Task):
27 | # return
28 |
29 | # tasks = Task.objects.using(using).filter(task_name=task_name)
30 | # if not tasks.exists():
31 | # from podcasts.conf import DEFAULT_REFRESH_DELAY, DEFAULT_REFRESH_PRIORITY, DEFAULT_REFRESH_RATE
32 | # from podcasts.tasks import regular_feed_refresh
33 |
34 | # task = regular_feed_refresh(
35 | # repeat=DEFAULT_REFRESH_RATE,
36 | # priority=DEFAULT_REFRESH_PRIORITY,
37 | # schedule=DEFAULT_REFRESH_DELAY,
38 | # )
39 | # logger.info("Created feed refresh task")
40 | # else:
41 | # task = tasks[0]
42 | # logger.info("Found existing feed refresh task")
43 | # logger.info("Is scheduled for %s" % timezone.get_current_timezone().normalize(task.run_at))
44 |
45 |
46 | # Shamelessly stolen and adapted from django.contrib.sites
def create_default_settings(
    app_config,
    verbosity=2,
    interactive=True,
    using=DEFAULT_DB_ALIAS,
    apps=global_apps,
    **kwargs,
):
    """post_migrate hook: ensure an initial PodcastsSettings row exists.

    Mirrors django.contrib.sites' create_default_site: quietly bail out when
    the model is unavailable or must not be migrated on this database alias.
    """
    try:
        PodcastsSettings = apps.get_model("podcasts", "PodcastsSettings")
    except LookupError:
        # Model not in the (possibly historical) app registry — nothing to do.
        return

    if not router.allow_migrate_model(using, PodcastsSettings):
        return

    if not PodcastsSettings.objects.using(using).exists():
        # The default settings set SITE_ID = 1 for django.contrib.sites, so we
        # depend on the default Site when creating the initial settings object.
        logger.info("Creating default PodcastsSettings")
        PodcastsSettings().save(using=using)
68 |
69 |
class PodcastsConfig(AppConfig):
    """AppConfig for the podcasts app; wires up post-migrate seeding."""

    name = "podcasts"
    verbose_name = "Podcasts"
    verbose_name_plural = "Podcasts"

    def ready(self):
        # Seed the initial PodcastsSettings row after every migrate run.
        post_migrate.connect(create_default_settings, sender=self)
        # post_migrate.connect(create_background_refresh_task, sender=self)

        # Imported here (not at module top) so the app registry is ready.
        from actstream import registry

        registry.register(self.get_model("Podcast"), self.get_model("Episode"))
82 |
--------------------------------------------------------------------------------
/podcasts/conf.py:
--------------------------------------------------------------------------------
1 | from datetime import timedelta
2 |
3 | from django.conf import settings
4 |
5 | from .apps import PodcastsConfig
6 |
7 |
def _getattr(variable, default):
    """Look up a ``PODCASTS_``-prefixed override in Django settings.

    Returns *default* when the setting is not defined.
    """
    setting_name = f"{PodcastsConfig.name.upper()}_{variable.upper()}"
    return getattr(settings, setting_name, default)
12 |
13 |
14 | STORAGE_DIRECTORY = "/data"
15 | DEFAULT_NAMING_SCHEME = "$podcast_slug/$episode_slug"
16 | DEFAULT_DATE_FORMAT = "Y-m-d_Hi"
17 |
18 | ITUNES_TOPCHARTS_URL = "https://rss.itunes.apple.com/api/v1/us/podcasts/top-podcasts/all/25/explicit.json"
19 | ITUNES_SEARCH_URL = "https://itunes.apple.com/search?"
20 | ITUNES_LOOKUP_URL = "https://itunes.apple.com/lookup?"
21 | ITUNES_SEARCH_LIMIT = 15
22 |
23 | # DEFAULT_REFRESH_RATE = Task.HOURLY
24 | DEFAULT_REFRESH_PRIORITY = -10
25 | DEFAULT_REFRESH_DELAY = timedelta(minutes=1)
26 |
27 |
28 | DEFAULT_PODCASTS_PER_PAGE = 15
29 | DEFAULT_EPISODES_PER_PAGE = 30
30 | DEFAULT_DEFAULT_PODCASTS_ORDER = "title"
31 | DEFAULT_DEFAULT_EPISODES_ORDER = "-published"
32 | DEFAULT_DEFAULT_IMAGE_SECURITY_POLICY = "f"
33 | DEFAULT_SEEK_FORWARD_BY = 45
34 | DEFAULT_SEEK_BACKWARD_BY = 30
35 |
--------------------------------------------------------------------------------
/podcasts/enums.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 |
3 | from django.db import models
4 |
5 |
class PodcastOrder(models.TextChoices):
    """Available sort orders for podcasts (stored as 3-letter DB values)."""

    TITLE = "TIT", "Title"
    PUBLISHED_ASC = "PUB", "Last Published"
    NUM_EPISODES = "NUM", "Number of Episodes"

    @classmethod
    def default(cls) -> PodcastOrder:
        """Fallback ordering when none has been chosen."""
        return cls.TITLE
14 |
15 |
class EpisodeOrder(models.TextChoices):
    """Available sort orders for episodes; a leading ``-`` means descending."""

    TITLE = "TIT", "Title"
    DOWNLOADED_ASC = "DOW", "Download Date (Earliest First)"
    DOWNLOADED_DESC = "-DOW", "Download Date (Latest First)"
    PUBLISHED_ASC = "PUB", "Publishing Date (Earliest First)"
    PUBLISHED_DESC = "-PUB", "Publishing Date (Latest First)"
    DURATION_ASC = "DUR", "Duration (Shortest First)"
    DURATION_DESC = "-DUR", "Duration (Longest First)"

    @classmethod
    def default(cls) -> EpisodeOrder:
        """Fallback ordering when none has been chosen: newest first."""
        return cls.PUBLISHED_DESC
28 |
29 |
class ImageSecurityPolicy(models.TextChoices):
    """Which remote images may be shown: all, first-party only, or none.

    The stored single-letter values are what DEFAULT_DEFAULT_IMAGE_SECURITY_POLICY
    in podcasts.conf refers to ("f").
    """

    ALLOW_ALL = "a", "Allow All"
    ALLOW_FIRST_PARTY = "f", "Allow First-Party"
    ALLOW_NONE = "n", "Allow None"

    @classmethod
    def default(cls) -> ImageSecurityPolicy:
        """Conservative default: only first-party images."""
        return cls.ALLOW_FIRST_PARTY
38 |
--------------------------------------------------------------------------------
/podcasts/management/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/janw/tapedrive/4df92020add1a2c2f1dc21495ea01b16ea140ae4/podcasts/management/__init__.py
--------------------------------------------------------------------------------
/podcasts/management/commands/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/janw/tapedrive/4df92020add1a2c2f1dc21495ea01b16ea140ae4/podcasts/management/commands/__init__.py
--------------------------------------------------------------------------------
/podcasts/management/commands/dbconnection.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import sys
3 | import time
4 |
5 | from django.core.management.base import BaseCommand
6 | from django.db import connection
7 |
8 | logger = logging.getLogger(__name__)
9 |
10 |
class Command(BaseCommand):
    """Management command that blocks until the database accepts connections.

    Exits 0 as soon as a connection can be established, 1 if the database
    does not become available within ``max_retries`` seconds.
    """

    help = "Wait for a usable database connection."

    # Total number of one-second connection attempts before giving up.
    max_retries = 30

    def handle(self, *args, **options):
        # A live connection object means the database is already reachable.
        if connection.connection is not None:
            sys.exit(0)

        # Wait for a proper database connection
        logger.info("Waiting for Database connection.")
        for _ in range(self.max_retries):
            try:
                connection.ensure_connection()
            except Exception:
                time.sleep(1)
            else:
                sys.exit(0)
        # BUG FIX: the previous code logged the decremented loop counter,
        # which is always 0 at this point ("... in 0 seconds."). Report the
        # actual retry budget instead.
        logger.error(f"Database did not become available in {self.max_retries} seconds.")
        sys.exit(1)
32 |
--------------------------------------------------------------------------------
/podcasts/management/commands/initadmin.py:
--------------------------------------------------------------------------------
1 | import logging
2 | from os import environ
3 |
4 | from django.contrib.auth import get_user_model
5 | from django.core.management.base import BaseCommand
6 |
7 | logger = logging.getLogger(__name__)
8 |
9 | User = get_user_model()
10 |
11 |
class Command(BaseCommand):
    """Create an initial superuser account when the user table is empty."""

    def handle(self, *args, **kwargs):
        # .exists() issues a cheap LIMIT-1 query instead of a full COUNT(*);
        # same outcome as the previous `count() == 0` check.
        if not User.objects.exists():
            username = "admin"
            email = "changeme@tapedrive.io"
            # SECURITY: falls back to the well-known password "admin" when
            # INITIAL_ADMIN_PASSWORD is unset -- must be changed after first login.
            password = environ.get("INITIAL_ADMIN_PASSWORD", "admin")
            logger.info("Creating initial admin account.")
            admin = User.objects.create_superuser(email=email, username=username, password=password)
            admin.is_active = True
            admin.is_admin = True
            admin.save()
23 |
--------------------------------------------------------------------------------
/podcasts/management/commands/refresh_feeds.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | from django.apps import apps
4 | from django.core.management.base import BaseCommand
5 |
6 | logger = logging.getLogger(__name__)
7 |
8 |
class Command(BaseCommand):
    """Management command that re-fetches and persists every podcast's feed."""

    help = "Refresh feeds of all podcasts in the database."

    def handle(self, *args, **options):
        verbosity = int(options["verbosity"])
        root_logger = logging.getLogger("")
        if verbosity > 1:
            # -v 2 and above: surface DEBUG output from all loggers.
            root_logger.setLevel(logging.DEBUG)

        Podcast = apps.get_model("podcasts", "Podcast")
        # iterator() streams rows instead of caching the whole queryset.
        for podcast in Podcast.objects.iterator():
            # Lazy %-args instead of eager string formatting (idiomatic logging).
            logger.info("Updating podcast feed: %s ...", podcast.title)
            podcast.update()
            podcast.save()
23 |
--------------------------------------------------------------------------------
/podcasts/migrations/0002_episode_image.py:
--------------------------------------------------------------------------------
1 | # Generated by Django 2.2.4 on 2019-09-01 11:20
2 |
from django.db import migrations, models

import podcasts.models.common
import podcasts.models.episode


class Migration(migrations.Migration):
    """Add an optional cover image field to Episode."""

    dependencies = [("podcasts", "0001_initial")]

    operations = [
        migrations.AddField(
            model_name="episode",
            name="image",
            field=models.ImageField(
                blank=True,
                null=True,
                # BUG FIX: this callable lives in podcasts.models.common, but the
                # module imported above was podcasts.models.episode only, so the
                # attribute chain relied on an incidental transitive import.
                # Migration 0003 imports podcasts.models.common for the same use.
                upload_to=podcasts.models.common.cover_image_filename,
                verbose_name="Cover Image",
            ),
        )
    ]
23 |
--------------------------------------------------------------------------------
/podcasts/migrations/0003_auto_20190901_1334.py:
--------------------------------------------------------------------------------
1 | # Generated by Django 2.2.4 on 2019-09-01 11:34
2 |
3 | from django.db import migrations, models
4 | import podcasts.models.common
5 |
6 |
class Migration(migrations.Migration):
    """Point Podcast.image at the shared cover_image_filename upload path
    (blank/null ImageField), matching Episode.image from 0002."""

    dependencies = [
        ('podcasts', '0002_episode_image'),
    ]

    operations = [
        migrations.AlterField(
            model_name='podcast',
            name='image',
            field=models.ImageField(blank=True, null=True, upload_to=podcasts.models.common.cover_image_filename, verbose_name='Cover Image'),
        ),
    ]
20 |
--------------------------------------------------------------------------------
/podcasts/migrations/0004_auto_20190902_1938.py:
--------------------------------------------------------------------------------
1 | # Generated by Django 2.2.4 on 2019-09-02 17:38
2 |
3 | from django.db import migrations, models
4 |
5 |
class Migration(migrations.Migration):
    """Set the URL fields on Episode and Podcast to max_length=2048;
    feed_url stays unique, link fields stay optional."""

    dependencies = [
        ('podcasts', '0003_auto_20190901_1334'),
    ]

    operations = [
        migrations.AlterField(
            model_name='episode',
            name='link',
            field=models.URLField(blank=True, max_length=2048, null=True, verbose_name='Episode Link'),
        ),
        migrations.AlterField(
            model_name='podcast',
            name='feed_url',
            field=models.URLField(max_length=2048, unique=True, verbose_name='Feed URL'),
        ),
        migrations.AlterField(
            model_name='podcast',
            name='link',
            field=models.URLField(blank=True, max_length=2048, null=True, verbose_name='Podcast Link'),
        ),
    ]
29 |
--------------------------------------------------------------------------------
/podcasts/migrations/0005_auto_20190902_1957.py:
--------------------------------------------------------------------------------
1 | # Generated by Django 2.2.4 on 2019-09-02 17:57
2 |
3 | import django.contrib.postgres.fields.jsonb
4 | import django.core.serializers.json
5 | from django.db import migrations, models
6 | import django.db.models.deletion
7 |
8 |
class Migration(migrations.Migration):
    """Store chapters as JSON on Episode and move the relational
    EpisodeChapter rows aside under related_name='old_chapters'."""

    dependencies = [
        ('podcasts', '0004_auto_20190902_1938'),
    ]

    operations = [
        # New JSON representation of chapters, defaulting to an empty list.
        migrations.AddField(
            model_name='episode',
            name='chapters',
            field=django.contrib.postgres.fields.jsonb.JSONField(default=list, encoder=django.core.serializers.json.DjangoJSONEncoder),
        ),
        # Rename the reverse accessor so the legacy rows stop shadowing
        # the new Episode.chapters JSON field.
        migrations.AlterField(
            model_name='episodechapter',
            name='episode',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='old_chapters', to='podcasts.Episode', verbose_name='Episode of Chapter'),
        ),
    ]
27 |
--------------------------------------------------------------------------------
/podcasts/migrations/0006_auto_20190902_2002.py:
--------------------------------------------------------------------------------
1 | # Generated by Django 2.2.4 on 2019-09-02 18:02
2 |
3 | import django.contrib.postgres.fields.jsonb
4 | import django.core.serializers.json
5 | from django.db import migrations
6 |
7 |
class Migration(migrations.Migration):
    """Allow Episode.chapters (JSONField) to be NULL in addition to its
    empty-list default."""

    dependencies = [
        ('podcasts', '0005_auto_20190902_1957'),
    ]

    operations = [
        migrations.AlterField(
            model_name='episode',
            name='chapters',
            field=django.contrib.postgres.fields.jsonb.JSONField(default=list, encoder=django.core.serializers.json.DjangoJSONEncoder, null=True),
        ),
    ]
21 |
--------------------------------------------------------------------------------
/podcasts/migrations/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/janw/tapedrive/4df92020add1a2c2f1dc21495ea01b16ea140ae4/podcasts/migrations/__init__.py
--------------------------------------------------------------------------------
/podcasts/models/__init__.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from django.db import models
4 | from django.utils.translation import gettext as _
5 |
6 | from podcasts.conf import DEFAULT_DATE_FORMAT, DEFAULT_NAMING_SCHEME, STORAGE_DIRECTORY
7 | from podcasts.validators import validate_naming_scheme, validate_path
8 |
9 |
class IntegerRangeField(models.IntegerField):
    """IntegerField that passes an inclusive [min_value, max_value] range
    through to its form field for validation."""

    def __init__(self, verbose_name=None, name=None, min_value=None, max_value=None, **kwargs):
        self.min_value, self.max_value = min_value, max_value
        # Idiom fix: cooperative super() call instead of the explicit
        # models.IntegerField.__init__(self, ...) base-class invocation.
        super().__init__(verbose_name, name, **kwargs)

    def formfield(self, **kwargs):
        """Build the form field with the range limits; caller kwargs win."""
        defaults = {"min_value": self.min_value, "max_value": self.max_value}
        defaults.update(kwargs)
        return super().formfield(**defaults)
19 |
20 |
class PodcastsSettings(models.Model):
    """App-wide settings record: where episodes are stored on disk and how
    their download filenames are composed.

    NOTE(review): nothing here enforces a single row, yet tasks iterate all
    rows (regular_feed_refresh) -- confirm the intended cardinality.
    """

    # Root directory for downloads; ~ and $VARS are expanded once in save().
    storage_directory = models.CharField(
        null=False,
        blank=False,
        max_length=255,
        default=STORAGE_DIRECTORY,
        validators=[validate_path],
        verbose_name=_("Storage Directory"),
        help_text=_("Root directory of where the podcast episodes are downloaded to"),
    )
    # $-placeholder template for episode filenames (validated against the
    # allowed scheme variables).
    naming_scheme = models.CharField(
        null=False,
        blank=False,
        max_length=255,
        default=DEFAULT_NAMING_SCHEME,
        validators=[validate_naming_scheme],
        verbose_name=_("Episode Naming Scheme"),
        help_text=_("Scheme used to compile the episode download filenames"),
    )
    # Format string for date segments inside the naming scheme (no validator).
    inpath_dateformat = models.CharField(
        null=False,
        blank=False,
        max_length=255,
        default=DEFAULT_DATE_FORMAT,
        validators=[],
        verbose_name=_("In-Path Date Format"),
        help_text=_("Scheme used to compile date segments in episode download filenames"),
    )

    class Meta:
        verbose_name = _("Podcasts Settings")
        verbose_name_plural = _("Podcasts Settings")

    def __str__(self):
        return "Tape Drive Settings"

    def save(self, *args, **kwargs):
        """Normalize storage_directory (expand ~ and $VARS) before persisting."""
        # Expand user and vars now once, to prevent future changes to cause unexpected directory changes
        self.storage_directory = os.path.expanduser(os.path.expandvars(self.storage_directory))

        super().save(*args, **kwargs)
62 |
--------------------------------------------------------------------------------
/podcasts/models/common.py:
--------------------------------------------------------------------------------
1 | import itertools
2 | from os import path
3 |
4 | from django.db import models
5 | from django.template.defaultfilters import slugify
6 |
7 | from podcasts.utils import download_cover
8 |
9 |
def cover_image_filename(instance, filename):
    """Build a stable cover-image filename: '<modelname>-<pk>-cover<ext>'.

    Only the extension of the uploaded *filename* is kept; the rest is
    derived from the owning instance so re-uploads overwrite predictably.
    """
    _, extension = path.splitext(filename)
    model_name = type(instance).__name__.lower()
    return f"{model_name}-{instance.id}-cover{extension}"
13 |
14 |
class CommonAbstract(models.Model):
    """Abstract base adding a cover image plus unique-slug maintenance on save.

    Concrete subclasses must provide `title` and `slug` fields -- save()
    reads both; only `image` is defined here.
    """

    image = models.ImageField(
        blank=True,
        null=True,
        upload_to=cover_image_filename,
        verbose_name="Cover Image",
    )

    class Meta:
        abstract = True

    def save(self, *args, **kwargs):
        # Update the slug, ensuring it's unique
        Model = self._meta.concrete_model
        # Only (re)build the slug for new rows or rows without one.
        if not self.id or not self.slug:
            max_length = self._meta.get_field("slug").max_length
            self.slug = orig = slugify(self.title)
            # Append -1, -2, ... until no other row claims the slug, trimming
            # the base so the suffixed slug still fits within max_length.
            for x in itertools.count(1):
                if not Model.objects.filter(slug=self.slug).exists():
                    break
                self.slug = "%s-%d" % (orig[: max_length - len(str(x)) - 1], x)

            # Some items have ridiculously long titles, shorten to max slug length
            if len(self.slug) > max_length:
                self.slug = self.slug[:max_length]
                if self.slug.endswith("-"):
                    self.slug = self.slug[:-1]

        super().save(*args, **kwargs)

    def insert_cover(self, img_url):
        """Download the cover at img_url and attach it to `image`.

        No-op for a falsy URL or a failed download.
        NOTE(review): episode_chapter.insert_image calls
        utils.download_cover(url, buffer) and receives a *name* back; confirm
        this single-argument call still matches the helper's signature.
        """
        if img_url:
            file = download_cover(img_url)
            if file:
                self.image.save(file.name, file, save=True)
50 |
--------------------------------------------------------------------------------
/podcasts/models/episode_chapter.py:
--------------------------------------------------------------------------------
1 | import os
2 | from io import BytesIO
3 |
4 | from django.core.files import File
5 | from django.db import models
6 | from django.utils.translation import gettext as _
7 |
8 | from podcasts import utils
9 |
10 |
def chapter_image_filename(instance, filename):
    """Upload path for a chapter image: '<podcast slug>-<episode pk>-<chapter pk><ext>'.

    Only the extension of the uploaded *filename* survives; the stem is
    derived from the chapter's identity.
    """
    _, extension = os.path.splitext(filename)
    episode = instance.episode
    stem = f"{episode.podcast.slug}-{episode.pk}-{instance.pk}"
    return stem + extension
16 |
17 |
class EpisodeChapter(models.Model):
    """A chapter marker inside a podcast episode: start time, title, and
    optional link and image."""

    episode = models.ForeignKey(
        "podcasts.Episode",
        on_delete=models.CASCADE,
        related_name="old_chapters",
        verbose_name=_("Episode of Chapter"),
    )
    starttime = models.DurationField(
        blank=False,
        null=False,
        verbose_name=_("Chapter Start"),
    )
    title = models.CharField(
        blank=False,
        null=False,
        max_length=2047,
        verbose_name=_("Chapter Title"),
    )
    link = models.URLField(
        blank=True,
        default="",
        max_length=2047,
        verbose_name=_("Chapter Link"),
    )
    image_url = models.URLField(
        blank=True,
        default="",
        max_length=2047,
        verbose_name=_("Chapter Image URL"),
    )
    image = models.ImageField(
        blank=True,
        null=True,
        upload_to=chapter_image_filename,
        verbose_name=_("Chapter Image"),
    )

    class Meta:
        # BUG FIX: these previously read "Episode"/"Episodes" -- copy-pasted
        # from the Episode model -- which mislabeled chapters in the admin.
        verbose_name = _("Episode Chapter")
        verbose_name_plural = _("Episode Chapters")
        ordering = ["starttime"]

    def __str__(self) -> str:
        return f"{self.title} of episode {self.episode_id}"

    def save(self, *args, **kwargs):
        # Fetch the chapter image on save when none is stored yet.
        if not self.image:
            self.insert_image(self.image_url)
        super().save(*args, **kwargs)

    def insert_image(self, img_url=None):
        """Download img_url (or the stored image_url) into the image field.

        Passing img_url also overwrites self.image_url; a falsy URL is a no-op.
        """
        if img_url:
            self.image_url = img_url
        else:
            img_url = self.image_url

        if img_url:
            output = BytesIO()
            name = utils.download_cover(img_url, output)
            if name:
                self.image.save(name, File(output), save=True)
79 |
--------------------------------------------------------------------------------
/podcasts/serializers.py:
--------------------------------------------------------------------------------
1 | from rest_framework import serializers
2 |
3 | from podcasts import conf
4 | from podcasts.models import EpisodeChapter
5 | from podcasts.models.episode import Episode
6 | from podcasts.models.podcast import Podcast
7 |
8 |
class PodcastSerializer(serializers.ModelSerializer):
    """Serializes every field of a Podcast; also nested inside EpisodeSerializer."""

    class Meta:
        model = Podcast
        fields = "__all__"
13 |
14 |
class EpisodeChapterSerializer(serializers.ModelSerializer):
    """Compact chapter representation (start time, title, link, image)."""

    class Meta:
        model = EpisodeChapter
        fields = ("starttime", "title", "link", "image")
19 |
20 |
class EpisodeSerializer(serializers.ModelSerializer):
    """Full episode payload with nested, read-only podcast and chapters.

    NOTE(review): migrations 0005/0006 turned Episode.chapters into a
    JSONField and moved the relational rows to related_name='old_chapters';
    confirm this nested ModelSerializer still matches the `chapters` field.
    """

    chapters = EpisodeChapterSerializer(many=True, read_only=True)
    podcast = PodcastSerializer(read_only=True)

    class Meta:
        model = Episode
        fields = "__all__"
28 |
29 |
class ApplePodcastsSearchRequestSerializer(serializers.Serializer):
    """Validates input for the iTunes podcast search.

    Only `term` is caller-supplied (at least 3 characters after trimming);
    `media` and `limit` are fixed, read-only values.
    """

    term = serializers.CharField(trim_whitespace=True, min_length=3)
    media = serializers.CharField(default="podcast", read_only=True)
    limit = serializers.IntegerField(default=conf.ITUNES_SEARCH_LIMIT, read_only=True)
34 |
--------------------------------------------------------------------------------
/podcasts/tasks.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | from actstream import action
4 | from background_task import background
5 | from django.contrib.auth import get_user_model
6 | from django.utils import timezone
7 |
8 | from podcasts.models import PodcastsSettings
9 | from podcasts.models.episode import Episode
10 | from podcasts.models.podcast import Podcast
11 | from podcasts.utils import download_file
12 |
13 | User = get_user_model()
14 |
15 | logger = logging.getLogger(__name__)
16 |
17 |
@background()
def download_episode(media_url, file_path, episode_id):
    """Background task: download an episode's media file and record the outcome.

    Args:
        media_url: URL of the media file to fetch.
        file_path: Destination path for the download.
        episode_id: Primary key of the Episode being downloaded.
    """
    # Get Episode from database
    episode = Episode.objects.get(id=episode_id)
    # Lazy %-args: the episode is only stringified if the record is emitted.
    logger.info("Downloading episode %s ...", episode)

    # Download the file; a falsy size signals failure.
    filesize = download_file(media_url, file_path)

    if filesize:
        # Persist download metadata and publish an activity-stream event.
        episode.file_size = filesize
        episode.downloaded = timezone.now()
        episode.save()
        action.send(episode, verb="was downloaded")
    else:
        action.send(episode, verb="failed downloading")
35 |
36 |
@background()
def regular_feed_refresh():
    """Background task: queue feed refreshes for followed podcasts and
    episode downloads for subscribed ones, per PodcastsSettings row.

    NOTE(review): if more than one PodcastsSettings row exists, refresh
    tasks are queued once per row -- confirm the singleton assumption.
    """
    for psettings in PodcastsSettings.objects.iterator():
        logger.info("Queueing feed refreshes ...")
        # Refresh feeds of podcasts with at least one follower
        for podcast in Podcast.objects.filter(followers__isnull=False).iterator():
            # refresh_feed is itself a @background task: this enqueues, not runs.
            refresh_feed(podcast.id)

        logger.info("Queueing downloads for subscribed feeds ...")
        for podcast in Podcast.objects.filter(subscribers__isnull=False).iterator():
            podcast.queue_missing_episodes_download_tasks(
                storage_directory=psettings.storage_directory,
                naming_scheme=psettings.naming_scheme,
            )

    logger.info("All done for now.")
53 |
54 |
@background()
def refresh_feed(podcast_id):
    """Background task: re-fetch one podcast's feed; failures are logged, never raised."""
    try:
        podcast = Podcast.objects.get(id=podcast_id)
        podcast.update()
    except Exception:
        # logger.exception() already attaches the traceback, so the former
        # exc_info=True flag was redundant; include the id for traceability.
        logger.exception("Refresh task failed for podcast %s", podcast_id)
62 |
--------------------------------------------------------------------------------
/podcasts/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/janw/tapedrive/4df92020add1a2c2f1dc21495ea01b16ea140ae4/podcasts/tests/__init__.py
--------------------------------------------------------------------------------
/podcasts/tests/cassettes/test_invalid_feed[0-None-Not Found].yaml:
--------------------------------------------------------------------------------
1 | interactions:
2 | - request:
3 | body: null
4 | headers:
5 | Accept:
6 | - '*/*'
7 | Accept-Encoding:
8 | - gzip, deflate
9 | Connection:
10 | - keep-alive
11 | User-Agent:
12 | - Podcast-Archive/0.1 (https://github.com/janw/tapedrive)
13 | method: GET
14 | uri: https://raw.githubusercontent.com/janw/tapedrive/main/podcasts/tests/fixtures/literally_nonexistent.xml
15 | response:
16 | body:
17 | string: '404: Not Found
18 |
19 | '
20 | headers:
21 | Accept-Ranges:
22 | - bytes
23 | Access-Control-Allow-Origin:
24 | - '*'
25 | Connection:
26 | - keep-alive
27 | Content-Length:
28 | - '15'
29 | Content-Security-Policy:
30 | - default-src 'none'; style-src 'unsafe-inline'; sandbox
31 | Date:
32 | - Wed, 08 May 2019 19:27:45 GMT
33 | Expires:
34 | - Wed, 08 May 2019 19:32:45 GMT
35 | Source-Age:
36 | - '0'
37 | Strict-Transport-Security:
38 | - max-age=31536000
39 | Vary:
40 | - Authorization,Accept-Encoding
41 | Via:
42 | - 1.1 varnish
43 | X-Cache:
44 | - MISS
45 | X-Cache-Hits:
46 | - '0'
47 | X-Content-Type-Options:
48 | - nosniff
49 | X-Fastly-Request-ID:
50 | - ea39a50e31f3eff9016b922280db6187446052d4
51 | X-Frame-Options:
52 | - deny
53 | X-GitHub-Request-Id:
54 | - F8B6:2A44:75B4:8824:5CD32DB1
55 | X-Served-By:
56 | - cache-hhn1551-HHN
57 | X-Timer:
58 | - S1557343665.230360,VS0,VE152
59 | X-XSS-Protection:
60 | - 1; mode=block
61 | status:
62 | code: 404
63 | message: Not Found
64 | version: 1
65 |
--------------------------------------------------------------------------------
/podcasts/tests/cassettes/test_invalid_feed[1-None-Feed is malformatted].yaml:
--------------------------------------------------------------------------------
1 | interactions:
2 | - request:
3 | body: null
4 | headers:
5 | Accept:
6 | - '*/*'
7 | Accept-Encoding:
8 | - gzip, deflate
9 | Connection:
10 | - keep-alive
11 | User-Agent:
12 | - Podcast-Archive/0.1 (https://github.com/janw/tapedrive)
13 | method: GET
14 | uri: https://raw.githubusercontent.com/janw/tapedrive/main/podcasts/tests/fixtures/invalid.xml
15 | response:
16 | body:
17 | string: !!binary |
18 | H4sIAAAAAAAAA7NR1NXlckktTi7KLCjJzM+zUsjMyUnLL8pNTVGI8PXhcq0oSE0usVIAg6T8qnwu
19 | XV07Lpui4mKFstSiYqAOWyUjPQMloJg+UBAANxuSNk8AAAA=
20 | headers:
21 | Accept-Ranges:
22 | - bytes
23 | Access-Control-Allow-Origin:
24 | - '*'
25 | Cache-Control:
26 | - max-age=300
27 | Connection:
28 | - keep-alive
29 | Content-Encoding:
30 | - gzip
31 | Content-Length:
32 | - '92'
33 | Content-Security-Policy:
34 | - default-src 'none'; style-src 'unsafe-inline'; sandbox
35 | Content-Type:
36 | - text/plain; charset=utf-8
37 | Date:
38 | - Wed, 08 May 2019 19:27:45 GMT
39 | ETag:
40 | - '"044edc24026f8be483133f2fd539ee3f0ba49368"'
41 | Expires:
42 | - Wed, 08 May 2019 19:32:45 GMT
43 | Source-Age:
44 | - '0'
45 | Strict-Transport-Security:
46 | - max-age=31536000
47 | Vary:
48 | - Authorization,Accept-Encoding
49 | Via:
50 | - 1.1 varnish
51 | X-Cache:
52 | - MISS
53 | X-Cache-Hits:
54 | - '0'
55 | X-Content-Type-Options:
56 | - nosniff
57 | X-Fastly-Request-ID:
58 | - 75aa362bd49e153cb3f01a468e2d80f9a1f5cf2d
59 | X-Frame-Options:
60 | - deny
61 | X-Geo-Block-List:
62 | - ''
63 | X-GitHub-Request-Id:
64 | - E082:24AA:8CD3:A24C:5CD32DB1
65 | X-Served-By:
66 | - cache-hhn1542-HHN
67 | X-Timer:
68 | - S1557343666.540201,VS0,VE158
69 | X-XSS-Protection:
70 | - 1; mode=block
71 | status:
72 | code: 200
73 | message: OK
74 | version: 1
75 |
--------------------------------------------------------------------------------
/podcasts/tests/cassettes/test_long_subtitle_feed.yaml:
--------------------------------------------------------------------------------
1 | interactions:
2 | - request:
3 | body: null
4 | headers:
5 | Accept:
6 | - '*/*'
7 | Accept-Encoding:
8 | - gzip, deflate
9 | Connection:
10 | - keep-alive
11 | User-Agent:
12 | - Podcast-Archive/0.1 (https://github.com/janw/tapedrive)
13 | method: GET
14 | uri: https://raw.githubusercontent.com/janw/tapedrive/main/podcasts/tests/fixtures/subtitle_too_long.xml
15 | response:
16 | body:
17 | string: !!binary |
18 | H4sIAAAAAAAAA+2caXLjuhGA/79TIP6RSiqmxAXc9DyeeLdnxru8plJTIACKsLkNF8nyr7lGql4O
19 | kGvkKHOSNEhJlpdnW7alTCp2laskEt1EN9AfGgu18PEqClGXZ7lI4g9zWkOdQzymCRNx58PcUXtd
20 | ceY+Lv6ykOX5TSldlgK5OG+Joox5/mEuKIq01Wz2er1GfalBk6jJCpY304RRkhcK6G7AhaEkKZLo
21 | llzPaCRZp6mrqtlcgpvDgjSJCx4Xo7JpmYVVSahTE5Q2o4SVIc+bg4LN0ROyQnNrsRzkqq9Vtarb
22 | UlzhVyAhjao0zS3+gtACDUgc81B+hm+FKEK+uEJCkQak4AvN+kJ9k/GcZiItQEF9Ba794W8rq0vt
23 | pb8tpItbMSoCjnokQwl85FmWZPOoFyRI5EgUcJMUqMd/fP9HxlHGSRj2kS86QQHO/4h+fP9t9Ngf
24 | 3/+J/CQMk16ODsrLSMQCwc1QRIQGotZJE9lAg8eIPAKBDLXh+Tu8h86S7BK1RcTzeVmXgGfoW8lz
25 | qEOCypiBYEFihrYOtw4baB0EowTqJGLQERFpHyJeUhaVPTnPhNTTFTkYEfcLqbZyLB3WtwEPBhvr
26 | kqCGhiX4CjGRF2XmgXkoJHGnJB2O5GNzyqHLoMRHnYykgaCgOwmhH/LGQjNdBFfuZdDIlGcttBSz
27 | PtoWYZj/ig54mmRgcN56wCuV5vHS2yQmHfnsG2VfCHgqJ7IYaEniX9EaE0UiFZ5wKbqaZFml6H7J
28 | pTxPqABjx/Qt5dCmBK2AD8qsy5n4FbU5DWIBnkGrIuMUlLfQckYYWhc5NAM88YrTshDdGz3zaOes
29 | jZZKJhKoooCnthNw2rByYkzXraKHJCIxVABt8lh0qhoK2a7Fjem1hkqqKg8GhlC7S/BOmQsKZoOv
30 | BFxfzkRR8DDk82M+BDlpL6jPQAa+roTC90F7Bl2WX8+PGzaP2uSyRIdlR0QJ9DLpxFXSFQweEXcS
31 | 2a5///sgjpr3AmkhGtS4rvDiqGf9day/oT/d795/XmjeEa310STtZzK2Fv/9rweiYqF5U6AW6ECX
32 | zIhUsHTQ1tyF5s2FuoAEWCsU8SUKMu7fcAa40rhhzajicxDh4Ye5nIf+HCr6Kf8wR9I0hJ4hrZZS
33 | fwEuzTUH2qXixfvoygNAwI3ShWZVrhapwdtKelDPEY4GF2MS8cWHzB6/f0eGR0SED3t+JFeXGbTi
34 | /QoMVZGyCMBxj1RgUOKWVF5GEcn672h9R+s7Wl+K1odiaWHYRRZ5DAgZfrkVfPxKwkkUi/2xKB1d
35 | vFUUEMY7SdaHALmCPA2CIkd/JFH6K9pLQlEImo+w9rDEhoyvOILkbSC3m3VILK6r8HhK+ICHoiPD
36 | qBY9TAX4t4RwKfp3JSV2F2XkkHBkUnXtVino4xA8t6E+yC0badWPZLUalMVjnK+E8ia3mq7RpG5T
37 | 8+Cza1BX8xRicEPBlkYUjzBdMR3qGti1sWPrTd/lnPnMsBl3Ve5TanicqLbLPGK7nqWrmm+bzPGo
38 | 7tkWIY7Bbc8hJuEWlNQ9z9cxdS3VJ75DqWpxajqualDGue/aqkNN3fdtA2OsWbppc6IxZjLX8nSX
39 | Wbbm4sZFyjs3fopG/QC+QaK9+P/hgIWmtHVo9+Rj71MTheaYY6Gb8eiuWEBSgC0QLW6h3ZijMw4j
40 | 2xfQk91S9PCc4/bQeCKHPpYA9OWI4peAS+jNSciqUUtek0MRDIpwI+Z8MMDFPs9zzuSgFZUZgPij
41 | hBPcHCoDOQ8ijQNM46T3cRwyD2ZwY1H3tIm3St5NQ1KRJ4y3ZZz6ZXgTueM3HpZZ1PS7pe+WHHJx
42 | Vl77ndRmEF6tau7N2c/Usr9TtYVOCQOOyPc4pDFfIBI+zPkkzPncYkewYdwoA68rYUJl6tw8Vpub
43 | Su9w/2tg2dmls/31hJ2V1un2ipuRVXt//dQJNvyV7GLn7LN+sgVJNzxk9MS09Fahv0AaWc4jXUOf
44 | yhgBHxyk4ZaBW9hBigp/YMCg4N1e8fSQ9j4KjPuAlVk1BC+qasvQwccjr43uDCWgd4RJLjskkPTG
45 | Y6zIG2nCiozQykEZJG4yvWpEqXFnpjToKnlTdxydOJahOJjoClaJoXi2ShRqMmx6zKWmTaX8aBol
46 | U7RmJC1AkNl2iuDDnKHplokNazSyNW+o+xiAdyDZakG2CFWBSQykYXtQQYmriTF8GBJIavowRckR
47 | h9wGEjcIl3raAv9JmHT6Mi2v5wXDHDtPObnMUZXU1dHMeEHEKJoDyMHBxCpqM1nMFxDLUmdHZKEM
48 | ZR7n8smsgQ7hmwzwxotZ/Yg3pkZs9bnE/qkc/Bqs/1SGTJf9vWg/uo5W3WQ33N3tJsfLZ8v5/ubu
49 | uWuvertnebcbOJ6xdLq0+4U+xn4Nj7HfbWGzparv7J8e+3WrZVozYT/xXDDcNxXbIFTBumMpDrNt
50 | xcbM9LEBrrDYU+zXTce0saW/DfvbvWRi9i/5Ul8BHYijPqS5cp2mjlEKjxJdmJrOI4L6SRl30Bm5
51 | FkzUUVmnblVUy6j1SSTC/k3MiyrzAz/D/Z4ooDHkYtdb872yeGp8157L95k78TUM/y+3+FtzWjGv
52 | d/DBeRgdpvuht3Z2vnK5gr+Gn678TRayvX1rvbjMDNfo9TrP5bTqtsx3Tk+f0/pMOM0Mh5muTRXL
53 | Uj0Fc50qnmmYCmUO9lzHdik3nua07RqmZk7K6TW5jt+q1t+XMwEtQfLJ0/MTmEBDMDIIyiJLYOoL
54 | CVbOKYQjZEK0lGuROSJVVG+Dz0Lk8zBsoK0Cqs9QWQWsTKaiRManlB6mYYnvQ18l4cux/LCB0yKy
55 | ++wlklm47FXrI7Nv07cG7/U64Xpv89PFUT883vJPNvdX94/VQ+1z2RXrbH3XWKfrp0Ec+JbzGHhV
56 | +z1BnuniCG5pzkzAa9g6Vg3TUHzdZHWC7BmeqjgO1Bx6oOpR7cnFEd01VM1yJgXvIe/Kxdsqdl64
57 | LC3qjVCoCsw8kccDOSklVZIjsnotM4kH0UmzMvIgMl++1ny7wtMCqDPZGvMLXfD6heM38f1bA8/y
58 | srVv5zjbuE52XHLJtlfa2YHd6eLNFbyi7i+1i7N863K3/HTw6IqAoaFt0n8H3gyBh7WZAM8iDsGa
59 | 6SuYMEeBT47icewrxLUclVoUElD1SeAZhoot050YeOIKpsckldtWGRGTQ+/H998Ok4gXgTwgINf3
60 | IJFoyBMng/ndj+//yCE3Kb0C+SUPITsZPztSHTuplwGzsghenlDet2NaLLSfy8K39sxr8DjdVnpr
61 | YhobW5/M4Og0Pe72DxQrPaGfjHb76vyy7RvRt4vwVDs7udjo9IyztUf3z/A7MWdJTGy0tNkQExNX
62 | 9YiLFcuxCGDT9hQHfAPstD3b1FwH+/gpYmINW1g1J04R10WX1zPXTQ5VmxiYUhLsy0pRIEoymP1C
63 | UjI4HHcpz0/FHXkAjstzdnUQgioow+UOShXAL6fkvbpPC5LWcyH5Jt54DRmn0BxvvlRpdLqHnuiZ
64 | ViLW1eOtI3Vf0fxoI7huR721wjo46q0crh7uYHb06FKl/Y7DmS5Vai3VmAkObRuqCFNmxfVNvc4i
65 | HU3VFIe5KnEpZszjTy5VqrrmWpo2MQ6TMmuhoxx18ypOosm3klDMe6MYTOWyVo7kAeBekhVBdfSz
66 | wwu53dCFelSnfMhw0SvivHgdEe9Xf1pMNJ+9LfQmDnnVXs8UmuStqXih6eerpw79uvGZkkNVfLXo
67 | SbK5maoXy1Yv+Ozmafrp4LPxrX/+6CErTX2n4kypaLb02VCR6g7TuEcU27ewgg1sK45nYEUnKjPA
68 | Mq5p1pNUxIbturY9KRXbcpu0zrSWIKfokskn1tVCVr1z6pO4IHkfwg0CTb7qIYp+dSCSjMfpyyH4
69 | UG2nRUH8XAq+xP7XMO/1/n5rwq2lonvy+bJ/HfG1+Erz19V0d/XreoR3rL1v+2cb/fJ0b6889bb7
70 | l4/ulBjvhJsp4YyWOpudEmrotqXqnqJrFniM+YbiWRpTPKwZDGuag13yJOF0HTio4okJ10ta6KCO
71 | BLnt+ILdkltvslXhR9IUok3uUs5XSUaQ9D7ePQ7YEzIDkeEYQTUqsYhHHnyuXzIDmUq2F/TllO0i
72 | EXK6Vh0nT0L2CkjeM3haiDSei8j/mgtft1PzU7T7m5/79NjRZR5fLF/T7VOl7O4RjovT9pKw99Wd
73 | 3Nnydzeu95d2I4M+fubfQktp9g7rGZ75V82ZwNpRqUF8ihXb8rmCHZUohLiqYjuqbngO3OZP7/Ko
74 | jmqp7sS7PLvxIL0bvvM6Ma2HwVgX9AYv6Axzo8RHHpdbC8ngTPfoNWJo8GIUlrl8DyeDciCdkTxA
75 | HukMAntwgrsK1wz6YwjRW73TO3wHmnSIiPOaFy+H+AOOmBbFn/1e1s/m2tfA/WezZbrMP/ly7Has
76 | EwDBNTetLbp2cr7hrARXrJvyMzMTn47Wv5bhwX569PgZUveG+SoA/535U0/QsT0T5jNGielRT7Fs
77 | jyrYZlTxfNtVCDM8rBqWybWnF2Z1B6uWYTyX+XuZfJ2mHHBuW+Tyx39etEt166cnUEByRJO83hk5
78 | ahw2kCe3RxKYL1cpG9z2OI/l+cROUFTT5ZiTLJQzz/rAeOPBX5+oUjouOjFozzKep0nM5Gvyv/cD
79 | FCSXvx3w4/tvMpEk0Eq9+z9/8eP7PyceIn7Hb1N7S2CSHbL/8ZZ47f7c/7j50x2Esp1zR1Hw54Qk
80 | yclyHy8H16tK2Gsfuxua1b+gfF0/6n/bS4/38CSDkPo+CE11EFKtlj6bVSJs+xx8wxUPPAeDkIUV
81 | lzJVYWCGaurgDGI+NQhZuoYt44H3GKA/D39MbkFWYPGX/wBsTA4HcE8AAA==
82 | headers:
83 | Accept-Ranges:
84 | - bytes
85 | Access-Control-Allow-Origin:
86 | - '*'
87 | Cache-Control:
88 | - max-age=300
89 | Connection:
90 | - keep-alive
91 | Content-Encoding:
92 | - gzip
93 | Content-Length:
94 | - '3634'
95 | Content-Security-Policy:
96 | - default-src 'none'; style-src 'unsafe-inline'; sandbox
97 | Content-Type:
98 | - text/plain; charset=utf-8
99 | Date:
100 | - Tue, 07 May 2019 17:31:16 GMT
101 | ETag:
102 | - '"082d8cc737cc134eaaee57ce6b4bcdb90db260a9"'
103 | Expires:
104 | - Tue, 07 May 2019 17:36:16 GMT
105 | Source-Age:
106 | - '0'
107 | Strict-Transport-Security:
108 | - max-age=31536000
109 | Vary:
110 | - Authorization,Accept-Encoding
111 | Via:
112 | - 1.1 varnish
113 | X-Cache:
114 | - MISS
115 | X-Cache-Hits:
116 | - '0'
117 | X-Content-Type-Options:
118 | - nosniff
119 | X-Fastly-Request-ID:
120 | - 9b5c462571dcb0e90ab7b3f844b1bc42e54fcd1f
121 | X-Frame-Options:
122 | - deny
123 | X-Geo-Block-List:
124 | - ''
125 | X-GitHub-Request-Id:
126 | - 6BEE:6CBC:55961:60EC7:5CD1C0E1
127 | X-Served-By:
128 | - cache-fra19163-FRA
129 | X-Timer:
130 | - S1557250276.063095,VS0,VE158
131 | X-XSS-Protection:
132 | - 1; mode=block
133 | status:
134 | code: 200
135 | message: OK
136 | version: 1
137 |
--------------------------------------------------------------------------------
/podcasts/tests/fixtures/invalid.xml:
--------------------------------------------------------------------------------
1 |
5 |
6 |
2 |
3 |
4 |
5 |
6 | en
7 | CRE: Technik, Kultur, Gesellschaft
8 | http://cre.fm
9 | Der Interview-Podcast mit Tim Pritlove
10 | Sat, 05 May 2018 07:45:04 +0000
11 |
12 | https://meta.metaebene.me/media/cre/cre-logo-1400x1400.jpg
13 | CRE: Technik, Kultur, Gesellschaft
14 | http://cre.fm
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 | de-DE
24 |
25 | 19k8ek636elsgxy7ez2egvhDwoboeok8is5tyN
26 | Podlove Podcast Publisher v2.8.0.build599
27 | Metaebene Personal Media - Tim Pritlove
28 | episodic
29 | Intensive und ausführliche Gespräche über Themen aus Technik, Kultur und Gesellschaft, das ist CRE. Interessante Gesprächspartner stehen Rede und Antwort zu Fragen, die man normalerweise selten gestellt bekommt. CRE möchte aufklären, weiterbilden und unterhalten.
30 |
31 |
32 | Tim Pritlove
33 | tim@pritlove.org
34 |
35 |
36 | Der Interview-Podcast mit Tim Pritlove
37 | no
38 | no
39 | -
40 |
CRE218 Diamanten
41 | http://cre.fm/cre218-diamanten
42 | Sat, 05 May 2018 07:45:04 +0000
43 | podlove-2018-05-04t23:28:38+00:00-474cc3b8e742abe
44 |
45 |
46 |
47 | 03:01:45
48 | Metaebene Personal Media - Tim Pritlove
49 | Kultur und Physik des härtesten Materials der Welt
50 | 218
51 | full
52 | Diamanten faszinieren die Menschen und seit einem Jahrhundert symbolisieren sie Luxus und Perfektion wie kein anderes Material. Nachdem Diamanten zunächst nur aus der Erde gegraben wurden können Diamanten mit technischen Verfahren in sogar besserer Form hergestellt werden und spielen in Forschung und Industrie eine wichtige Rolle. Ich spreche mit Physiker und und Podcaster Reinhard Remfort über seinen persönlichen Weg zum Thema und über die Physik, Struktur, Eigenschaften und Anwendungen, die Diamanten heute ermöglichen und in Zukunft noch ermöglichen könnten.
53 |
54 | ]]>
55 |
56 |
57 |
58 |
59 |
60 |
61 |
62 |
63 |
64 |
65 |
66 |
67 |
68 |
69 |
70 |
71 |
72 | 2
73 |
74 | Tim Pritlove
75 | http://tim.pritlove.org/
76 |
77 |
78 | Reinhard Remfort
79 |
80 |
81 |
82 |
83 |
--------------------------------------------------------------------------------
/podcasts/tests/test_models.py:
--------------------------------------------------------------------------------
1 | import logging
2 | from datetime import UTC, datetime
3 |
4 | import pytest
5 | from django.db.utils import IntegrityError
6 |
7 | from podcasts.models import PodcastsSettings
8 | from podcasts.models.podcast import Podcast
9 | from podcasts.tests.test_utils import TEST_FEED_NEXT_PAGE
10 |
11 | # Create your tests here.
12 |
# Live feed URL whose responses are replayed from VCR cassettes in the tests below.
TEST_FEED = "http://feeds.5by5.tv/killingtime"
# Fixture feed that advertises a rel="next" page (see test_utils.TEST_FEED_NEXT_PAGE).
TEST_FEED_PAGED = TEST_FEED_NEXT_PAGE

# Minimal field set to create a Podcast directly (bypassing feed fetching).
TEST_PODCAST = {
    "feed_url": "http://example.com/feed",
    "title": "Test Feed",
    "itunes_type": "serial",
}
# Minimal field set for one episode of TEST_PODCAST; published is timezone-aware.
TEST_EPISODE = {
    "title": "Le fancey episode",
    "guid": "http://example.com/feed/01-testep",
    "media_url": "http://example.com/feed/01-testep.mp3",
    "published": datetime(2018, 3, 12, 10, tzinfo=UTC),
}
27 |
28 |
@pytest.mark.django_db
@pytest.mark.vcr()
def test_podcast_model(caplog):
    """Test creation of Podcast model from different inputs."""
    Podcast.objects.create(feed_url=TEST_FEED)
    podcast = Podcast.objects.get(feed_url=TEST_FEED)
    assert podcast.title == "Untitled"

    # A second insert for the same feed URL violates the uniqueness constraint.
    with pytest.raises(IntegrityError):
        Podcast.objects.create_from_feed_url(TEST_FEED)

    assert "Fetched Killing Time" in caplog.text

    # get_or_create must hand back the existing record instead of raising.
    existing, created = Podcast.objects.get_or_create_from_feed_url(TEST_FEED)
    assert existing == podcast
    assert created is False
55 |
56 |
@pytest.mark.django_db
@pytest.mark.vcr()
def test_podcast_with_paged_feed(caplog):
    """Create complete Podcast from feed_url, with pages."""
    with caplog.at_level(logging.INFO, logger="podcasts.models"):
        podcast, created = Podcast.objects.get_or_create_from_feed_url(TEST_FEED_NEXT_PAGE)
        assert created
        assert isinstance(podcast, Podcast)

    # Creation and the follow-up refresh must both have been logged.
    assert "Creating CRE" in caplog.text
    assert "Queued refresh task" in caplog.text
69 |
70 |
@pytest.mark.django_db
def test_filename_generation():
    """construct_file_path substitutes valid segments and leaves unknown ones intact."""
    datefmt = "Y-m-d_Hi"
    podcast = Podcast.objects.create(**TEST_PODCAST)
    episode = podcast.episodes.create(**TEST_EPISODE)

    # All segments valid: every placeholder is resolved.
    scheme = "$podcast_type/$podcast_slug/${episode_date}_$episode_title"
    expected = "serial/test-feed/2018-03-12_1000_Le fancey episode.mp3"
    assert episode.construct_file_path("", scheme, datefmt) == expected

    # Unknown segment: the invalid placeholder survives untouched.
    scheme = "$podcast_type/${podcast_slug}_$episode_testattr"
    expected = "serial/test-feed_$episode_testattr.mp3"
    assert episode.construct_file_path("", scheme, datefmt) == expected
88 |
89 |
@pytest.mark.django_db
def test_settings_model():
    """The settings object renders a human-readable name."""
    assert str(PodcastsSettings.objects.create()) == "Tape Drive Settings"
94 |
--------------------------------------------------------------------------------
/podcasts/tests/test_utils.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | import pytest
4 | import requests
5 |
6 | from podcasts import utils
7 | from podcasts.utils import properties
8 |
# All test feeds are served from the repo's own fixtures directory on GitHub;
# responses are recorded/replayed via pytest-vcr.
FIXTURES_URL = "https://raw.githubusercontent.com/janw/tapedrive/main/podcasts/tests/fixtures/"

TEST_FEED = FIXTURES_URL + "valid.xml"
# Indexed by the parametrized `feed` argument in test_invalid_feed.
TEST_FEED_MALFORMED = (
    FIXTURES_URL + "literally_nonexistent.xml",  # Not Found
    FIXTURES_URL + "invalid.xml",  # Invalid Feed
)
TEST_FEED_NEXT_PAGE = FIXTURES_URL + "paged_p1.xml"
TEST_FEED_SUBTITLE_TOO_LONG = FIXTURES_URL + "subtitle_too_long.xml"
18 |
19 |
def test_valid_help_string():
    """resolve_segments expands the three placeholder groups into segment listings.

    NOTE(review): the expected string below contains separator characters that
    render as line breaks in this view (extraction garbling) — verify against
    the actual output of properties.resolve_segments before editing.
    """
    string = "{podcast_segments}||{episode_segments}||{unifying_segments}"
    should_become = (
        "$podcast_slug
, $podcast_type
, $podcast_title
, $p"
        "odcast_subtitle
, $podcast_author
, $podcast_language
, $"
        "podcast_explicit
, $podcast_updated
||$episode_slug
, $ep"
        "isode_id
, $episode_date
, $episode_number
, $episode_typ"
        "e
, $episode_title
||$episode_slug
, $episode_id
, "
        "$episode_date
, $episode_number
, $episode_title
"
    )
    assert properties.resolve_segments(string) == should_become
31 |
32 |
@pytest.mark.vcr()
def test_valid_feed():
    """A well-formed feed yields populated feed info."""
    result = utils.refresh_feed(TEST_FEED)
    assert result is not None
    assert result.data["title"] == "Killing Time"
38 |
39 |
@pytest.mark.vcr()
@pytest.mark.parametrize("feed,expected,message", [(0, None, "Not Found"), (1, None, "Feed is malformatted")])
def test_invalid_feed(feed, expected, message, caplog):
    """Querying an invalid feed should always fail softly, returning None."""
    caplog.set_level(logging.ERROR, logger="podcasts.utils")
    # Both the 404 and the malformed-XML fixture must raise out of refresh_feed.
    with pytest.raises(Exception):  # noqa: B017
        utils.refresh_feed(TEST_FEED_MALFORMED[feed])
47 |
48 |
def test_connection_error(mocker, caplog):
    """Connection failures propagate out of refresh_feed."""
    get_mock = mocker.patch("podcasts.utils.session.get", side_effect=requests.exceptions.ConnectionError)
    caplog.set_level(logging.ERROR, logger="podcasts.utils")
    with pytest.raises(requests.exceptions.ConnectionError):
        utils.refresh_feed("https://any.feed/is/fine/here")
    get_mock.assert_called_once()
55 |
56 |
@pytest.mark.vcr()
def test_paged_feed(caplog):
    """Test proper handling of a paged feed."""
    caplog.set_level(logging.INFO, logger="podcasts.utils")
    result = utils.refresh_feed(TEST_FEED_NEXT_PAGE)
    # The rel="next" link must be surfaced and logged.
    assert result.next_page is not None
    assert "Feed has next page" in caplog.text
65 |
66 |
@pytest.mark.vcr()
def test_long_subtitle_feed(caplog):
    """Test if an overly long subtitle is properly truncated."""
    caplog.set_level(logging.WARNING, logger="podcasts.utils")
    result = utils.refresh_feed(TEST_FEED_SUBTITLE_TOO_LONG)
    subtitle = result.data["subtitle"]
    # Truncation keeps exactly 255 chars, ending in the " ..." marker.
    assert len(subtitle) == 255
    assert subtitle.endswith(" ...")
    assert "Subtitle too long, will be truncated" in caplog.text
75 |
--------------------------------------------------------------------------------
/podcasts/utils/__init__.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import os
3 | import tempfile
4 | import xml.etree.ElementTree as etree
5 | from collections import namedtuple
6 | from functools import lru_cache
7 | from io import BytesIO
8 | from shutil import copyfileobj, move
9 | from urllib.error import HTTPError, URLError
10 | from urllib.parse import urlparse, urlunparse
11 | from urllib.request import Request, urlopen
12 |
13 | import feedparser
14 | import requests
15 | from django.core.files import File
16 | from feedparser import CharacterEncodingOverride
17 |
18 | from podcasts.utils.filters import shownotes_image_cleaner
19 | from podcasts.utils.parsers.feed_content import parse_feed_info
20 |
# Get an instance of a logger
logger = logging.getLogger(__name__)

# Identify ourselves to feed servers on every outgoing request.
USER_AGENT = "Podcast-Archive/0.1 (https://github.com/janw/tapedrive)"
HEADERS = {"User-Agent": USER_AGENT}

# Module-wide session so connections and the UA header are reused everywhere.
session = requests.Session()
session.headers.update(HEADERS)

# Return shape of refresh_feed(): parsed feed data, final URL, and paging links.
feed_info = namedtuple("feed_info", ["data", "url", "next_page", "last_page"])
31 |
32 |
def refresh_feed(feed_url):
    """Fetch *feed_url*, parse it, and return a feed_info tuple with paging links.

    Raises on HTTP errors, malformed XML, or a response without feed data.
    """
    response = session.get(feed_url, allow_redirects=True)
    response.raise_for_status()

    feedobj = feedparser.parse(response.content)

    # feedparser flags any parse hiccup via "bozo"; a pure character-encoding
    # override is harmless and must not abort the refresh.
    fatal_bozo = feedobj["bozo"] == 1 and type(feedobj["bozo_exception"]) is not CharacterEncodingOverride
    if fatal_bozo:
        raise Exception("Feed is malformatted")
    if "feed" not in feedobj:
        raise Exception("Feed is incomplete")

    def _first_link(rel):
        # Href of the first link carrying the requested rel attribute, if any.
        for item in feedobj["feed"].get("links", []):
            if item["rel"] == rel:
                return item.get("href")
        return None

    next_page = _first_link("next")
    last_page = _first_link("last")
    if next_page:
        logger.info("Feed has next page")

    return feed_info(parse_feed_info(feedobj), response.url, next_page, last_page)
54 |
55 |
def replace_shownotes_images(content, allowed_domains=False):
    """Filter <img> tags in shownotes HTML unless their domain is allowed.

    A wildcard list ``["*"]`` disables filtering and returns *content*
    unchanged. A falsy value means "no domain allowed": the previous code
    called ``len(False)`` on the default and raised TypeError.
    """
    if not allowed_domains:
        allowed_domains = []
    if len(allowed_domains) == 1 and allowed_domains[0] == "*":
        return content
    return shownotes_image_cleaner.clean(content, allowed_domains=allowed_domains)
61 |
62 |
def chunks(l, n):  # noqa: E741
    """Yield successive n-sized slices of sequence l."""
    start = 0
    length = len(l)
    while start < length:
        yield l[start : start + n]
        start += n
68 |
69 |
def download_file(link, filename):
    """Download *link* into *filename* via a temp file; return bytes expected.

    Returns None when the target already exists, the server reports no
    content length, or the request fails.
    """
    logger = logging.getLogger("podcasts.utils.download_file")

    if os.path.isfile(filename):
        logger.error("File at %s already exists", filename)
        return

    # Begin downloading, resolve redirects
    prepared_request = Request(link, headers=HEADERS)
    try:
        with tempfile.NamedTemporaryFile(delete=False) as outfile, urlopen(prepared_request) as response:
            # Check for proper content length, with resolved link
            link = response.geturl()
            total_size = int(response.getheader("content-length", "0"))
            if total_size == 0:
                logger.error("Received content-length is 0")
                return

            # Bug fix: previously `logger.debug("Resolved link:", link)` passed
            # the link as an unused positional arg (TypeError on formatting).
            logger.debug("Resolved link: %s", link)

            # Create the subdir, if it does not exist
            os.makedirs(os.path.dirname(filename), exist_ok=True)

            # Finally start the download for real
            copyfileobj(response, outfile)

        move(outfile.name, filename)
        return total_size

    except (HTTPError, URLError) as error:
        logger.error("Download failed. Query returned '%s'", error)
        return
    except KeyboardInterrupt:
        logger.error("Unexpected interruption. Deleting unfinished file")
        # The target only exists after `move`; removing unconditionally used to
        # raise FileNotFoundError when interrupted mid-download.
        if os.path.isfile(filename):
            os.remove(filename)
        return
106 |
107 |
@lru_cache(maxsize=256)
def download_cover(img_url):
    """Fetch a podcast cover image and wrap it in a django File.

    The query string is stripped for the first attempt, since some hosts use
    it to restrict image size; if that fails (e.g. query-based auth tokens),
    the original URL is retried. Returns None when both attempts fail.
    """
    logger.info(f"Downloading cover {img_url}")

    parsed = urlparse(img_url)
    logger.debug("Query params (removed on first try): %s", parsed.query)
    parsed = parsed._replace(query="")
    stripped_url = urlunparse(parsed)

    response = session.get(stripped_url, allow_redirects=True)
    if response.status_code >= 400:
        logger.info("Failed without query string, trying again.")
        # Fall back to the untouched URL; give up softly on a second failure.
        response = session.get(img_url, allow_redirects=True)
        if response.status_code >= 400:
            return None
        logger.info("Success.")

    cover_name = parsed.path.split("/")[-1]
    return File(BytesIO(response.content), name=cover_name)
132 |
133 |
def strip_url(link):
    """Return (path, extension) of a URL, ignoring host and query string."""
    path = urlparse(link).path
    _, extension = os.path.splitext(path)
    return path, extension
138 |
139 |
def handle_uploaded_file(f):
    """Spool an uploaded file to disk chunk by chunk; return the temp path."""
    destination = tempfile.NamedTemporaryFile(delete=False)
    with destination:
        for piece in f.chunks():
            destination.write(piece)
    return destination.name
145 |
146 |
def parse_opml_file(filename):
    """Extract all RSS feed URLs from an OPML subscription export."""
    with open(filename) as fp:
        root = etree.fromstringlist(fp)
    urls = []
    # Only outline nodes explicitly typed "rss" and carrying a feed URL count.
    for node in root.findall("*/outline/[@type='rss']"):
        feed_url = node.get("xmlUrl")
        if feed_url is not None:
            urls.append(feed_url)
    return urls
151 |
152 |
def unify_apple_podcasts_response(data):
    """Normalize Apple Podcasts toplist/search payloads to one result shape.

    Toplist responses nest results under "feed" and search responses use
    collection* field names; both are mapped onto id/name/artworkUrl/genres.
    Mutates and returns *data*.
    """
    if "feed" in data:
        data["results"] = data["feed"]["results"]
    data["resultsCount"] = len(data["results"])
    for i, result in enumerate(data["results"]):
        if "collectionId" in result:
            data["results"][i]["id"] = int(result["collectionId"])
        else:
            data["results"][i]["id"] = int(result["id"])
        if "collectionName" in result:
            data["results"][i]["name"] = result["collectionName"]

        # Prefer the larger artwork when available.
        if "artworkUrl600" in result:
            data["results"][i]["artworkUrl"] = result["artworkUrl600"]
        elif "artworkUrl100" in result:
            data["results"][i]["artworkUrl"] = result["artworkUrl100"]

        # Guard against an empty genres list; `result["genres"][0]` used to
        # raise IndexError when an entry carried "genres": [].
        if result.get("genres") and isinstance(result["genres"][0], dict):
            data["results"][i]["genres"] = [{"name": item.get("name")} for item in result["genres"]]

    return data
174 |
--------------------------------------------------------------------------------
/podcasts/utils/filters.py:
--------------------------------------------------------------------------------
1 | from urllib.parse import urlparse
2 |
3 | from bleach.sanitizer import BleachSanitizerFilter, Cleaner
4 | from django.utils.text import format_lazy
5 | from html5lib.filters.base import Filter
6 |
# Feed-level / episode-level fields that pass through the HTML cleaners.
CLEAN_HTML_GLOBAL = ["summary", "subtitle"]
CLEAN_HTML_EPISODE = ["description", "subtitle"]

# Conservative whitelist used for summaries and descriptions.
ALLOWED_HTML_TAGS = [
    "a",
    "abbr",
    "acronym",
    "b",
    "blockquote",
    "code",
    "em",
    "i",
    "li",
    "ol",
    "p",
    "strong",
    "ul",
]

ALLOWED_HTML_ATTRIBUTES = {
    "a": ["href", "title"],
    "acronym": ["title"],
    "abbr": ["title"],
}

# Additional tags permitted only in full shownotes (headlines, tables, images).
EXTENDED_HTML_TAGS = [
    "h1",
    "h2",
    "h3",
    "h4",
    "h5",
    "h6",
    "img",
    "table",
    "thead",
    "tbody",
    "tr",
    "th",
    "td",
]

EXTENDED_HTML_ATTRIBUTES = {"img": ["rel", "src", "alt"], "td": ["colspan", "rowspan"]}
49 |
50 |
def clean_link(link, include_path=False):
    """Reduce a URL to its bare domain, optionally keeping a shortened path.

    A leading "www." is dropped; with include_path, deep paths are collapsed
    to "/…/<last-segment>".
    """
    parsed = urlparse(link)
    netloc = parsed.netloc.removeprefix("www.")

    if not include_path:
        return netloc

    path = parsed.path.rstrip("/")
    segments = path.split("/")
    if len(segments) > 2:
        path = "/…/" + segments[-1]
    return netloc + path
65 |
66 |
class CleanerWithOptions(Cleaner):
    """bleach Cleaner variant whose post-filters accept per-call options.

    Mirrors Cleaner.clean(), except that each configured filter's __iter__ is
    invoked with an ``allowed_domains`` keyword (consumed by ImgSrcFilter).
    """

    def clean(self, text, allowed_domains=False):
        # Normalize the falsy default to "no domains allowed".
        if not allowed_domains:
            allowed_domains = []

        if not isinstance(text, str):
            message = f"argument cannot be of '{text.__class__.__name__}' type, must be of text type"
            raise TypeError(message)

        if not text:
            return ""

        # Parse the fragment and run bleach's sanitizer over the token stream.
        dom = self.parser.parseFragment(text)
        filtered = BleachSanitizerFilter(
            source=self.walker(dom),
            # Bleach-sanitizer-specific things
            attributes=self.attributes,
            strip_disallowed_elements=self.strip,
            strip_html_comments=self.strip_comments,
            # html5lib-sanitizer things
            allowed_elements=self.tags,
            allowed_css_properties=self.styles,
            allowed_protocols=self.protocols,
            allowed_svg_properties=[],
        )

        # Apply any filters after the BleachSanitizerFilter; __iter__ is called
        # directly (instead of iter()) so the keyword can be forwarded.
        for filter_class in self.filters:
            fc = filter_class(source=filtered)
            filtered = fc.__iter__(allowed_domains=allowed_domains)

        return self.serializer.render(filtered)
99 |
100 |
class ImgSrcFilter(Filter):
    """html5lib filter that blanks img src attributes from foreign domains.

    Tags whose src domain is not in ``allowed_domains`` get the original URL
    moved to data-src, a "has-src" class, an alt text naming the domain, and
    an emptied src; an existing alt is preserved as data-alt.
    """

    def __iter__(self, **kwargs):
        allowed_domains = kwargs.pop("allowed_domains", [])
        for token in Filter.__iter__(self):
            if token["type"] in ["StartTag", "EmptyTag"] and token["data"]:
                data_alt = None
                data_src = None
                for attr, value in token["data"].items():
                    # Bug fix: both "alt" and "src" values were previously
                    # assigned to data_alt, so data_src stayed None and the
                    # filter never rewrote any image tag.
                    if attr[1] == "alt":
                        data_alt = value
                    elif attr[1] == "src":
                        data_src = value

                if data_src:
                    domain = clean_link(data_src)
                    if domain not in allowed_domains:
                        token["data"][(None, "data-src")] = data_src
                        token["data"][(None, "class")] = "has-src"
                        token["data"][(None, "alt")] = format_lazy("Image from {domain}", domain=domain)
                        token["data"][(None, "src")] = ""
                        if data_alt:
                            token["data"][(None, "data-alt")] = data_alt
            yield token
122 |
123 |
# Plain text only: subtitles must not contain any markup at all.
subtitle_cleaner = Cleaner(tags=[], strip=True)

# Basic inline formatting for summaries/descriptions.
summary_cleaner = Cleaner(tags=ALLOWED_HTML_TAGS, attributes=ALLOWED_HTML_ATTRIBUTES, strip=True)

# Richer markup (headlines, tables, images) allowed in full shownotes.
shownotes_cleaner = Cleaner(
    tags=ALLOWED_HTML_TAGS + EXTENDED_HTML_TAGS,
    attributes={**ALLOWED_HTML_ATTRIBUTES, **EXTENDED_HTML_ATTRIBUTES},
    strip=True,
)

# Same whitelist, plus ImgSrcFilter to neutralize third-party image sources.
shownotes_image_cleaner = CleanerWithOptions(
    tags=ALLOWED_HTML_TAGS + EXTENDED_HTML_TAGS,
    attributes={**ALLOWED_HTML_ATTRIBUTES, **EXTENDED_HTML_ATTRIBUTES},
    strip=True,
    filters=[ImgSrcFilter],
)
140 |
--------------------------------------------------------------------------------
/podcasts/utils/parsers/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/janw/tapedrive/4df92020add1a2c2f1dc21495ea01b16ea140ae4/podcasts/utils/parsers/__init__.py
--------------------------------------------------------------------------------
/podcasts/utils/parsers/feed_content.py:
--------------------------------------------------------------------------------
1 | from dateutil import parser as dateparser
2 | from django.template.defaultfilters import slugify
3 |
4 | from podcasts.utils.sanitizers import sanitize_shownotes, sanitize_subtitle, sanitize_summary
5 |
# Summary, Subtitle not included, parsed separately
# Feed-level keys copied verbatim from the parsed feed into feed_info.
PODCAST_INFO_KEYS = [
    "author",
    "language",
    "link",
    "title",
    "image",
    "itunes_explicit",
    "itunes_type",
    "generator",
    "updated",
]

# Entry-level keys copied verbatim from each feed item into episode_info.
EPISODE_INFO_KEYS = [
    "link",
    "subtitle",
    "title",
    "published",
    "description",
    "guid",
    "image",
]
28 |
29 |
def parse_chapters(obj):
    """Normalize Podlove Simple Chapters entries of a feedparser item.

    Renames start_parsed -> starttime and href -> link in place; returns the
    (possibly empty) chapter list.
    """
    if "psc_chapters" not in obj:
        return []

    chapters = obj["psc_chapters"].get("chapters", [])
    for chapter in chapters:
        chapter["starttime"] = chapter.pop("start_parsed")
        # The raw "start" string is superseded by the parsed value.
        del chapter["start"]
        if "href" in chapter:
            chapter["link"] = chapter.pop("href")
    return chapters
44 |
45 |
def parse_feed_info(parsed_feed):
    """Flatten a feedparser result into a dict of podcast and episode info."""
    feed_info = {}

    feed = parsed_feed["feed"]
    for key in PODCAST_INFO_KEYS:
        feed_info[key] = feed.get(key, None)

        if key == "updated" and feed_info[key] is not None:
            feed_info[key] = dateparser.parse(feed_info[key])
        # Guard against a missing image: `"href" in None` used to raise a
        # TypeError here (parse_episode_info already had this None check).
        elif key == "image" and feed_info[key] is not None and "href" in feed_info[key]:
            feed_info[key] = feed_info[key]["href"]

    feed_info["subtitle"] = sanitize_subtitle(feed)
    feed_info["summary"] = sanitize_summary(feed)

    # Process episode list separately; feedparser exposes entries under
    # "items" or "entries" depending on the feed flavor.
    episode_list = parsed_feed.get("items", False) or parsed_feed.get("entries", False)
    if episode_list:
        feed_info["episodes"] = [parse_episode_info(episode) for episode in episode_list]
    else:
        feed_info["episodes"] = []

    return feed_info
69 |
70 |
def parse_episode_info(episode):
    """Extract, sanitize, and derive the relevant fields of one feed entry."""
    episode_info = {}
    for key in EPISODE_INFO_KEYS:
        value = episode.get(key, None)
        episode_info[key] = value

        if key == "published" and value is not None:
            episode_info[key] = dateparser.parse(value)
        elif key == "image" and value is not None and "href" in value:
            episode_info[key] = value["href"]
        elif key == "title":
            # Derived slug used e.g. for filename generation.
            episode_info["slug"] = slugify(value)

    episode_info["subtitle"] = sanitize_subtitle(episode)
    episode_info["description"] = sanitize_summary(episode)
    episode_info["shownotes"] = sanitize_shownotes(episode)
    episode_info["chapters"] = parse_chapters(episode)

    # The enclosure link carries the actual media file URL.
    episode_info["media_url"] = None
    for link in episode["links"]:
        if link.get("rel") == "enclosure":
            episode_info["media_url"] = link["href"]

    return episode_info
94 |
--------------------------------------------------------------------------------
/podcasts/utils/properties.py:
--------------------------------------------------------------------------------
1 | from django.utils.text import format_lazy
2 |
# Mappings of usable segment => field name
AVAILABLE_PODCAST_SEGMENTS = {
    "podcast_slug": "slug",
    "podcast_type": "itunes_type",
    "podcast_title": "title",
    "podcast_subtitle": "subtitle",
    "podcast_author": "author",
    "podcast_language": "language",
    "podcast_explicit": "itunes_explicit",
    "podcast_updated": "updated",
}

AVAILABLE_EPISODE_SEGMENTS = {
    "episode_slug": "slug",
    "episode_id": "id",
    "episode_date": "published",
    "episode_number": "itunes_episode",
    "episode_type": "itunes_episodetype",
    "episode_title": "title",
}

# Segments that make a naming scheme unique per episode; validators require
# at least one of these in every scheme.
UNIFYING_EPISODE_SEGMENTS = [
    "episode_slug",
    "episode_id",
    "episode_date",
    "episode_number",
    "episode_title",
]

# Union of both segment mappings, used for scheme validation.
ALL_VALID_SEGMENTS = {**AVAILABLE_EPISODE_SEGMENTS, **AVAILABLE_PODCAST_SEGMENTS}
33 |
34 |
def get_segments_html(segments):
    """Render segment names as a "$name, $name, ..." listing for help text.

    Accepts either a mapping (its keys are used) or a list of segment names.
    NOTE(review): the join/suffix literals below contain characters that
    render as line breaks in this view (extraction garbling) — verify the
    actual separator before editing.
    """
    if isinstance(segments, dict):
        segments = list(segments.keys())
    return "$" + "
, $".join(segments) + "
"
39 |
40 |
def resolve_segments(string):
    """Lazily interpolate the available segment listings into a help template.

    *string* may contain the placeholders {podcast_segments},
    {episode_segments}, and {unifying_segments}; each is replaced with the
    rendered list of valid segment names via django's format_lazy.
    """
    return format_lazy(
        string,
        podcast_segments=get_segments_html(AVAILABLE_PODCAST_SEGMENTS),
        episode_segments=get_segments_html(AVAILABLE_EPISODE_SEGMENTS),
        unifying_segments=get_segments_html(UNIFYING_EPISODE_SEGMENTS),
    )
48 |
--------------------------------------------------------------------------------
/podcasts/utils/sanitizers.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | from bs4 import BeautifulSoup
4 | from markdown import markdown
5 |
6 | from podcasts.utils.filters import shownotes_cleaner, subtitle_cleaner, summary_cleaner
7 |
8 | logger = logging.getLogger(__name__)
9 |
10 |
def sanitize_subtitle(obj):
    """Strip markup from a feed subtitle and cap it at 255 characters.

    Returns None when the item carries no subtitle at all.
    """
    if "subtitle" not in obj:
        return None

    # As per spec, subtitle should be plain text and up to 255 characters.
    cleaned = subtitle_cleaner.clean(obj.get("subtitle", ""))
    if len(cleaned) > 255:
        logger.warning("Subtitle too long, will be truncated")
        cleaned = cleaned[:251] + " ..."
    return cleaned
20 |
21 |
def sanitize_summary(obj):
    """Clean a feed item's summary/description, rendering markdown if declared."""
    if "summary_detail" in obj:
        detail = obj["summary_detail"]
        # Only summaries explicitly typed as markdown are rendered to HTML.
        if detail["type"] == "text/markdown":
            html = markdown(detail["value"])
        else:
            html = detail["value"]
    elif "summary" in obj:
        html = obj.get("summary", "")
    else:
        html = obj.get("description", "")

    # In any case, clean the thing from weird HTML shenanigans.
    return summary_cleaner.clean(html)
37 |
38 |
def sanitize_shownotes(obj, max_headline=2):
    """Pick the richest content block, drop scripts, and normalize headlines.

    Returns None when the item has no content blocks at all.
    """
    content = obj.get("content")
    if not content:
        return None

    # Use the longest content alternative as the canonical shownotes.
    richest = max(content, key=lambda c: len(c.get("value", "")))
    soup = BeautifulSoup(richest.get("value", ""), "html.parser")
    for script_tag in soup.find_all("script"):
        script_tag.decompose()
    adjust_headline_levels(soup, max_headline)
    return shownotes_cleaner.clean(str(soup))
50 |
51 |
def adjust_headline_levels(soup, max_level=3):
    """Demote headline tags in *soup* so the top-most level is >= max_level.

    Finds the highest headline level present and shifts all headlines down by
    the difference, capping at h6. Mutates *soup* in place.
    """
    # Find the highest (smallest-number) headline level actually present.
    top_level_content = 1
    for level in range(1, 7):  # was range(1, 6): h6 was never detected
        if soup.find("h%d" % level):
            top_level_content = level
            break

    if top_level_content < max_level:
        transposal = max_level - top_level_content
        # Walk from the deepest level upward so no tag is shifted twice.
        for level in reversed(range(1, 6)):  # was range(1, 5): h5 was skipped
            newlevel = min((level + transposal, 6))
            for h in soup.find_all("h%d" % level):
                new_tag = soup.new_tag("h%d" % newlevel)
                # Move all children so nested markup inside the headline
                # survives (`h.string` is None for headlines with child tags,
                # which previously emptied them).
                for child in list(h.children):
                    new_tag.append(child)
                h.replace_with(new_tag)
67 |
--------------------------------------------------------------------------------
/podcasts/utils/serializers.py:
--------------------------------------------------------------------------------
1 | import datetime
2 |
3 | from django.core.serializers.json import DjangoJSONEncoder
4 |
5 |
class PodcastsJSONEncoder(DjangoJSONEncoder):
    """DjangoJSONEncoder that additionally serializes timedeltas.

    Durations are emitted as integer milliseconds; everything else falls
    back to the Django encoder (see "Date Time String Format" in ECMA-262).
    """

    def default(self, o):
        if isinstance(o, datetime.timedelta):
            return round(o.total_seconds() * 1000)
        return super().default(o)
13 |
--------------------------------------------------------------------------------
/podcasts/validators.py:
--------------------------------------------------------------------------------
1 | import os
2 | import re
3 | from string import Template
4 |
5 | from django import forms
6 | from django.utils.translation import gettext as _
7 | from django.utils.translation import ngettext
8 |
9 | from podcasts.utils.properties import ALL_VALID_SEGMENTS, UNIFYING_EPISODE_SEGMENTS
10 |
# Matches a trailing ".xyz" (1-4 alphanumerics) that would look like a file extension.
RE_MATCH_POSSIBLE_EXTENSION = re.compile(r".*(\.[0-9a-zA-Z]{1,4})$")
# Extracts every "$segment" name using string.Template's identifier pattern.
RE_MATCH_ALL_SEGMENTS = re.compile(r"\$(" + Template.idpattern + ")")
13 |
14 |
def validate_path(path):
    """Ensure *path* (after env-var and ~ expansion) exists and is writable."""
    expanded = os.path.expanduser(os.path.expandvars(path))
    if not os.path.isdir(expanded):
        raise forms.ValidationError(_("Path %(path)s does not exist"), params={"path": expanded})
    if not os.access(expanded, os.W_OK):
        raise forms.ValidationError(_("Path %(path)s is not writable"), params={"path": expanded})
21 |
22 |
def validate_naming_scheme(scheme):
    """Reject episode naming schemes that are unsafe or reference unknown segments."""
    # Guard clauses for structurally invalid schemes.
    if "\\" in scheme:
        raise forms.ValidationError(
            _("Backslashes (\\) are not allowed in scheme."),
            params={"scheme": scheme},
        )
    if scheme.startswith("/") or scheme.endswith("/"):
        raise forms.ValidationError(_("Scheme must not begin or end with '/'"), params={"scheme": scheme})

    extension_match = RE_MATCH_POSSIBLE_EXTENSION.fullmatch(scheme)
    if extension_match:
        raise forms.ValidationError(
            _("Ending %(possible_extension)s is too similar to a file extension"),
            params={"scheme": scheme, "possible_extension": extension_match.group(1)},
        )

    # Every $segment must be one of the known segment names.
    found_segments = RE_MATCH_ALL_SEGMENTS.findall(scheme)
    invalid_segments = [segment for segment in found_segments if segment not in ALL_VALID_SEGMENTS]
    if invalid_segments:
        raise forms.ValidationError(
            ngettext(
                "Segment '%(segments)s' is not a valid segment",
                "Segments '%(segments)s' are not valid segments",
                len(invalid_segments),
            ),
            params={"segments": "', '".join(invalid_segments)},
        )

    # At least one segment must make the resulting filename unique per episode.
    if not any(segment in UNIFYING_EPISODE_SEGMENTS for segment in found_segments):
        raise forms.ValidationError(
            _("Scheme must contain at least one unifying episode segment"),
            params={"scheme": scheme},
        )
57 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.poetry]
2 | name = "tapedrive"
3 | version = "0.1.0"
4 | description = "The selfhosted Podcast Archive"
5 | authors = ["Jan Willhaus "]
6 | license = "Apache-2.0"
7 |
8 | [tool.poetry.dependencies]
9 | python = "^3.12"
10 |
11 | # Django and friends
12 | Django = "~4.2"
13 | django-activity-stream = "^1.4"
14 | django-configurations = "^2.5"
15 | djangorestframework = "^3.14"
16 | djangorestframework_simplejwt = "^5.3"
17 | dj-database-url = "^2.1"
18 |
19 | # Feedparsing
20 | bleach = "^6.1"
21 | html5lib = "^1.1"
22 | feedparser = "^6.0"
23 | beautifulsoup4 = "^4.7"
24 | Markdown = "^3.1"
25 | Pillow = "^6.0"
26 | requests = "^2.31"
27 | python-dateutil = "^2.8"
28 |
29 | whitenoise = "^6.6"
30 |
31 | # Running the app
32 | psycopg2 = "^2.9"
33 | gunicorn = "*"
34 |
35 | [tool.poetry.group.tests.dependencies]
36 | pytest = "^7.4.4"
37 | pytest-cov = "^2.7"
38 | pytest-django = "^3.4"
39 | pytest-mock = "^1.10"
40 | pytest-vcr = "^1.0"
41 |
42 | [tool.poetry.group.dev.dependencies]
43 | ipython = "<8.18"
44 | ipdb = "*"
45 | ruff = "^0.1.14"
46 | pre-commit = "^3.2.2"
47 | commitizen = "^3.13.0"
48 | rich-codex = "^1.2.6"
49 | mypy = "^1.8.0"
50 |
51 | django-debug-toolbar = "^4.2"
52 | django-extensions = "^3.2"
53 |
54 | # Werkzeug = "^3.0"
55 | honcho = "*"
56 |
57 | # Typing
58 | django-stubs = "*"
59 |
60 |
61 | [tool.commitizen]
62 | version_scheme = "semver"
63 | version_provider = "poetry"
64 | version_files = [
65 | "pyproject.toml:version = ",
66 | "tapedrive/__init__.py",
67 | "package.json",
68 | "README.md",
69 | ]
70 | gpg_sign = true
71 | annotated_tag = true
72 | tag_format = "v$version"
73 | update_changelog_on_bump = true
74 |
75 |
76 | [tool.pytest.ini_options]
77 | testpaths = [
78 | "listeners/tests",
79 | "podcasts/tests",
80 | "tapedrive/tests",
81 | ]
82 |
83 | DJANGO_SETTINGS_MODULE = "tapedrive.settings"
84 | DJANGO_CONFIGURATION = "Testing"
85 |
86 |
87 | [tool.coverage.paths]
88 |
89 | [tool.coverage.run]
90 | branch = true
91 | source = [
92 | "tapedrive",
93 | "podcasts",
94 | "listeners",
95 | ]
96 | omit = [
97 | "*/tests/*",
98 | "*/migrations/*",
99 | ]
100 | [tool.coverage.report]
101 | exclude_also = [
102 | "if TYPE_CHECKING:",
103 | ]
104 | fail_under = 60
105 | precision = 2
106 | show_missing = true
107 |
108 |
109 | [tool.ruff]
110 | line-length = 120
111 | target-version = "py312"
112 | extend-select = [
113 | "I", # isort
114 | "B", # bugbear
115 | "ISC", # implicit-str-concat
116 | "Q", # quotes
117 | "TID", # tidy-imports
118 | "C4", # comprehensions
119 | "SIM", # simplify
120 | "C90", # mccabe
121 | "T20", # no print
122 | "PGH", # pygrep-hooks
123 |     "W",     # pycodestyle warnings (eol/eof whitespace, etc.)
124 | "T10", # no debug statements
125 | "DJ", # flake8-django
126 | "A", # flake8-builtins
127 | "UP", # pyupgrade
128 | ]
129 | unfixable = [
130 | "B", # bugbear
131 | ]
132 | ignore = [
133 | "SIM108", # if-else-block-instead-of-if-exp
134 | "ISC001", # single-line-implicit-string-concatenation
135 | ]
136 | extend-exclude = [
137 | "frontend",
138 | "build",
139 | "dist",
140 | "staticfiles",
141 | "templates",
142 | "assets",
143 | "locale",
144 | "node_modules",
145 | "migrations",
146 | ]
147 |
148 | [tool.ruff.mccabe]
149 | max-complexity = 10
150 |
151 | [tool.ruff.format]
152 | quote-style = "double"
153 |
154 |
155 | [tool.mypy]
156 | disallow_untyped_defs = true
157 | disallow_any_generics = true
158 | disallow_untyped_calls = true
159 | disallow_incomplete_defs = true
160 | warn_unused_configs = true
161 | warn_redundant_casts = true
162 | warn_unused_ignores = true
163 | strict_optional = true
164 | strict_equality = true
165 | check_untyped_defs = true
166 | no_implicit_reexport = true
167 |
168 | plugins = [
169 | "mypy_django_plugin.main",
170 | ]
171 |
172 | [[tool.mypy.overrides]]
173 | module = [
174 | "feedparser.*",
175 | "requests.*",
176 | "slugify.*",
177 | "yaml.*",
178 | ]
179 | ignore_missing_imports = true
180 |
181 | [tool.django-stubs]
182 | django_settings_module = "tapedrive.settings:Testing"
183 |
184 |
185 | [build-system]
186 | requires = ["poetry-core"]
187 | build-backend = "poetry.core.masonry.api"
188 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [tool:pytest]
2 | DJANGO_SETTINGS_MODULE=tapedrive.settings
3 | DJANGO_CONFIGURATION=Testing
4 | norecursedirs =
5 | .git
6 | .tx
7 | frontend
8 | assets
9 | locale
10 | mediafiles
11 | staticfiles
12 | templates
13 |
14 | [coverage:run]
15 | source =
16 | tapedrive
17 | podcasts
18 | listeners
19 | omit =
20 | */tests/*
21 | */migrations/*
22 |
23 | [coverage:report]
24 | # Regexes for lines to exclude from consideration
25 | exclude_lines =
26 | # Have to re-enable the standard pragma
27 | pragma: no cover
28 |
29 | # Don't complain if tests don't hit defensive assertion code:
30 | raise AssertionError
31 | raise NotImplementedError
32 |
33 | # Don't complain if non-runnable code isn't run:
34 | if __name__ == .__main__.:
35 |
--------------------------------------------------------------------------------
/tapedrive/__init__.py:
--------------------------------------------------------------------------------
1 | __version__ = "v0.1.0"
2 |
--------------------------------------------------------------------------------
/tapedrive/settings.py:
--------------------------------------------------------------------------------
1 | import os
2 | import random
3 |
4 | from configurations import Configuration, values
5 |
6 |
def get_secret_key(PROJECT_DIR):
    """Load Django's SECRET_KEY from ``secret.txt`` inside *PROJECT_DIR*.

    If the file is missing or unreadable, a fresh 50-character key is
    generated with a cryptographically secure RNG and persisted to the file
    so subsequent runs reuse the same key.

    Args:
        PROJECT_DIR: directory that holds (or will hold) ``secret.txt``.

    Returns:
        The secret key as a stripped string.

    Raises:
        Exception: if the key file can neither be read nor written.
    """
    SECRET_FILE = os.path.join(PROJECT_DIR, "secret.txt")
    try:
        with open(SECRET_FILE) as sf:
            SECRET_KEY = sf.read().strip()
    except OSError:
        try:
            # random.SystemRandom draws from os.urandom, so it is suitable
            # for generating a secret key.
            SECRET_KEY = "".join(
                random.SystemRandom().choice("abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)")
                for _ in range(50)
            )
            with open(SECRET_FILE, "w") as sf:
                sf.write(SECRET_KEY)
        except OSError as err:
            # BUG FIX: the original built this Exception without `raise`,
            # silently discarding it and potentially leaving SECRET_KEY
            # unbound (NameError on return). Raise it explicitly, chained
            # to the underlying I/O error.
            raise Exception(
                f"Please create a {SECRET_FILE} file with random characters to generate your secret key!"
            ) from err
    return SECRET_KEY
26 |
27 |
class Common(Configuration):
    """Base configuration shared by every environment.

    The environment-specific classes below (Development, Testing, Staging,
    Production) subclass this and override individual settings.
    """

    # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
    BASE_DIR = os.path.dirname(os.path.dirname(__file__))

    # SECURITY WARNING: keep the secret key used in production secret!
    # Loaded from (or generated into) <BASE_DIR>/secret.txt at import time.
    SECRET_KEY = get_secret_key(BASE_DIR)

    # SECURITY WARNING: don't run with debug turned on in production!
    DEBUG = values.BooleanValue(False)

    # Populated from the DJANGO_ALLOWED_HOSTS environment variable.
    ALLOWED_HOSTS = values.ListValue([], environ=True)

    # Application definition
    INSTALLED_APPS = [
        "whitenoise.runserver_nostatic",
        "django.contrib.admin",
        "django.contrib.auth",
        "django.contrib.contenttypes",
        "django.contrib.messages",
        "django.contrib.sessions",
        "django.contrib.staticfiles",
        "listeners",
        "podcasts",
        "actstream",
        "rest_framework",
    ]

    MIDDLEWARE = [
        "django.middleware.security.SecurityMiddleware",
        "django.contrib.sessions.middleware.SessionMiddleware",
        "django.middleware.common.CommonMiddleware",
        "whitenoise.middleware.WhiteNoiseMiddleware",
        "django.middleware.locale.LocaleMiddleware",
        "django.contrib.auth.middleware.AuthenticationMiddleware",
        "django.contrib.messages.middleware.MessageMiddleware",
        "django.middleware.clickjacking.XFrameOptionsMiddleware",
    ]

    ROOT_URLCONF = "tapedrive.urls"

    TEMPLATES = [
        {
            "BACKEND": "django.template.backends.django.DjangoTemplates",
            # The built Vue frontend (index.html) is served as a template.
            "DIRS": [os.path.join(BASE_DIR, "frontend", "dist")],
            "APP_DIRS": True,
            "OPTIONS": {
                "context_processors": [
                    "django.template.context_processors.request",
                    "django.contrib.auth.context_processors.auth",
                    "django.contrib.messages.context_processors.messages",
                ]
            },
        }
    ]

    WSGI_APPLICATION = "tapedrive.wsgi.application"

    # Database
    # https://docs.djangoproject.com/en/2.0/ref/settings/#databases
    # Default DSN; overridable via the DATABASE_URL environment variable
    # (django-configurations DatabaseURLValue).
    DATABASES = values.DatabaseURLValue("postgres://tapedrive:tapedrive@localhost/tapedrive")

    # Password validation
    # https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
    AUTH_PASSWORD_VALIDATORS = [
        {"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"},
        {"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
        {"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
        {"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
    ]

    LOGIN_REDIRECT_URL = "api-root"

    # Internationalization
    # https://docs.djangoproject.com/en/2.0/topics/i18n/
    LANGUAGE_CODE = "en-us"
    TIME_ZONE = "Europe/Berlin"
    USE_TZ = True

    # Static files (CSS, JavaScript, Images)
    # https://docs.djangoproject.com/en/2.0/howto/static-files/
    STATIC_URL = "/static/"
    # NOTE(review): STATIC_ROOT is the same directory TEMPLATES reads from
    # (frontend/dist) — presumably intentional so collectstatic output and
    # the SPA entry point live together; confirm.
    STATIC_ROOT = os.path.join(BASE_DIR, "frontend", "dist")
    STATICFILES_FINDERS = [
        "django.contrib.staticfiles.finders.FileSystemFinder",
        "django.contrib.staticfiles.finders.AppDirectoriesFinder",
    ]

    MEDIA_URL = "/media/"
    MEDIA_ROOT = os.path.join(BASE_DIR, "media")

    # Custom user model from the listeners app.
    AUTH_USER_MODEL = "listeners.User"

    # Project settings
    # Presumably the (width, height) in pixels for podcast cover images —
    # confirm against the podcasts app.
    COVER_IMAGE_SIZE = (500, 500)

    ACTSTREAM_SETTINGS = {
        "FETCH_RELATIONS": True,
        "USE_PREFETCH": True,
        "GFK_FETCH_DEPTH": 2,
    }

    LOGGING = {
        "version": 1,
        "disable_existing_loggers": False,
        "handlers": {"console": {"class": "logging.StreamHandler"}},
        "loggers": {
            "django": {"handlers": ["console"], "level": "INFO"},
            # The project's own app logs at DEBUG.
            "podcasts": {"handlers": ["console"], "level": "DEBUG"},
        },
    }

    REST_FRAMEWORK = {
        # All API endpoints require authentication by default.
        "DEFAULT_PERMISSION_CLASSES": ("rest_framework.permissions.IsAuthenticated",),
        # Session auth for the browsable API, JWT for the SPA.
        "DEFAULT_AUTHENTICATION_CLASSES": (
            "rest_framework.authentication.SessionAuthentication",
            "rest_framework_simplejwt.authentication.JWTAuthentication",
        ),
        "DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.PageNumberPagination",
        "PAGE_SIZE": 25,
    }
148 |
149 |
class Development(Common):
    """
    The in-development settings and the default configuration.
    """

    DEBUG = True

    # Required for django-debug-toolbar to show up on localhost requests.
    INTERNAL_IPS = ["127.0.0.1"]

    INSTALLED_APPS = Common.INSTALLED_APPS + ["django_extensions", "debug_toolbar"]

    MIDDLEWARE = Common.MIDDLEWARE + ["debug_toolbar.middleware.DebugToolbarMiddleware"]

    # Convenience pre-imports for django-extensions' shell_plus.
    # NOTE: ("feedparser") is just a parenthesized string (not a tuple),
    # i.e. equivalent to "feedparser" — the whole module is imported.
    SHELL_PLUS_PRE_IMPORTS = [
        ("podcasts.conf", "*"),
        ("podcasts.utils", "*"),
        ("feedparser"),
    ]

    # Print outgoing mail to the console instead of sending it.
    EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"

    # No password rules locally, to ease creating test accounts.
    AUTH_PASSWORD_VALIDATORS = []
172 |
173 |
class Testing(Common):
    """Test-suite settings: a throwaway SQLite database instead of Postgres.

    Referenced as ``tapedrive.settings:Testing`` by pytest and django-stubs.
    """

    DATABASES = values.DatabaseURLValue("sqlite:///tapedrive-testing.sqlite3")
176 |
177 |
class Staging(Common):
    """
    The in-staging settings.

    Opt-in transport hardening: set DJANGO_STRONG_SECURITY=true to enable
    secure session cookies and HSTS (see post_setup below).
    """

    # Security
    STRONG_SECURITY = values.BooleanValue(False)

    SECURE_BROWSER_XSS_FILTER = values.BooleanValue(True)
    SECURE_CONTENT_TYPE_NOSNIFF = values.BooleanValue(True)
    SECURE_REDIRECT_EXEMPT = values.ListValue([])
    SECURE_SSL_HOST = values.Value(None)
    SECURE_PROXY_SSL_HEADER = values.TupleValue(("HTTP_X_FORWARDED_PROTO", "https"))

    @classmethod
    def post_setup(cls):
        """Apply the opt-in hardening once env-backed values are resolved.

        BUG FIX: the original class body did ``if STRONG_SECURITY is True:``
        at class-definition time, comparing an *unresolved*
        ``values.BooleanValue`` instance against ``True`` — that is never
        true, so the settings below were dead code. django-configurations
        resolves Value instances during setup; post_setup runs afterwards,
        so ``cls.STRONG_SECURITY`` is a real boolean here.
        """
        super().post_setup()
        if cls.STRONG_SECURITY:
            cls.SESSION_COOKIE_SECURE = True
            cls.SECURE_HSTS_INCLUDE_SUBDOMAINS = True
            cls.SECURE_HSTS_SECONDS = 31536000
195 |
196 |
class Production(Staging):
    """The in-production settings — currently identical to Staging."""
203 |
--------------------------------------------------------------------------------
/tapedrive/urls.py:
--------------------------------------------------------------------------------
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import include, path
from django.views.generic import TemplateView
from rest_framework import routers
from rest_framework_simplejwt.views import TokenObtainPairView, TokenRefreshView, TokenVerifyView

from listeners.serializers import UserView, UserViewSet
from podcasts.api import views

# DRF router: exposes a browsable API index plus routes for the registered
# viewsets, all mounted under /api/ below.
router = routers.DefaultRouter()
router.register(r"users", UserViewSet)
router.register(r"podcasts", views.PodcastViewSet)
router.register(r"episodes", views.EpisodeViewSet)

urlpatterns = [
    # Serve the built single-page frontend at the root.
    path("", TemplateView.as_view(template_name="index.html"), name="index"),
    path("api/", include(router.urls), name="api-root"),
    # NOTE(review): the empty path segment ("//") looks like a missing URL
    # converter, e.g. "api/podcastepisodes/<int:id>/" — confirm against the
    # kwargs PodcastEpisodesList expects before changing.
    path("api/podcastepisodes//", views.PodcastEpisodesList.as_view()),
    path("admin/", admin.site.urls),
    path("api/user/", UserView.as_view(), name="user_details"),
    # Session login for the browsable API, plus the SimpleJWT token endpoints.
    path("api/auth/", include("rest_framework.urls")),
    path("api/auth/token/", TokenObtainPairView.as_view(), name="token_obtain_pair"),
    path("api/auth/token/refresh/", TokenRefreshView.as_view(), name="token_refresh"),
    path("api/auth/token/verify/", TokenVerifyView.as_view(), name="token_verify"),
    # Media/static served by Django itself (typical for development setups).
    *static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT),
    *static(settings.STATIC_URL, document_root=settings.STATIC_ROOT),
]

if settings.DEBUG:
    import debug_toolbar

    # Prepend so the toolbar routes win over the catch-all index route.
    urlpatterns = [path("__debug__/", include(debug_toolbar.urls))] + urlpatterns
35 |
--------------------------------------------------------------------------------
/tapedrive/wsgi.py:
--------------------------------------------------------------------------------
1 | """
2 | WSGI config for tapedrive project.
3 | It exposes the WSGI callable as a module-level variable named ``application``.
4 | For more information on this file, see
5 | https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
6 | """
7 | import os
8 |
9 | configuration = os.getenv("ENVIRONMENT", "development").title()
10 | os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tapedrive.settings")
11 | os.environ.setdefault("DJANGO_CONFIGURATION", configuration)
12 |
13 | from configurations.wsgi import get_wsgi_application # noqa: E402, isort:skip
14 |
15 | application = get_wsgi_application()
16 |
--------------------------------------------------------------------------------
/vite.config.js:
--------------------------------------------------------------------------------
import { defineConfig } from 'vite'
import vue from '@vitejs/plugin-vue2'

// https://vitejs.dev/config/
// Build configuration for the Vue 2 frontend living under ./frontend.
export default defineConfig({
  root: './frontend',
  // Assets are served from the /static/ prefix at runtime.
  base: '/static/',
  plugins: [vue()],
  define: {
    // Injected at build time; null means "unset" for the consuming code.
    API_ROOT: JSON.stringify(process.env.API_ROOT || null),
  },
  resolve: {
    // Alias to the full build of Vue 2 (includes the template compiler).
    alias: { vue: 'vue/dist/vue.esm.js' },
  },
})
21 |
--------------------------------------------------------------------------------