├── .mypy.ini
├── MANIFEST.in
├── .coveragerc
├── .github
│   ├── FUNDING.yml
│   ├── ISSUE_TEMPLATE
│   │   └── bug-error-report.md
│   └── workflows
│       ├── gh-pages.yml
│       └── lint-test.yml
├── tests
│   ├── data
│   │   ├── ebook
│   │   │   ├── cover.jpg
│   │   │   ├── dummy.epub
│   │   │   ├── content
│   │   │   │   ├── assets
│   │   │   │   │   └── cover.jpg
│   │   │   │   ├── pages
│   │   │   │   │   ├── page-01.xhtml
│   │   │   │   │   ├── page-02.xhtml
│   │   │   │   │   └── Cover.xhtml
│   │   │   │   └── toc.ncx
│   │   │   ├── rosters.json
│   │   │   ├── ebook.acsm
│   │   │   ├── openbook.json
│   │   │   ├── media.json
│   │   │   └── sync.json
│   │   ├── audiobook
│   │   │   ├── book.mp3
│   │   │   ├── cover.jpg
│   │   │   ├── odm
│   │   │   │   ├── book1
│   │   │   │   │   ├── ceremonies_herrick_bk_64kb.mp3
│   │   │   │   │   ├── ceremonies_herrick_gg_64kb.mp3
│   │   │   │   │   └── ceremonies_herrick_cjph_64kb.mp3
│   │   │   │   ├── book2
│   │   │   │   │   ├── ceremonies_herrick_bk_64kb.mp3
│   │   │   │   │   ├── ceremonies_herrick_gg_64kb.mp3
│   │   │   │   │   └── ceremonies_herrick_cjph_64kb.mp3
│   │   │   │   ├── book3
│   │   │   │   │   └── 01_ceremonies_herrick_cjph_64kb.mp3
│   │   │   │   ├── test.license
│   │   │   │   └── media.json
│   │   │   ├── openbook.json
│   │   │   ├── book.odm
│   │   │   ├── media.json
│   │   │   └── sync.json
│   │   ├── magazine
│   │   │   ├── cover.jpg
│   │   │   ├── content
│   │   │   │   ├── assets
│   │   │   │   │   ├── cover.jpg
│   │   │   │   │   ├── magazine.css
│   │   │   │   │   └── fontfaces.css
│   │   │   │   ├── stories
│   │   │   │   │   ├── story-01.xhtml
│   │   │   │   │   └── story-02.xhtml
│   │   │   │   └── pages
│   │   │   │       └── Cover.xhtml
│   │   │   ├── rosters.json
│   │   │   ├── openbook.json
│   │   │   ├── media.json
│   │   │   └── sync.json
│   │   ├── test4.odm.info.expected.txt
│   │   ├── test3.odm.info.expected.txt
│   │   ├── test2.odm.info.expected.txt
│   │   ├── test1.odm.info.expected.txt
│   │   ├── test3.odm
│   │   ├── test2.odm
│   │   ├── test1.odm
│   │   ├── test4.odm
│   │   ├── test_ref24.odm
│   │   ├── test.opf.xml
│   │   └── opf.schema.xml
│   ├── __init__.py
│   ├── base.py
│   ├── data.py
│   ├── utils_tests.py
│   ├── odmpy_shared_tests.py
│   ├── overdrive_tests.py
│   ├── odmpy_tests.py
│   └── odmpy_dl_tests.py
├── example.dl.conf
├── run_tests.sh
├── .flake8
├── dev-lint.sh
├── requirements.txt
├── requirements-dev.txt
├── .ruff.toml
├── odmpy
│   ├── __init__.py
│   ├── processing
│   │   ├── __init__.py
│   │   └── audiobook.py
│   ├── errors.py
│   ├── __main__.py
│   ├── cli_utils.py
│   ├── libby_errors.py
│   ├── utils.py
│   ├── constants.py
│   └── overdrive.py
├── cov2md.py
├── .gitignore
├── setup.py
└── .pylintrc

/.mypy.ini:
--------------------------------------------------------------------------------
1 | [mypy]
2 | 
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include LICENSE
2 | 
--------------------------------------------------------------------------------
/.coveragerc:
--------------------------------------------------------------------------------
1 | [run]
2 | source=odmpy
3 | relative_files=True
4 | 
--------------------------------------------------------------------------------
/.github/FUNDING.yml:
--------------------------------------------------------------------------------
1 | custom: ['https://buymeacoffee.com/ping/']
2 | 
--------------------------------------------------------------------------------
/tests/data/ebook/cover.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ping/odmpy/HEAD/tests/data/ebook/cover.jpg
--------------------------------------------------------------------------------
/tests/data/ebook/dummy.epub:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ping/odmpy/HEAD/tests/data/ebook/dummy.epub
--------------------------------------------------------------------------------
/tests/data/audiobook/book.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ping/odmpy/HEAD/tests/data/audiobook/book.mp3 -------------------------------------------------------------------------------- /tests/data/audiobook/cover.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ping/odmpy/HEAD/tests/data/audiobook/cover.jpg -------------------------------------------------------------------------------- /tests/data/magazine/cover.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ping/odmpy/HEAD/tests/data/magazine/cover.jpg -------------------------------------------------------------------------------- /tests/data/ebook/content/assets/cover.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ping/odmpy/HEAD/tests/data/ebook/content/assets/cover.jpg -------------------------------------------------------------------------------- /example.dl.conf: -------------------------------------------------------------------------------- 1 | --downloaddir=downloads 2 | --keepcover 3 | --chapters 4 | --merge 5 | --mergeformat=m4b 6 | --mergecodec=aac 7 | -------------------------------------------------------------------------------- /tests/data/magazine/content/assets/cover.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ping/odmpy/HEAD/tests/data/magazine/content/assets/cover.jpg -------------------------------------------------------------------------------- /run_tests.sh: -------------------------------------------------------------------------------- 1 | set -e 2 | 3 | coverage erase 4 | 5 | coverage run --append -m odmpy --version 6 | coverage run --append -m unittest -v tests 7 | -------------------------------------------------------------------------------- /tests/data/audiobook/odm/book1/ceremonies_herrick_bk_64kb.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ping/odmpy/HEAD/tests/data/audiobook/odm/book1/ceremonies_herrick_bk_64kb.mp3 -------------------------------------------------------------------------------- /tests/data/audiobook/odm/book1/ceremonies_herrick_gg_64kb.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ping/odmpy/HEAD/tests/data/audiobook/odm/book1/ceremonies_herrick_gg_64kb.mp3 -------------------------------------------------------------------------------- /tests/data/audiobook/odm/book2/ceremonies_herrick_bk_64kb.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ping/odmpy/HEAD/tests/data/audiobook/odm/book2/ceremonies_herrick_bk_64kb.mp3 -------------------------------------------------------------------------------- /tests/data/audiobook/odm/book2/ceremonies_herrick_gg_64kb.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ping/odmpy/HEAD/tests/data/audiobook/odm/book2/ceremonies_herrick_gg_64kb.mp3 -------------------------------------------------------------------------------- /tests/data/audiobook/odm/book1/ceremonies_herrick_cjph_64kb.mp3: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ping/odmpy/HEAD/tests/data/audiobook/odm/book1/ceremonies_herrick_cjph_64kb.mp3 -------------------------------------------------------------------------------- /tests/data/audiobook/odm/book2/ceremonies_herrick_cjph_64kb.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ping/odmpy/HEAD/tests/data/audiobook/odm/book2/ceremonies_herrick_cjph_64kb.mp3 -------------------------------------------------------------------------------- /tests/data/audiobook/odm/book3/01_ceremonies_herrick_cjph_64kb.mp3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ping/odmpy/HEAD/tests/data/audiobook/odm/book3/01_ceremonies_herrick_cjph_64kb.mp3 -------------------------------------------------------------------------------- /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | #max-line-length = 120 3 | extend-ignore = 4 | # let black determine line length 5 | E501, 6 | # ref https://github.com/psf/black/issues/1437 7 | E203 8 | -------------------------------------------------------------------------------- /tests/data/magazine/content/assets/magazine.css: -------------------------------------------------------------------------------- 1 | #article-body { 2 | font-family: 'SourceSerifPro-Bold'; 3 | font-weight: normal; 4 | font-size: 19px; 5 | overflow-x: hidden; 6 | } 7 | -------------------------------------------------------------------------------- /dev-lint.sh: -------------------------------------------------------------------------------- 1 | # helper script for linting 2 | #flake8 setup.py odmpy tests 3 | ruff check setup.py odmpy tests 4 | pylint setup.py odmpy tests 5 | black --check setup.py odmpy tests 6 | mypy --package odmpy --package tests 7 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | requests >= 2.28.0 2 | eyed3 >= 0.9.7 3 | mutagen >= 1.46.0 4 | termcolor >= 2.0.0 5 | tqdm >= 4.63.0 6 | typing_extensions; python_version < '3.8' 7 | beautifulsoup4 >= 4.11.0 8 | lxml >= 4.9.0 9 | iso639-lang >= 2.1.0 10 | -------------------------------------------------------------------------------- /requirements-dev.txt: -------------------------------------------------------------------------------- 1 | # flake8 2 | ruff 3 | pylint 4 | black 5 | mypy 6 | types-requests 7 | types-tqdm 8 | types-beautifulsoup4 9 | # for tests 10 | responses 11 | # for tests: to validate opf 12 | lxml 13 | # for tests: to validate epub generated 14 | EbookLib 15 | coverage 16 | -------------------------------------------------------------------------------- /.ruff.toml: -------------------------------------------------------------------------------- 1 | # Never enforce `E501` (line length violations). 
2 | # E203: https://github.com/psf/black/issues/1437 3 | ignore = ["E501", "E203"] 4 | 5 | # Assume Python 3.8 because calibre 6 | target-version = "py38" 7 | 8 | # F401: imported but unused 9 | [per-file-ignores] 10 | "__init__.py" = ["F401"] 11 | -------------------------------------------------------------------------------- /tests/data/ebook/content/pages/page-01.xhtml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | Test Story 2 5 | 6 | 7 | 8 | -------------------------------------------------------------------------------- /tests/data/ebook/content/pages/page-02.xhtml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | Test Story 2 5 | 6 | 7 | 8 | -------------------------------------------------------------------------------- /tests/data/magazine/content/stories/story-01.xhtml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | Test Story 2 5 | 6 | 7 | 8 | -------------------------------------------------------------------------------- /tests/data/magazine/content/stories/story-02.xhtml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | Test Story 2 5 | 6 | 7 | 8 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # flake8: noqa 4 | from .odmpy_tests import OdmpyTests 5 | from .libby_tests import LibbyClientTests 6 | from .utils_tests import UtilsTests 7 | from .odmpy_libby_tests import OdmpyLibbyTests 8 | from .odmpy_dl_tests import OdmpyDlTests 9 | from .odmpy_shared_tests import ProcessingSharedTests 10 | from .overdrive_tests import OverDriveClientTests 11 | -------------------------------------------------------------------------------- /tests/data/audiobook/odm/test.license: -------------------------------------------------------------------------------- 1 | 1.00fef5121-bb1f-42a5-b62a-d9fded939d50-425B6B59B16-425D-11EB-81AA-ACDE48001122CGYIYDAaLg9pMTeJK6CQgFmfBr41r1glRlT0L3Ts59CXCoQgjz0StM7AaZ9qq3qRfYARJ+vG0Vx9TO3b8X6ND3HXboZ3ZIOMDbzkEJ/k14SZ6K//IConzYfJQnhUXJi420BqMUeO35AG5AVh50MqvdQ3QTcSzsz+a3Ul79xJLWI= -------------------------------------------------------------------------------- /tests/data/test4.odm.info.expected.txt: -------------------------------------------------------------------------------- 1 | Title: Ceremonies For Christmas 2 | Creators: Robert Herrick (Author), LibriVox Volunteers (Narrator) 3 | Publisher: Librivox 4 | Subjects: Fiction 5 | Languages: English 6 | Description: 7 | À,Á,Â,Ã,Ä,Å,Æ,Ç,È,É,Ê,Ë,Ì,Í,Î,Ï,Ð,Ñ,Ò,Ó,Ô,Õ,Ö,Ø,Ù,Ú,Û,Ü,Ý,Þ,ß,à,á,â,ã,ä,å,æ,ç,è,é,ê,ë,ì,í,î,ï,ð,ñ,ò,ó,ô,õ,ö,ø,ù,ú,û,ü,ý,þ,ÿ ¡,¢,£,¤,¥,¦,§,¨,©,ª,«,¬,­,®,¯,°,±,²,³,´,µ,¶,¸,¹,º,»,¼,½,¾,¿,×,÷ fiancée & friends “ ” ’ [ ] 8 | 9 | Format: Medium Quality 10 | * Part 1 - 01:01 (483kB) 11 | -------------------------------------------------------------------------------- /tests/data/test3.odm.info.expected.txt: -------------------------------------------------------------------------------- 1 | Title: Ceremonies For Christmas 2 | Creators: Robert Herrick (Author), LibriVox Volunteers (Narrator) 3 | Publisher: Librivox 4 | Subjects: Fiction 5 | Languages: English 6 | Description: 7 | Robert Herrick was a 17th-century English lyric poet and cleric. He is best known for Hesperides, a book of poems. 
This includes the carpe diem poem "To the Virgins, to Make Much of Time", with the first line "Gather ye rosebuds while ye may" 8 | 9 | Format: Medium Quality 10 | * Part 1 - 01:01 (483kB) 11 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug-error-report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug/Error report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the problem** 11 | A clear and concise description of what the bug/error is. If there is an error traceback dump, paste the text here (please avoid screenshots). 12 | 13 | **To Reproduce** 14 | Please include the full command used, for example `odmpy libby --direct` 15 | 16 | **Version/Environment** 17 | Please paste the contents of `odmpy --version` here. 18 | -------------------------------------------------------------------------------- /tests/data/test2.odm.info.expected.txt: -------------------------------------------------------------------------------- 1 | Title: 크리스마스를 위한 의식 2 | Creators: 로버트 Herrick (Author), LibriVox Volunteers (Narrator) 3 | Publisher: Librivox 4 | Subjects: Fiction 5 | Languages: English 6 | Description: 7 | 로버트 허릭 was a 17th-century English lyric poet and cleric. He is best known for Hesperides, a book of poems. This includes the carpe diem poem "To the Virgins, to Make Much of Time", with the first line "Gather ye rosebuds while ye may" 8 | 9 | Format: Medium Quality 10 | * Part 1 - 01:07 (523kB) 11 | * Part 2 - 01:01 (483kB) 12 | * Part 3 - 01:06 (521kB) 13 | -------------------------------------------------------------------------------- /tests/data/test1.odm.info.expected.txt: -------------------------------------------------------------------------------- 1 | Title: Ceremonies For Christmas 2 | Creators: Robert Herrick (Author), LibriVox Volunteers (Narrator) 3 | Publisher: Librivox 4 | Subjects: Fiction 5 | Languages: English 6 | Description: 7 | Robert Herrick was a 17th-century English lyric poet and cleric. He is best known for Hesperides, a book of poems. This includes the carpe diem poem "To the Virgins, to Make Much of Time", with the first line "Gather ye rosebuds while ye may" 8 | 9 | Format: Medium Quality 10 | * Part 1 - 01:07 (523kB) 11 | * Part 2 - 01:01 (483kB) 12 | * Part 3 - 01:06 (521kB) 13 | -------------------------------------------------------------------------------- /tests/data/ebook/rosters.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "id": "title-content-12345a67b8c912dea1b2cd345678efa9", 4 | "group": "title-content", 5 | "version": "1.1.3-1", 6 | "entries": [ 7 | { 8 | "url": "http://localhost/pages/Cover.xhtml", 9 | "bytes": 1 10 | }, 11 | { 12 | "url": "http://localhost/pages/page-01.xhtml", 13 | "bytes": 1 14 | }, 15 | { 16 | "url": "http://localhost/pages/page-02.xhtml", 17 | "bytes": 1 18 | }, 19 | { 20 | "url": "http://localhost/assets/cover.jpg", 21 | "bytes": 1 22 | }, 23 | { 24 | "url": "http://localhost/toc.ncx", 25 | "bytes": 1 26 | } 27 | ] 28 | } 29 | ] -------------------------------------------------------------------------------- /odmpy/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2018 github.com/ping 2 | # 3 | # This file is part of odmpy. 
4 | # 5 | # odmpy is free software: you can redistribute it and/or modify 6 | # it under the terms of the GNU General Public License as published by 7 | # the Free Software Foundation, either version 3 of the License, or 8 | # (at your option) any later version. 9 | # 10 | # odmpy is distributed in the hope that it will be useful, 11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 | # GNU General Public License for more details. 14 | # 15 | # You should have received a copy of the GNU General Public License 16 | # along with odmpy. If not, see . 17 | # 18 | -------------------------------------------------------------------------------- /tests/data/magazine/content/pages/Cover.xhtml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | Cover 7 | 8 | 9 | 10 | 11 | 12 | -------------------------------------------------------------------------------- /tests/data/magazine/rosters.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "id": "title-content-12345a67b8c912dea1b2cd345678efa9", 4 | "group": "title-content", 5 | "version": "1.1.3-1", 6 | "entries": [ 7 | { 8 | "url": "http://localhost/pages/Cover.xhtml", 9 | "bytes": 1 10 | }, 11 | { 12 | "url": "http://localhost/stories/story-01.xhtml", 13 | "bytes": 1 14 | }, 15 | { 16 | "url": "http://localhost/stories/story-02.xhtml", 17 | "bytes": 1 18 | }, 19 | { 20 | "url": "http://localhost/assets/cover.jpg", 21 | "bytes": 1 22 | }, 23 | { 24 | "url": "http://localhost/assets/magazine.css", 25 | "bytes": 1 26 | }, 27 | { 28 | "url": "http://localhost/assets/fontfaces.css", 29 | "bytes": 1 30 | } 31 | ] 32 | } 33 | ] -------------------------------------------------------------------------------- /tests/data/ebook/content/pages/Cover.xhtml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | Cover 7 | 8 | 9 | 10 | 11 | 12 | -------------------------------------------------------------------------------- /odmpy/processing/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2023 github.com/ping 2 | # 3 | # This file is part of odmpy. 4 | # 5 | # odmpy is free software: you can redistribute it and/or modify 6 | # it under the terms of the GNU General Public License as published by 7 | # the Free Software Foundation, either version 3 of the License, or 8 | # (at your option) any later version. 9 | # 10 | # odmpy is distributed in the hope that it will be useful, 11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 | # GNU General Public License for more details. 14 | # 15 | # You should have received a copy of the GNU General Public License 16 | # along with odmpy. If not, see . 17 | # 18 | 19 | # flake8: noqa 20 | from .odm import process_odm, process_odm_return 21 | from .audiobook import process_audiobook_loan 22 | from .ebook import process_ebook_loan 23 | -------------------------------------------------------------------------------- /odmpy/errors.py: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2023 github.com/ping 2 | # 3 | # This file is part of odmpy. 
4 | # 5 | # odmpy is free software: you can redistribute it and/or modify 6 | # it under the terms of the GNU General Public License as published by 7 | # the Free Software Foundation, either version 3 of the License, or 8 | # (at your option) any later version. 9 | # 10 | # odmpy is distributed in the hope that it will be useful, 11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 | # GNU General Public License for more details. 14 | # 15 | # You should have received a copy of the GNU General Public License 16 | # along with odmpy. If not, see . 17 | # 18 | 19 | 20 | class OdmpyRuntimeError(RuntimeError): 21 | pass 22 | 23 | 24 | class LibbyNotConfiguredError(OdmpyRuntimeError): 25 | """ 26 | Raised when Libby is not yet configured. Used in `--check`. 27 | """ 28 | 29 | pass 30 | -------------------------------------------------------------------------------- /.github/workflows/gh-pages.yml: -------------------------------------------------------------------------------- 1 | name: "Update gh-pages" 2 | 3 | on: 4 | # Runs on pushes targeting the default branch 5 | push: 6 | branches: ["master"] 7 | paths: 8 | - "README.md" 9 | - .github/workflows/gh-pages.yml 10 | 11 | # Allows you to run this workflow manually from the Actions tab 12 | workflow_dispatch: 13 | 14 | jobs: 15 | update-pages: 16 | runs-on: ubuntu-latest 17 | steps: 18 | - name: Get source 19 | uses: actions/checkout@v3 20 | with: 21 | fetch-depth: 1 22 | path: "source" 23 | - name: Get pages 24 | uses: actions/checkout@v3 25 | with: 26 | ref: "gh-pages" 27 | fetch-depth: 1 28 | path: "pages" 29 | - name: Update page 30 | run: | 31 | cp -f source/README.md pages/index.md 32 | cd pages 33 | git config user.name github-actions 34 | git config user.email github-actions@github.com 35 | git add index.md 36 | if [[ `git status --porcelain --untracked-files=no` ]]; then git commit -m "Updated from source $GITHUB_SHA"; git push; fi 37 | -------------------------------------------------------------------------------- /tests/data/ebook/content/toc.ncx: -------------------------------------------------------------------------------- 1 | 2 | 4 | 5 | 6 | 7 | 8 | Test EBook 9 | 10 | 11 | Test Publisher 12 | 13 | 14 | 15 | 16 | Test EBook 17 | 18 | 19 | 20 | 21 | 22 | Test Chapter 1 23 | 24 | 25 | 26 | 27 | 28 | Test Chapter 2 29 | 30 | 31 | 32 | 33 | -------------------------------------------------------------------------------- /odmpy/__main__.py: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2018 github.com/ping 2 | # 3 | # This file is part of odmpy. 4 | # 5 | # odmpy is free software: you can redistribute it and/or modify 6 | # it under the terms of the GNU General Public License as published by 7 | # the Free Software Foundation, either version 3 of the License, or 8 | # (at your option) any later version. 9 | # 10 | # odmpy is distributed in the hope that it will be useful, 11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 | # GNU General Public License for more details. 14 | # 15 | # You should have received a copy of the GNU General Public License 16 | # along with odmpy. If not, see . 
17 | # 18 | 19 | import sys 20 | 21 | from .errors import OdmpyRuntimeError 22 | from .odm import run 23 | 24 | 25 | def main() -> None: 26 | try: 27 | run() 28 | except (KeyboardInterrupt, OdmpyRuntimeError): 29 | # we can silently ignore LibbyNotConfiguredError 30 | # because the message is already shown earlier 31 | sys.exit(1) 32 | 33 | 34 | if __name__ == "__main__": 35 | main() 36 | -------------------------------------------------------------------------------- /cov2md.py: -------------------------------------------------------------------------------- 1 | import json 2 | from pathlib import Path 3 | 4 | # 5 | # Simple script to convert coverage.json into markdown for display as GitHub Actions job summary 6 | # 7 | 8 | 9 | def _escape(txt: str) -> str: 10 | return txt.replace("_", r"\_") 11 | 12 | 13 | def convert(cover_json_filepath, markdown_filepath): 14 | cover_json_path = Path(cover_json_filepath) 15 | markdown_path = Path(markdown_filepath) 16 | with cover_json_path.open("r", encoding="utf-8") as f: 17 | results = json.load(f) 18 | 19 | job_summary = "" 20 | job_summary += f'\nTotal Coverage: __{results.get("totals", {}).get("percent_covered", 0):.1f}%__ \n\n' 21 | job_summary += """| Name | Stmts | Miss | Cover | 22 | | :--- | ---: | ---: | ---: | 23 | """ 24 | for k, v in results.get("files", {}).items(): 25 | summary = v.get("summary", {}) 26 | job_summary += f'| {_escape(k)} | {summary.get("num_statements", 0)} | {summary.get("missing_lines", 0)} | {summary.get("percent_covered", 0):.1f}% |\n' 27 | 28 | with markdown_path.open("w", encoding="utf-8") as f: 29 | f.write(job_summary) 30 | try: 31 | cover_json_path.unlink() 32 | except: # noqa: E722, pylint: disable=bare-except 33 | pass 34 | 35 | 36 | if __name__ == "__main__": 37 | convert("coverage.json", "coverage.md") 38 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | coverage.md 2 | odmpy_settings/ 3 | *.mp3 4 | *.odm 5 | *.license 6 | *.conf 7 | *.json 8 | 9 | .vscode/ 10 | venv/ 11 | venv3/ 12 | 13 | *.iml 14 | .idea/ 15 | 16 | # OS generated files # 17 | .DS_Store 18 | .DS_Store? 19 | ._* 20 | .Spotlight-V100 21 | .Trashes 22 | ehthumbs.db 23 | Thumbs.db 24 | 25 | # Byte-compiled / optimized / DLL files 26 | __pycache__/ 27 | *.py[cod] 28 | *$py.class 29 | 30 | # C extensions 31 | *.so 32 | 33 | # Distribution / packaging 34 | .Python 35 | env/ 36 | build/ 37 | develop-eggs/ 38 | dist/ 39 | downloads/ 40 | eggs/ 41 | .eggs/ 42 | lib/ 43 | lib64/ 44 | parts/ 45 | sdist/ 46 | var/ 47 | *.egg-info/ 48 | .installed.cfg 49 | *.egg 50 | 51 | # PyInstaller 52 | # Usually these files are written by a python script from a template 53 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
54 | *.manifest 55 | *.spec 56 | 57 | # Installer logs 58 | pip-log.txt 59 | pip-delete-this-directory.txt 60 | 61 | # Unit test / coverage reports 62 | htmlcov/ 63 | .tox/ 64 | .coverage 65 | .coverage.* 66 | .cache 67 | nosetests.xml 68 | coverage.xml 69 | *,cover 70 | .hypothesis/ 71 | 72 | # Translations 73 | *.mo 74 | *.pot 75 | 76 | # Django stuff: 77 | *.log 78 | 79 | # Sphinx documentation 80 | docs/_build/ 81 | 82 | # PyBuilder 83 | target/ 84 | 85 | #Ipython Notebook 86 | .ipynb_checkpoints 87 | -------------------------------------------------------------------------------- /tests/data/magazine/openbook.json: -------------------------------------------------------------------------------- 1 | { 2 | "title": { 3 | "main": "Test Magazine", 4 | "subtitle": "", 5 | "collection": "Test Magazine" 6 | }, 7 | "creator": [ 8 | { 9 | "name": "Test Publisher", 10 | "role": "pbl" 11 | } 12 | ], 13 | "description": { 14 | "full": "Test Magazine's full description", 15 | "short": "Test Magazine's short description" 16 | }, 17 | "language": "en", 18 | "nav": { 19 | "toc": [ 20 | { 21 | "path": "pages/Cover.xhtml", 22 | "title": "Test Magazine", 23 | "pageRange": "Cover", 24 | "featureImage": "assets/cover.jpg" 25 | }, 26 | { 27 | "path": "stories/story-01.xhtml", 28 | "title": "Test Story 1", 29 | "sectionName": "Test Section", 30 | "pageRange": "2" 31 | }, 32 | { 33 | "path": "stories/story-02.xhtml", 34 | "title": "Test Story 2", 35 | "sectionName": "Test Section", 36 | "pageRange": "3" 37 | } 38 | ] 39 | }, 40 | "rendition-format": "ebook", 41 | "spine": [ 42 | { 43 | "path": "pages/Cover.xhtml", 44 | "media-type": "application/xhtml+xml", 45 | "-odread-spine-position": 0, 46 | "-odread-original-path": "pages/Cover.xhtml" 47 | }, 48 | { 49 | "path": "stories/story-01.xhtml", 50 | "media-type": "application/xhtml+xml", 51 | "-odread-spine-position": 1, 52 | "-odread-original-path": "stories/story-01.xhtml" 53 | }, 54 | { 55 | "path": "stories/story-02.xhtml", 56 | "media-type": "application/xhtml+xml", 57 | "-odread-spine-position": 2, 58 | "-odread-original-path": "stories/story-02.xhtml" 59 | } 60 | ] 61 | } -------------------------------------------------------------------------------- /tests/data/magazine/content/assets/fontfaces.css: -------------------------------------------------------------------------------- 1 | @font-face { 2 | font-family: 'SourceSerifPro-Regular'; 3 | src: url('fonts/SourceSerifPro-Regular.ttf') format('truetype'); 4 | font-weight: normal; 5 | font-style: normal; 6 | } 7 | @font-face { 8 | font-family: 'SourceSerifPro-Bold'; 9 | src: url('fonts/SourceSerifPro-Bold.ttf') format('truetype'); 10 | font-weight: bold; 11 | font-style: normal; 12 | } 13 | @font-face { 14 | font-family: 'SourceSerifPro-Semibold'; 15 | src: url('fonts/SourceSerifPro-Semibold.ttf') format('truetype'); 16 | font-weight: bold; 17 | font-style: normal; 18 | } 19 | @font-face { 20 | font-family: 'SourceSerifPro-Light'; 21 | src: url('fonts/SourceSerifPro-Light.ttf') format('truetype'); 22 | font-weight: bold; 23 | font-style: normal; 24 | } 25 | @font-face { 26 | font-family: 'SourceSansPro-Bold'; 27 | src: url('fonts/SourceSansPro-Bold.ttf') format('truetype'); 28 | font-weight: normal; 29 | font-style: normal; 30 | } 31 | @font-face { 32 | font-family: 'SourceSansPro-Regular'; 33 | src: url('fonts/SourceSansPro-Regular.ttf') format('truetype'); 34 | font-weight: normal; 35 | font-style: normal; 36 | } 37 | @font-face { 38 | font-family: 'SourceSansPro-Light'; 39 | src: 
url('fonts/SourceSansPro-Light.ttf') format('truetype'); 40 | font-weight: normal; 41 | font-style: normal; 42 | } 43 | @font-face { 44 | font-family: 'SourceSansPro-SemiBold'; 45 | src: url('fonts/SourceSansPro-SemiBold.ttf') format('truetype'); 46 | font-weight: normal; 47 | font-style: normal; 48 | } 49 | @font-face { 50 | font-family: 'OpenSans-Regular'; 51 | src: url('fonts/OpenSans-Regular.ttf') format('truetype'); 52 | font-weight: normal; 53 | font-style: normal; 54 | } 55 | -------------------------------------------------------------------------------- /tests/data/audiobook/openbook.json: -------------------------------------------------------------------------------- 1 | { 2 | "title": { 3 | "main": "Test EBook", 4 | "subtitle": "", 5 | "collection": "Test EBook Series" 6 | }, 7 | "creator": [ 8 | { 9 | "name": "Test Author", 10 | "role": "author" 11 | }, 12 | { 13 | "name": "Test Narrator", 14 | "role": "narrator" 15 | }, 16 | { 17 | "name": "Test Publisher", 18 | "role": "pbl" 19 | } 20 | ], 21 | "description": { 22 | "full": "Test Audiobook's full description", 23 | "short": "Test Audiobook's short description" 24 | }, 25 | "language": "en", 26 | "nav": { 27 | "toc": [ 28 | { 29 | "title": "Ball Lightning", 30 | "path": "{AAAAAAAA-BBBB-CCCC-9999-ABCDEF123456}book.mp3" 31 | }, 32 | { 33 | "title": "Prelude", 34 | "path": "{AAAAAAAA-BBBB-CCCC-9999-ABCDEF123456}book.mp3#15" 35 | }, 36 | { 37 | "title": "Part 1 - College", 38 | "path": "{AAAAAAAA-BBBB-CCCC-9999-ABCDEF123456}book.mp3#30" 39 | }, 40 | { 41 | "title": "Part 1 - Strange Phenomena 1", 42 | "path": "{AAAAAAAA-BBBB-CCCC-9999-ABCDEF123456}book.mp3#40" 43 | }, 44 | { 45 | "title": "Part 1 - Ball Lightning", 46 | "path": "{AAAAAAAA-BBBB-CCCC-9999-ABCDEF123456}book.mp3#55" 47 | } 48 | ] 49 | }, 50 | "rendition-format": "ebook", 51 | "spine": [ 52 | { 53 | "path": "{AAAAAAAA-BBBB-CCCC-9999-ABCDEF123456}book.mp3", 54 | "media-type": "audio/mpeg", 55 | "audio-duration": 61.49, 56 | "audio-bitrate": 64, 57 | "-odread-spine-position": 0, 58 | "-odread-file-bytes": 5792287, 59 | "-odread-original-path": "{AAAAAAAA-BBBB-CCCC-9999-ABCDEF123456}book.mp3" 60 | } 61 | ] 62 | } -------------------------------------------------------------------------------- /tests/data/ebook/ebook.acsm: -------------------------------------------------------------------------------- 1 | 2 | urn:uuid:12345a67-b8c9-12de-a1b2-cd345678efa9 3 | http://localhost/fulfillment 4 | 1234 5 | 2023-03-01T00:00:00-00:00 6 | 2023-03-31T00:00:00-00:00 7 | 8 | urn:uuid:12345a67-b8c9-12de-a1b2-cd345678efa9 9 | 1 10 | 11 | Test EBook 12 | Test Author 13 | Test Publisher 14 | 9789999999999 15 | application/epub+zip 16 | EN-US 17 | http://localhost/mock/cover.jpg 18 | 19 | 20 | urn:uuid:12345a67-b8c9-12de-a1b2-cd345678efa9 21 | 22 | 23 | 1645997 24 | 25 | 26 | 1645997 27 | 28 | 29 | 1645997 30 | 31 | 32 | 1645997 33 | 34 | 35 | 36 | 37 | YXNkZmFzZGZhcw== 38 | 39 | -------------------------------------------------------------------------------- /tests/data/ebook/openbook.json: -------------------------------------------------------------------------------- 1 | { 2 | "title": { 3 | "main": "Test EBook", 4 | "subtitle": "", 5 | "collection": "Test EBook Series" 6 | }, 7 | "creator": [ 8 | { 9 | "name": "Test Publisher", 10 | "role": "pbl" 11 | } 12 | ], 13 | "description": { 14 | "full": "Test EBook's full description", 15 | "short": "Test EBooks's short description" 16 | }, 17 | "language": "en", 18 | "nav": { 19 | "landmarks": [ 20 | { 21 | "type": "cover", 22 | "path": 
"pages/Cover.xhtml", 23 | "title": "Cover", 24 | "media-type": "application/xhtml+xml" 25 | } 26 | ], 27 | "toc": [ 28 | { 29 | "path": "pages/Cover.xhtml", 30 | "title": "Test EBook", 31 | "pageRange": "Cover" 32 | }, 33 | { 34 | "path": "pages/page-01.xhtml", 35 | "title": "Test Chapter 1", 36 | "sectionName": "Test Section", 37 | "pageRange": "2" 38 | }, 39 | { 40 | "path": "pages/page-02.xhtml", 41 | "title": "Test Chapter 2", 42 | "pageRange": "3" 43 | } 44 | ] 45 | }, 46 | "rendition-format": "ebook", 47 | "spine": [ 48 | { 49 | "path": "pages/Cover.xhtml", 50 | "media-type": "application/xhtml+xml", 51 | "-odread-spine-position": 0, 52 | "-odread-original-path": "pages/Cover.xhtml" 53 | }, 54 | { 55 | "path": "pages/page-01.xhtml", 56 | "media-type": "application/xhtml+xml", 57 | "-odread-spine-position": 1, 58 | "-odread-original-path": "pages/page-01.xhtml" 59 | }, 60 | { 61 | "path": "pages/page-02.xhtml", 62 | "media-type": "application/xhtml+xml", 63 | "-odread-spine-position": 2, 64 | "-odread-original-path": "pages/page-02.xhtml" 65 | } 66 | ] 67 | } -------------------------------------------------------------------------------- /tests/data/magazine/media.json: -------------------------------------------------------------------------------- 1 | { 2 | "firstCreatorId": 0, 3 | "subjects": [ 4 | { 5 | "name": "Tests", 6 | "id": "1000" 7 | } 8 | ], 9 | "bisacCodes": [], 10 | "bisac": [], 11 | "levels": [], 12 | "creators": [], 13 | "languages": [ 14 | { 15 | "name": "English", 16 | "id": "en" 17 | } 18 | ], 19 | "ratings": { 20 | "maturityLevel": { 21 | "name": "General content", 22 | "id": "generalcontent" 23 | }, 24 | "naughtyScore": { 25 | "name": "General content", 26 | "id": "GeneralContent" 27 | } 28 | }, 29 | "publisher": { 30 | "name": "Test Publisher", 31 | "id": "12345678" 32 | }, 33 | "keywords": [], 34 | "type": { 35 | "name": "Magazine", 36 | "id": "magazine" 37 | }, 38 | "covers": { 39 | "cover150Wide": { 40 | "width": 150, 41 | "height": 200, 42 | "href": "http://localhost/mock/cover.jpg" 43 | }, 44 | "cover300Wide": { 45 | "width": 300, 46 | "height": 400, 47 | "href": "http://localhost/mock/cover.jpg" 48 | }, 49 | "cover510Wide": { 50 | "width": 510, 51 | "height": 680, 52 | "href": "http://localhost/mock/cover.jpg" 53 | } 54 | }, 55 | "formats": [ 56 | { 57 | "identifiers": [], 58 | "id": "magazine-overdrive", 59 | "name": "OverDrive Magazine", 60 | "fulfillmentType": "bifocal" 61 | } 62 | ], 63 | "publisherAccount": { 64 | "name": "Tests", 65 | "id": "12345" 66 | }, 67 | "id": "9999999", 68 | "title": "Test Magazine", 69 | "sortTitle": "Test Magazine", 70 | "edition": "Jan 20 2023", 71 | "publishDateText": "Jan 19 2023 7:00PM", 72 | "publishDate": "2023-01-10T19:00:00Z", 73 | "estimatedReleaseDate": "2023-02-10T00:00:00Z", 74 | "description": "Test Magazine's description", 75 | "fullDescription": "Test Magazine's full description", 76 | "shortDescription": "Test Magazine' short description", 77 | "series": "Test Magazine", 78 | "reserveId": "12345a67-b8c9-12de-a1b2-cd345678efa9" 79 | } -------------------------------------------------------------------------------- /tests/data/magazine/sync.json: -------------------------------------------------------------------------------- 1 | { 2 | "result": "synchronized", 3 | "cards": [{"cardId": "123456789", "library": {"name": "Test Library"}}], 4 | "loans": [ 5 | { 6 | "firstCreatorId": 0, 7 | "renewableOn": "2023-03-01T00:00:00Z", 8 | "checkoutDate": "2023-03-01T00:00:00Z", 9 | "expireDate": "2023-03-31T00:00:00Z", 
10 | "expires": "2023-03-31T00:00:00Z", 11 | "isLuckyDayCheckout": false, 12 | "isAdvantageFiltered": false, 13 | "isHoldable": true, 14 | "isOwned": true, 15 | "isAssigned": false, 16 | "isBundledChild": false, 17 | "isFormatLockedIn": false, 18 | "isReturnable": true, 19 | "luckyDayOwnedCopies": 0, 20 | "luckyDayAvailableCopies": 0, 21 | "ownedCopies": 0, 22 | "availableCopies": 0, 23 | "checkoutId": 9999999999, 24 | "otherFormats": [], 25 | "subjects": [ 26 | { 27 | "name": "Science", 28 | "id": "9999" 29 | } 30 | ], 31 | "type": { 32 | "name": "Magazine", 33 | "id": "magazine" 34 | }, 35 | "covers": { 36 | "cover150Wide": { 37 | "width": 150, 38 | "height": 200, 39 | "href": "http://localhost/mock/cover.jpg" 40 | }, 41 | "cover300Wide": { 42 | "width": 300, 43 | "height": 400, 44 | "href": "http://localhost/mock/cover.jpg" 45 | }, 46 | "cover510Wide": { 47 | "width": 510, 48 | "height": 680, 49 | "href": "http://localhost/mock/cover.jpg" 50 | } 51 | }, 52 | "formats": [ 53 | { 54 | "identifiers": [], 55 | "id": "magazine-overdrive", 56 | "name": "OverDrive Magazine", 57 | "isLockedIn": false 58 | } 59 | ], 60 | "publisherAccount": { 61 | "name": "Tests", 62 | "id": "12345" 63 | }, 64 | "id": "9999999", 65 | "title": "Test Magazine", 66 | "sortTitle": "Test Magazine", 67 | "edition": "Jan 20 2023", 68 | "publishDateText": "Jan 10 2023 7:00PM", 69 | "publishDate": "2023-01-10T19:00:00Z", 70 | "holdsCount": 0, 71 | "availabilityType": "always", 72 | "series": "Test Magazine", 73 | "reserveId": "12345a67-b8c9-12de-a1b2-cd345678efa9", 74 | "websiteId": "99", 75 | "cardId": "123456789" 76 | } 77 | ] 78 | } -------------------------------------------------------------------------------- /tests/data/test3.odm: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | https://ping.github.io/odmpy/test_data/test.license 5 | 6 | MP3 Audio Book 7 | Ceremonies For Christmas 8 | Ceremonies For Christmas 9 | 10 | Librivox 11 | https://ping.github.io/odmpy/test_data/cover.jpg 12 | https://ping.github.io/odmpy/test_data/cover.jpg 13 | 14 | Robert Herrick 15 | LibriVox Volunteers 16 | 17 | 18 | Fiction 19 | 20 | 21 | English 22 | 23 | Robert Herrick was a 17th-century English lyric poet and cleric. He is best known for Hesperides, a book of poems. This includes the carpe diem poem "To the Virgins, to Make Much of Time", with the first line "Gather ye rosebuds while ye may" 24 | ]]> 25 | 1 26 | -1 27 | 1 28 | -1 29 | 1 30 | 1 31 | 1 32 | -1 33 | 0 34 | 0 35 | 1 36 | 2099-01-01T00:00:00ZLibrary Xhttp://overdrive.comhttp://overdrive.com/odmbanner.gif#eeeeee000-0000000-00000https://ping.github.io/odmpy/test_datahttps://ping.github.io/odmpy/test_data -------------------------------------------------------------------------------- /tests/data/audiobook/book.odm: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | https://ping.github.io/odmpy/test_data/test.license 5 | 6 | MP3 Audio Book 7 | Ceremonies For Christmas 8 | Ceremonies For Christmas 9 | 10 | Librivox 11 | https://ping.github.io/odmpy/test_data/cover.jpg 12 | https://ping.github.io/odmpy/test_data/cover.jpg 13 | 14 | Robert Herrick 15 | LibriVox Volunteers 16 | 17 | 18 | Fiction 19 | 20 | 21 | English 22 | 23 | Robert Herrick was a 17th-century English lyric poet and cleric. He is best known for Hesperides, a book of poems. 
This includes the carpe diem poem "To the Virgins, to Make Much of Time", with the first line "Gather ye rosebuds while ye may" 24 | ]]> 25 | 1 26 | -1 27 | 1 28 | -1 29 | 1 30 | 1 31 | 1 32 | -1 33 | 0 34 | 0 35 | 1 36 | 2099-01-01T00:00:00ZLibrary Xhttp://overdrive.comhttp://overdrive.com/odmbanner.gif#eeeeee000-0000000-00000https://ping.github.io/odmpy/test_datahttps://ping.github.io/odmpy/test_data -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2018 github.com/ping 2 | # 3 | # This file is part of odmpy. 4 | # 5 | # odmpy is free software: you can redistribute it and/or modify 6 | # it under the terms of the GNU General Public License as published by 7 | # the Free Software Foundation, either version 3 of the License, or 8 | # (at your option) any later version. 9 | # 10 | # odmpy is distributed in the hope that it will be useful, 11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 | # GNU General Public License for more details. 14 | # 15 | # You should have received a copy of the GNU General Public License 16 | # along with odmpy. If not, see . 17 | # 18 | import sys 19 | 20 | from setuptools import setup # type: ignore[import] 21 | 22 | __author__ = "ping" 23 | __url__ = "https://github.com/ping/odmpy/" 24 | __version__ = "0.8.1" # also update odmpy/odm.py 25 | 26 | 27 | __long_description__ = """ 28 | ``odmpy`` is a console manager for OverDrive audiobook loan files (.odm). 29 | """ 30 | 31 | install_requires = [ 32 | "requests>=2.28.0", 33 | "eyed3>=0.9.7", 34 | "mutagen>=1.46.0", 35 | "termcolor>=2.0.0", 36 | "tqdm>=4.63.0", 37 | "beautifulsoup4>=4.11.0", 38 | "lxml>=4.9.0", 39 | "iso639-lang>=2.1.0", 40 | ] 41 | if sys.version_info < (3, 8): 42 | install_requires.append("typing_extensions") 43 | 44 | setup( 45 | name="odmpy", 46 | version=__version__, 47 | author=__author__, 48 | license="GPL", 49 | url=__url__, 50 | packages=["odmpy", "odmpy.processing"], 51 | entry_points={ 52 | "console_scripts": [ 53 | "odmpy = odmpy.__main__:main", 54 | ] 55 | }, 56 | python_requires=">=3.7", 57 | install_requires=install_requires, 58 | include_package_data=True, 59 | platforms="any", 60 | long_description=__long_description__, 61 | keywords="overdrive audiobook", 62 | description="A console downloader for an OverDrive audiobook loan.", 63 | classifiers=[ 64 | "Development Status :: 4 - Beta", 65 | "License :: OSI Approved :: GNU General Public License v3 (GPLv3)", 66 | "Environment :: Console", 67 | "Intended Audience :: End Users/Desktop", 68 | "Programming Language :: Python :: 3.7", 69 | "Programming Language :: Python :: 3.8", 70 | "Programming Language :: Python :: 3.9", 71 | "Programming Language :: Python :: 3.10", 72 | "Programming Language :: Python :: 3.11", 73 | ], 74 | ) 75 | -------------------------------------------------------------------------------- /tests/data/test2.odm: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | https://ping.github.io/odmpy/test_data/test.license 5 | 6 | MP3 Audio Book 7 | 크리스마스를 위한 의식 8 | 크리스마스를 위한 의식 9 | 10 | Librivox 11 | https://ping.github.io/odmpy/test_data/cover.jpg 12 | https://ping.github.io/odmpy/test_data/cover.jpg 13 | 14 | 로버트 Herrick 15 | LibriVox Volunteers 16 | 17 | 18 | Fiction 19 | 20 | 21 | English 22 | 23 | 로버트 허릭 was a 17th-century English lyric 
poet and cleric. He is best known for Hesperides, a book of poems. This includes the carpe diem poem "To the Virgins, to Make Much of Time", with the first line "Gather ye rosebuds while ye may" 24 | ]]> 25 | 1 26 | -1 27 | 1 28 | -1 29 | 1 30 | 1 31 | 1 32 | -1 33 | 0 34 | 0 35 | 1 36 | 2099-01-01T00:00:00ZLibrary Xhttp://overdrive.comhttp://overdrive.com/odmbanner.gif#eeeeee000-0000000-00000https://ping.github.io/odmpy/test_datahttps://ping.github.io/odmpy/test_data -------------------------------------------------------------------------------- /tests/data/test1.odm: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | https://ping.github.io/odmpy/test_data/test.license 5 | 6 | MP3 Audio Book 7 | Ceremonies For Christmas 8 | Ceremonies For Christmas 9 | 10 | Librivox 11 | https://ping.github.io/odmpy/test_data/cover.jpg 12 | https://ping.github.io/odmpy/test_data/cover.jpg 13 | 14 | Robert Herrick 15 | LibriVox Volunteers 16 | 17 | 18 | Fiction 19 | 20 | 21 | English 22 | 23 | Robert Herrick was a 17th-century English lyric poet and cleric. He is best known for Hesperides, a book of poems. This includes the carpe diem poem "To the Virgins, to Make Much of Time", with the first line "Gather ye rosebuds while ye may" 24 | ]]> 25 | 1 26 | -1 27 | 1 28 | -1 29 | 1 30 | 1 31 | 1 32 | -1 33 | 0 34 | 0 35 | 1 36 | 2099-01-01T00:00:00ZLibrary Xhttp://overdrive.comhttp://overdrive.com/odmbanner.gif#eeeeee000-0000000-00000https://ping.github.io/odmpy/test_datahttps://ping.github.io/odmpy/test_data -------------------------------------------------------------------------------- /tests/base.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | import os 4 | import platform 5 | import shutil 6 | import sys 7 | import unittest 8 | import warnings 9 | from http.client import HTTPConnection 10 | from pathlib import Path 11 | 12 | test_logger = logging.getLogger(__name__) 13 | test_logger.setLevel(logging.WARNING) 14 | requests_logger = logging.getLogger("urllib3") 15 | requests_logger.setLevel(logging.WARNING) 16 | requests_logger.propagate = True 17 | 18 | is_windows = os.name == "nt" or platform.system().lower() == "windows" 19 | 20 | # detect if running on CI 21 | is_on_ci = False 22 | try: 23 | # https://docs.github.com/en/actions/learn-github-actions/variables#default-environment-variables 24 | _ = os.environ["CI"] 25 | is_on_ci = True 26 | except KeyError: 27 | pass 28 | 29 | 30 | class BaseTestCase(unittest.TestCase): 31 | def setUp(self) -> None: 32 | warnings.filterwarnings( 33 | action="ignore", message="unclosed", category=ResourceWarning 34 | ) 35 | self.test_data_dir = Path(__file__).absolute().parent.joinpath("data") 36 | self.test_downloads_dir = self.test_data_dir.joinpath("downloads") 37 | if not self.test_downloads_dir.exists(): 38 | self.test_downloads_dir.mkdir(parents=True, exist_ok=True) 39 | 40 | # disable color output 41 | os.environ["NO_COLOR"] = "1" 42 | 43 | self.logger = test_logger 44 | # hijack unittest -v arg to toggle log verbosity in test 45 | self.is_verbose = "-vv" in sys.argv 46 | if self.is_verbose: 47 | self.logger.setLevel(logging.DEBUG) 48 | requests_logger.setLevel(logging.DEBUG) 49 | HTTPConnection.debuglevel = 1 50 | logging.basicConfig(stream=sys.stdout) 51 | 52 | def tearDown(self) -> None: 53 | del os.environ["NO_COLOR"] 54 | if self.test_downloads_dir.exists(): 55 | shutil.rmtree(self.test_downloads_dir, ignore_errors=True) 56 | 57 | def 
_generate_fake_settings(self) -> Path: 58 | """ 59 | Generate fake settings file for odmpy/libby. 60 | 61 | :return: 62 | """ 63 | settings_folder = self.test_downloads_dir.joinpath("settings") 64 | if not settings_folder.exists(): 65 | settings_folder.mkdir(parents=True, exist_ok=True) 66 | 67 | # generate fake settings 68 | with settings_folder.joinpath("libby.json").open("w", encoding="utf-8") as f: 69 | json.dump( 70 | { 71 | "chip": "12345", 72 | "identity": "abcdefgh", 73 | "syncable": False, 74 | "primary": True, 75 | "__libby_sync_code": "12345678", 76 | }, 77 | f, 78 | ) 79 | return settings_folder 80 | -------------------------------------------------------------------------------- /tests/data/audiobook/media.json: -------------------------------------------------------------------------------- 1 | { 2 | "firstCreatorId": 999999, 3 | "subjects": [ 4 | { 5 | "name": "Tests", 6 | "id": "1000" 7 | } 8 | ], 9 | "bisacCodes": [], 10 | "bisac": [], 11 | "levels": [], 12 | "creators": [ 13 | { 14 | "id": 999999, 15 | "sortName": "Author, Test", 16 | "role": "Author", 17 | "name": "Test Author" 18 | } 19 | ], 20 | "languages": [ 21 | { 22 | "name": "English", 23 | "id": "en" 24 | } 25 | ], 26 | "ratings": { 27 | "maturityLevel": { 28 | "name": "General content", 29 | "id": "generalcontent" 30 | }, 31 | "naughtyScore": { 32 | "name": "General content", 33 | "id": "GeneralContent" 34 | } 35 | }, 36 | "publisher": { 37 | "name": "Test Publisher", 38 | "id": "12345678" 39 | }, 40 | "keywords": [], 41 | "type": { 42 | "name": "Audiobook", 43 | "id": "audiobook" 44 | }, 45 | "covers": { 46 | "cover150Wide": { 47 | "width": 150, 48 | "height": 200, 49 | "href": "http://localhost/mock/cover.jpg" 50 | }, 51 | "cover300Wide": { 52 | "width": 300, 53 | "height": 400, 54 | "href": "http://localhost/mock/cover.jpg" 55 | }, 56 | "cover510Wide": { 57 | "width": 510, 58 | "height": 680, 59 | "href": "http://localhost/mock/cover.jpg" 60 | } 61 | }, 62 | "formats": [ 63 | { 64 | "identifiers": [ 65 | { 66 | "value": "9789999999999", 67 | "type": "ISBN" 68 | } 69 | ], 70 | "isLockedIn": false, 71 | "id": "audiobook-overdrive", 72 | "name": "OverDrive Listen audiobook", 73 | "fulfillmentType": "bifocal" 74 | }, 75 | { 76 | "identifiers": [ 77 | { 78 | "value": "9789999999999", 79 | "type": "ISBN" 80 | } 81 | ], 82 | "isLockedIn": false, 83 | "id": "audiobook-mp3", 84 | "name": "MP3 audiobook", 85 | "isbn": "9789999999999", 86 | "fulfillmentType": "odm" 87 | } 88 | ], 89 | "publisherAccount": { 90 | "name": "Tests", 91 | "id": "12345" 92 | }, 93 | "detailedSeries": { 94 | "readingOrder": "2", 95 | "seriesName": "Test Audiobook" 96 | }, 97 | "id": "9999999", 98 | "firstCreatorName": "Test Author", 99 | "firstCreatorSortName": "Author, Test", 100 | "title": "Test Audiobook", 101 | "sortTitle": "Test Audiobook", 102 | "edition": "Jan 20 2023", 103 | "publishDateText": "Jan 19 2023 7:00PM", 104 | "publishDate": "2023-01-10T19:00:00Z", 105 | "estimatedReleaseDate": "2023-02-10T00:00:00Z", 106 | "description": "Test Audiobook's description", 107 | "fullDescription": "Test Audiobook's full description", 108 | "shortDescription": "Test Audiobook' short description", 109 | "series": "Test Audiobook Series", 110 | "reserveId": "12345a67-b8c9-12de-a1b2-cd345678efa9" 111 | } -------------------------------------------------------------------------------- /tests/data/test4.odm: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 
https://ping.github.io/odmpy/test_data/test.license 5 | 6 | MP3 Audio Book 7 | Ceremonies For Christmas 8 | Ceremonies For Christmas 9 | 10 | Librivox 11 | https://ping.github.io/odmpy/test_data/cover.jpg 12 | https://ping.github.io/odmpy/test_data/cover.jpg 13 | 14 | Robert Herrick 15 | LibriVox Volunteers 16 | 17 | 18 | Fiction 19 | 20 | 21 | English 22 | 23 | À,Á,Â,Ã,Ä,Å,Æ,Ç,È,É,Ê,Ë,Ì,Í,Î,Ï,Ð,Ñ,Ò,Ó,Ô,Õ,Ö,Ø,Ù,Ú,Û,Ü,Ý,Þ,ß,à,á,â,ã,ä,å,æ,ç,è,é,ê,ë,ì,í,î,ï,ð,ñ,ò,ó,ô,õ,ö,ø,ù,ú,û,ü,ý,þ,ÿ ¡,¢,£,¤,¥,¦,§,¨,©,ª,«,¬,­,®,¯,°,±,²,³,´,µ,¶,¸,¹,º,»,¼,½,¾,¿,×,÷ fiancée & friends “ ” ’ [ ] 24 | ]]> 25 | 1 26 | -1 27 | 1 28 | -1 29 | 1 30 | 1 31 | 1 32 | -1 33 | 0 34 | 0 35 | 1 36 | 2099-01-01T00:00:00ZLibrary Xhttp://overdrive.comhttp://overdrive.com/odmbanner.gif#eeeeee000-0000000-00000https://ping.github.io/odmpy/test_datahttps://ping.github.io/odmpy/test_data -------------------------------------------------------------------------------- /tests/data/test_ref24.odm: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | https://ping.github.io/odmpy/test_data/test.license 5 | 6 | MP3 Audio Book 7 | Ceremonies For Christmas 8 | Ceremonies For Christmas 9 | 10 | Librivox 11 | https://ping.github.io/odmpy/test_data/cover_NOTFOUND.jpg 12 | https://ping.github.io/odmpy/test_data/cover_NOTFOUND.jpg 13 | 14 | Robert Herrick 15 | LibriVox Volunteers 16 | 17 | 18 | Fiction 19 | 20 | 21 | English 22 | 23 | À,Á,Â,Ã,Ä,Å,Æ,Ç,È,É,Ê,Ë,Ì,Í,Î,Ï,Ð,Ñ,Ò,Ó,Ô,Õ,Ö,Ø,Ù,Ú,Û,Ü,Ý,Þ,ß,à,á,â,ã,ä,å,æ,ç,è,é,ê,ë,ì,í,î,ï,ð,ñ,ò,ó,ô,õ,ö,ø,ù,ú,û,ü,ý,þ,ÿ ¡,¢,£,¤,¥,¦,§,¨,©,ª,«,¬,­,®,¯,°,±,²,³,´,µ,¶,¸,¹,º,»,¼,½,¾,¿,×,÷ fiancée & friends “ ” ’ [ ] 24 | ]]> 25 | 1 26 | -1 27 | 1 28 | -1 29 | 1 30 | 1 31 | 1 32 | -1 33 | 0 34 | 0 35 | 1 36 | 2099-01-01T00:00:00ZLibrary Xhttp://overdrive.comhttp://overdrive.com/odmbanner.gif#eeeeee000-0000000-00000https://ping.github.io/odmpy/test_datahttps://ping.github.io/odmpy/test_data -------------------------------------------------------------------------------- /odmpy/cli_utils.py: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2023 github.com/ping 2 | # 3 | # This file is part of odmpy. 4 | # 5 | # odmpy is free software: you can redistribute it and/or modify 6 | # it under the terms of the GNU General Public License as published by 7 | # the Free Software Foundation, either version 3 of the License, or 8 | # (at your option) any later version. 9 | # 10 | # odmpy is distributed in the hope that it will be useful, 11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 | # GNU General Public License for more details. 14 | # 15 | # You should have received a copy of the GNU General Public License 16 | # along with odmpy. If not, see . 
17 | # 18 | import argparse 19 | from enum import Enum 20 | from typing import Tuple 21 | 22 | 23 | # 24 | # Stuff for the CLI 25 | # 26 | 27 | 28 | DEFAULT_FORMAT_FIELDS = ("Title", "Author", "Series", "ReadingOrder", "Edition", "ID") 29 | 30 | 31 | class OdmpyCommands(str, Enum): 32 | """ 33 | Command strings 34 | """ 35 | 36 | Information = "info" 37 | Download = "dl" 38 | Return = "ret" 39 | Libby = "libby" 40 | LibbyReturn = "libbyreturn" 41 | LibbyRenew = "libbyrenew" 42 | 43 | def __str__(self): 44 | return str(self.value) 45 | 46 | def __repr__(self): 47 | # to ensure that proper values are printed out in arg command_name error help 48 | return str(self.value) 49 | 50 | 51 | class OdmpyNoninteractiveOptions(str, Enum): 52 | """ 53 | Non-interactive arguments 54 | """ 55 | 56 | DownloadLatestN = "download_latest_n" 57 | DownloadSelectedN = "selected_loans_indices" 58 | DownloadSelectedId = "selected_loans_ids" 59 | ExportLoans = "export_loans_path" 60 | Check = "check_signed_in" 61 | 62 | def __str__(self): 63 | return str(self.value) 64 | 65 | 66 | def positive_int(value: str) -> int: 67 | """ 68 | Ensure that argument is a positive integer 69 | 70 | :param value: 71 | :return: 72 | """ 73 | try: 74 | int_value = int(value) 75 | except ValueError: 76 | raise argparse.ArgumentTypeError(f'"{value}" is not a positive integer value') 77 | if int_value <= 0: 78 | raise argparse.ArgumentTypeError(f'"{value}" is not a positive integer value') 79 | return int_value 80 | 81 | 82 | def valid_book_folder_file_format( 83 | value: str, 84 | fields: Tuple = DEFAULT_FORMAT_FIELDS, 85 | ) -> str: 86 | """ 87 | Ensure that the book folder format is valid 88 | 89 | :param value: 90 | :param fields: 91 | :return: 92 | """ 93 | values_dict = {} 94 | for f in fields: 95 | values_dict[f] = "" 96 | try: 97 | value % values_dict 98 | except KeyError as err: 99 | raise argparse.ArgumentTypeError( 100 | f'"{value}" is not a valid book folder/file name format: Invalid field {err}' 101 | ) from err 102 | except Exception as err: 103 | raise argparse.ArgumentTypeError( 104 | f'"{value}" is not a valid book folder/file name format: {err}' 105 | ) from err 106 | return value 107 | -------------------------------------------------------------------------------- /tests/data/ebook/media.json: -------------------------------------------------------------------------------- 1 | { 2 | "firstCreatorId": 999999, 3 | "subjects": [ 4 | { 5 | "name": "Tests", 6 | "id": "1000" 7 | } 8 | ], 9 | "bisacCodes": [ 10 | "FIC000000" 11 | ], 12 | "bisac": [ 13 | { 14 | "description": "Test", 15 | "code": "FIC000000" 16 | } 17 | ], 18 | "levels": [], 19 | "creators": [ 20 | { 21 | "id": 999999, 22 | "sortName": "Author, Test", 23 | "role": "Author", 24 | "name": "Test Author" 25 | } 26 | ], 27 | "languages": [ 28 | { 29 | "name": "English", 30 | "id": "en" 31 | } 32 | ], 33 | "ratings": { 34 | "maturityLevel": { 35 | "name": "General content", 36 | "id": "generalcontent" 37 | }, 38 | "naughtyScore": { 39 | "name": "General content", 40 | "id": "GeneralContent" 41 | } 42 | }, 43 | "publisher": { 44 | "name": "Test Publisher", 45 | "id": "12345678" 46 | }, 47 | "keywords": [], 48 | "type": { 49 | "name": "eBook", 50 | "id": "ebook" 51 | }, 52 | "covers": { 53 | "cover150Wide": { 54 | "width": 150, 55 | "height": 200, 56 | "href": "http://localhost/mock/cover.jpg" 57 | }, 58 | "cover300Wide": { 59 | "width": 300, 60 | "height": 400, 61 | "href": "http://localhost/mock/cover.jpg" 62 | }, 63 | "cover510Wide": { 64 | "width": 510, 65 | 
"height": 680, 66 | "href": "http://localhost/mock/cover.jpg" 67 | } 68 | }, 69 | "formats": [ 70 | { 71 | "identifiers": [ 72 | { 73 | "value": "9789999999999", 74 | "type": "ISBN" 75 | } 76 | ], 77 | "id": "ebook-overdrive", 78 | "name": "OverDrive Read", 79 | "fulfillmentType": "bifocal" 80 | }, 81 | { 82 | "identifiers": [ 83 | { 84 | "value": "9789999999999", 85 | "type": "ISBN" 86 | } 87 | ], 88 | "id": "ebook-epub-adobe", 89 | "name": "EPUB eBook", 90 | "isbn": "9789999999999" 91 | }, 92 | { 93 | "identifiers": [ 94 | { 95 | "value": "B000000000", 96 | "type": "ASIN" 97 | } 98 | ], 99 | "id": "ebook-kindle", 100 | "name": "Kindle Book", 101 | "fulfillmentType": "kindle" 102 | } 103 | ], 104 | "publisherAccount": { 105 | "name": "Tests", 106 | "id": "12345" 107 | }, 108 | "detailedSeries": { 109 | "rank": 3, 110 | "seriesId": 123456, 111 | "readingOrder": "3", 112 | "seriesName": "The Test Series" 113 | }, 114 | "id": "9999999", 115 | "firstCreatorName": "Test Author", 116 | "firstCreatorSortName": "Author, Test", 117 | "title": "Test EBook", 118 | "subtitle": "Test Subtitle", 119 | "sortTitle": "Test EBook", 120 | "edition": "Jan 20 2023", 121 | "publishDateText": "Jan 19 2023 7:00PM", 122 | "publishDate": "2023-01-10T19:00:00Z", 123 | "estimatedReleaseDate": "2023-02-10T00:00:00Z", 124 | "description": "Test EBook's description", 125 | "fullDescription": "Test EBook's full description", 126 | "shortDescription": "Test EBook' short description", 127 | "series": "Test EBook Series", 128 | "reserveId": "12345a67-b8c9-12de-a1b2-cd345678efa9" 129 | } -------------------------------------------------------------------------------- /tests/data/audiobook/sync.json: -------------------------------------------------------------------------------- 1 | { 2 | "result": "synchronized", 3 | "cards": [ 4 | { 5 | "cardId": "123456789", 6 | "library": { 7 | "name": "Test Library" 8 | } 9 | } 10 | ], 11 | "loans": [ 12 | { 13 | "firstCreatorId": 999999, 14 | "renewableOn": "2023-03-01T00:00:00Z", 15 | "checkoutDate": "2023-03-01T00:00:00Z", 16 | "expireDate": "2023-03-31T00:00:00Z", 17 | "expires": "2023-03-31T00:00:00Z", 18 | "isLuckyDayCheckout": false, 19 | "isAdvantageFiltered": false, 20 | "isHoldable": true, 21 | "isOwned": true, 22 | "isAssigned": false, 23 | "isBundledChild": false, 24 | "isFormatLockedIn": false, 25 | "isReturnable": true, 26 | "luckyDayOwnedCopies": 0, 27 | "luckyDayAvailableCopies": 0, 28 | "ownedCopies": 0, 29 | "availableCopies": 0, 30 | "checkoutId": 9999999999, 31 | "otherFormats": [], 32 | "subjects": [ 33 | { 34 | "name": "Science", 35 | "id": "9999" 36 | } 37 | ], 38 | "type": { 39 | "name": "Audiobook", 40 | "id": "audiobook" 41 | }, 42 | "covers": { 43 | "cover150Wide": { 44 | "width": 150, 45 | "height": 200, 46 | "href": "http://localhost/mock/cover.jpg" 47 | }, 48 | "cover300Wide": { 49 | "width": 300, 50 | "height": 400, 51 | "href": "http://localhost/mock/cover.jpg" 52 | }, 53 | "cover510Wide": { 54 | "width": 510, 55 | "height": 680, 56 | "href": "http://localhost/mock/cover.jpg" 57 | } 58 | }, 59 | "formats": [ 60 | { 61 | "identifiers": [ 62 | { 63 | "value": "9789999999999", 64 | "type": "ISBN" 65 | } 66 | ], 67 | "isLockedIn": false, 68 | "id": "audiobook-overdrive", 69 | "name": "OverDrive Listen audiobook", 70 | "fulfillmentType": "bifocal" 71 | }, 72 | { 73 | "identifiers": [ 74 | { 75 | "value": "9789999999999", 76 | "type": "ISBN" 77 | } 78 | ], 79 | "isLockedIn": false, 80 | "id": "audiobook-mp3", 81 | "name": "MP3 audiobook", 82 | "isbn": 
"9789999999999", 83 | "fulfillmentType": "odm" 84 | } 85 | ], 86 | "publisherAccount": { 87 | "name": "Tests", 88 | "id": "12345" 89 | }, 90 | "detailedSeries": { 91 | "readingOrder": "2", 92 | "seriesName": "Test Audiobook" 93 | }, 94 | "id": "9999999", 95 | "firstCreatorName": "Test Author", 96 | "firstCreatorSortName": "Author, Test", 97 | "title": "Test Audiobook", 98 | "sortTitle": "Test Audiobook", 99 | "edition": "Jan 20 2023", 100 | "publishDateText": "Jan 10 2023 7:00PM", 101 | "publishDate": "2023-01-10T19:00:00Z", 102 | "holdsCount": 0, 103 | "availabilityType": "always", 104 | "series": "Test Audiobook", 105 | "reserveId": "12345a67-b8c9-12de-a1b2-cd345678efa9", 106 | "websiteId": "99", 107 | "cardId": "123456789" 108 | } 109 | ] 110 | } -------------------------------------------------------------------------------- /odmpy/libby_errors.py: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2023 github.com/ping 2 | # 3 | # This file is part of odmpy. 4 | # 5 | # odmpy is free software: you can redistribute it and/or modify 6 | # it under the terms of the GNU General Public License as published by 7 | # the Free Software Foundation, either version 3 of the License, or 8 | # (at your option) any later version. 9 | # 10 | # odmpy is distributed in the hope that it will be useful, 11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 | # GNU General Public License for more details. 14 | # 15 | # You should have received a copy of the GNU General Public License 16 | # along with odmpy. If not, see . 17 | # 18 | 19 | import json 20 | from http import HTTPStatus 21 | 22 | import requests as requests 23 | 24 | # 25 | # For use with LibbyClient 26 | # 27 | 28 | 29 | class ClientError(Exception): 30 | """Generic error class, catch-all for most client issues.""" 31 | 32 | def __init__( 33 | self, 34 | msg: str, 35 | http_status: int = 0, 36 | error_response: str = "", 37 | ): 38 | self.http_status = http_status or 0 39 | self.error_response = error_response 40 | try: 41 | self.error_response_obj = json.loads(self.error_response) 42 | except ValueError: 43 | self.error_response_obj = {} 44 | super(ClientError, self).__init__(msg) 45 | 46 | @property 47 | def msg(self): 48 | return self.args[0] 49 | 50 | def __str__(self): 51 | return ( 52 | f"<{type(self).__module__}.{type(self).__name__}; http_status={self.http_status}, " 53 | f"msg='{self.msg}', error_response='{self.error_response}''>" 54 | ) 55 | 56 | 57 | class ClientConnectionError(ClientError): 58 | """Connection error""" 59 | 60 | 61 | class ClientTimeoutError(ClientError): 62 | """Timeout error""" 63 | 64 | 65 | class ClientBadRequestError(ClientError): 66 | """Raised when an HTTP 400 response is received.""" 67 | 68 | 69 | class ErrorHandler(object): 70 | @staticmethod 71 | def process(http_err: requests.HTTPError) -> None: 72 | """ 73 | Try to process an HTTP error from the api appropriately. 
74 | 75 | :param http_err: requests.HTTPError instance 76 | :raises ClientError: 77 | :return: 78 | """ 79 | # json response 80 | if ( 81 | http_err.response.status_code == HTTPStatus.BAD_REQUEST 82 | and http_err.response.headers.get("content-type", "").startswith( 83 | "application/json" 84 | ) 85 | ): 86 | error = http_err.response.json() 87 | if error.get("result", "") == "upstream_failure": 88 | upstream = error.get("upstream", {}) 89 | if upstream: 90 | raise ClientBadRequestError( 91 | msg=f'{upstream.get("userExplanation", "")} [errorcode: {upstream.get("errorCode", "")}]', 92 | http_status=http_err.response.status_code, 93 | error_response=http_err.response.text, 94 | ) from http_err 95 | 96 | raise ClientBadRequestError( 97 | msg=str(error), 98 | http_status=http_err.response.status_code, 99 | error_response=http_err.response.text, 100 | ) from http_err 101 | 102 | # final fallback 103 | raise ClientError( 104 | msg=str(http_err), 105 | http_status=http_err.response.status_code, 106 | error_response=http_err.response.text, 107 | ) from http_err 108 | -------------------------------------------------------------------------------- /tests/data.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from pathlib import Path 3 | from typing import List 4 | 5 | 6 | @dataclass 7 | class ExpectedResult: 8 | book_folder: Path 9 | merged_book_basename: str 10 | mp3_name_format: str 11 | total_parts: int 12 | total_chapters: int 13 | chapter_durations_sec: List[int] 14 | 15 | 16 | def get_expected_result(test_downloads_dir: Path, test_file: str) -> ExpectedResult: 17 | return ExpectedResult( 18 | book_folder=test_downloads_dir.joinpath(book_folders[test_file]), 19 | merged_book_basename=merged_book_basenames[test_file], 20 | mp3_name_format=mp3_name_formats[test_file], 21 | total_parts=book_parts[test_file], 22 | total_chapters=book_chapters[test_file], 23 | chapter_durations_sec=book_chapter_durations[test_file], 24 | ) 25 | 26 | 27 | book_folders = { 28 | "test1.odm": "Ceremonies For Christmas - Robert Herrick", 29 | "test2.odm": "크리스마스를 위한 의식 - 로버트 Herrick", 30 | "test3.odm": "Ceremonies For Christmas - Robert Herrick", 31 | "test4.odm": "Ceremonies For Christmas - Robert Herrick", 32 | "test_ref24.odm": "Ceremonies For Christmas - Robert Herrick", 33 | } 34 | merged_book_basenames = { 35 | "test1.odm": "Ceremonies For Christmas - Robert Herrick", 36 | "test2.odm": "크리스마스를 위한 의식 - 로버트 Herrick", 37 | "test3.odm": "Ceremonies For Christmas - Robert Herrick", 38 | "test4.odm": "Ceremonies For Christmas - Robert Herrick", 39 | "test_ref24.odm": "Ceremonies For Christmas - Robert Herrick", 40 | } 41 | mp3_name_formats = { 42 | "test1.odm": "ceremonies-for-christmas-part-{:02}.mp3", 43 | "test2.odm": "크리스마스를-위한-의식-part-{:02}.mp3", 44 | "test3.odm": "ceremonies-for-christmas-part-{:02}.mp3", 45 | "test4.odm": "ceremonies-for-christmas-part-{:02}.mp3", 46 | "test_ref24.odm": "ceremonies-for-christmas-part-{:02}.mp3", 47 | } 48 | part_title_formats = { 49 | "test1.odm": "{:02d} - Ceremonies For Christmas", 50 | "test2.odm": "{:02d} - 크리스마스를 위한 의식", 51 | "test3.odm": "{:02d} Issue 17", 52 | "test4.odm": "{:02d} Issue 17", 53 | "test_ref24.odm": "{:02d} Issue 17", 54 | } 55 | markers = { 56 | "test1.odm": [ 57 | "Marker 1", 58 | "Marker 2", 59 | "Marker 3", 60 | ], 61 | "test2.odm": [ 62 | "마커 1", 63 | "마커 2", 64 | "마커 3", 65 | ], 66 | "test3.odm": [ 67 | "Ball Lightning", 68 | "Prelude", 69 | "Part 1 - College", 70 | "Part 1 - 
Strange Phenomena 1", 71 | "Part 1 - Ball Lightning", 72 | ], 73 | "test4.odm": [ 74 | "Ball Lightning", 75 | "Prelude", 76 | "Part 1 - College", 77 | "Part 1 - Strange Phenomena 1", 78 | "Part 1 - Ball Lightning", 79 | ], 80 | "test_ref24.odm": [ 81 | "Ball Lightning", 82 | "Prelude", 83 | "Part 1 - College", 84 | "Part 1 - Strange Phenomena 1", 85 | "Part 1 - Ball Lightning", 86 | ], 87 | } 88 | album_artists = { 89 | "test1.odm": "Robert Herrick", 90 | "test2.odm": "로버트 Herrick", 91 | "test3.odm": "Robert Herrick", 92 | "test4.odm": "Robert Herrick", 93 | "test_ref24.odm": "Robert Herrick", 94 | } 95 | book_parts = { 96 | "test1.odm": 3, 97 | "test2.odm": 3, 98 | "test3.odm": 1, 99 | "test4.odm": 1, 100 | "test_ref24.odm": 1, 101 | } 102 | book_chapters = { 103 | "test1.odm": 3, 104 | "test2.odm": 3, 105 | "test3.odm": 5, 106 | "test4.odm": 5, 107 | "test_ref24.odm": 5, 108 | } 109 | book_chapter_durations = { 110 | "test1.odm": [67, 61, 66, 64, 66, 46, 56, 56, 60, 52, 47], 111 | "test2.odm": [67, 61, 66, 64, 66, 46, 56, 56, 60, 52, 47], 112 | "test3.odm": [15, 15, 10, 15, 6], 113 | "test4.odm": [15, 15, 10, 15, 6], 114 | "test_ref24.odm": [15, 15, 10, 15, 6], 115 | } 116 | -------------------------------------------------------------------------------- /tests/data/test.opf.xml: -------------------------------------------------------------------------------- 1 | 2 | Attack Surfaceen978166491325757546140fef5121-bb1f-42a5-b62a-d9fded939d50Cory DoctorowAmber BensonCordoc-Co LLC<p><strong>Cory Doctorow's Attack Surface is a standalone novel set in the world of New York Times bestsellers Little Brother and Homeland.</strong></p><br /><p>Most days, Masha Maximow was sure she'd chosen the winning side.</p><br /><p>In her day job as a counterterrorism wizard for an transnational cybersecurity firm, she made the hacks that allowed repressive regimes to spy on dissidents, and manipulate their every move. The perks were fantastic, and the pay was obscene.</p><br /><p>Just for fun, and to piss off her masters, Masha sometimes used her mad skills to help those same troublemakers evade detection, if their cause was just. It was a dangerous game and a hell of a rush. But seriously self-destructive. And unsustainable.</p><br /><p>When her targets were strangers in faraway police states, it was easy to compartmentalize, to ignore the collateral damage of murder, rape, and torture. 
But when it hits close to home, and the hacks and exploits she's devised are directed at her friends and family—including boy wonder Marcus Yallow, her old crush and archrival, and his entourage of naïve idealists—Masha realizes she has to choose.</p><br /><p>And whatever choice she makes, someone is going to get hurt.</p>2020-10-13T04:00:00Z -------------------------------------------------------------------------------- /tests/utils_tests.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import string 3 | import unittest 4 | from datetime import datetime 5 | from pathlib import Path 6 | from random import choices 7 | 8 | from odmpy import cli_utils 9 | from odmpy import utils 10 | from tests.base import is_windows 11 | 12 | 13 | class UtilsTests(unittest.TestCase): 14 | def test_sanitize_path(self): 15 | self.assertEqual( 16 | utils.sanitize_path(r'ac:d"e/f\g|h?i*j_ac:d"e/f\g|h?i*j', ""), 17 | "abcdefghij_abcdefghij" 18 | if is_windows 19 | else r'ac:d"ef\g|h?i*j_ac:d"ef\g|h?i*j', 20 | ) 21 | self.assertEqual( 22 | utils.sanitize_path(r'ac:d"e/f\g|h?i*j'), 23 | "a-b-c-d-e-f-g-h-i-j" if is_windows else r'ac:d"e-f\g|h?i*j', 24 | ) 25 | 26 | self.assertEqual( 27 | utils.sanitize_path("abc\ndef\tghi"), 28 | "abcdefghi", 29 | ) 30 | self.assertEqual( 31 | utils.sanitize_path("Español 中文 русский 한국어 日本語"), 32 | "Español 中文 русский 한국어 日本語", 33 | ) 34 | self.assertEqual( 35 | utils.sanitize_path("abc_def.,ghi", "-", exclude_chars="_.,"), 36 | "abc-def--ghi", 37 | ) 38 | self.assertEqual( 39 | utils.sanitize_path("abc_def.,ghi", "-", exclude_chars="_.,-"), 40 | "abcdefghi", 41 | ) 42 | 43 | def test_sanitize_path_mkdir(self): 44 | # test if the folder and file can actually be created on the OS 45 | ts = int(datetime.utcnow().timestamp() * 1000) 46 | random_text = "".join(choices(string.ascii_lowercase, k=10)) 47 | sanitized_path = utils.sanitize_path( 48 | rf'test_{random_text}_{ts}<>:"/\|?*', sub_text="" 49 | ) 50 | if is_windows: 51 | self.assertEqual(sanitized_path, f"test_{random_text}_{ts}") 52 | test_path = Path(sanitized_path) 53 | test_file = test_path.joinpath(f"{sanitized_path}.txt") 54 | try: 55 | test_path.mkdir(parents=True) 56 | self.assertTrue(test_path.is_dir()) 57 | with test_file.open("w", encoding="utf-8") as f: 58 | f.write(sanitized_path) 59 | self.assertTrue(test_file.is_file()) 60 | finally: 61 | if test_file.exists(): 62 | test_file.unlink() 63 | if test_path.exists(): 64 | test_path.rmdir() 65 | 66 | def test_slugify(self): 67 | self.assertEqual( 68 | utils.slugify("Español 中文 русский 한국어 日本語", allow_unicode=True), 69 | "español-中文-русский-한국어-日本語", 70 | ) 71 | self.assertEqual( 72 | utils.slugify("Abc Def Ghi!?", allow_unicode=True), 73 | "abc-def-ghi", 74 | ) 75 | 76 | def test_parse_duration_to_milliseconds(self): 77 | self.assertEqual( 78 | utils.parse_duration_to_milliseconds("1:23:45.678"), 79 | 1 * 60 * 60 * 1000 + 23 * 60 * 1000 + 45 * 1000 + 678, 80 | ) 81 | self.assertEqual(utils.parse_duration_to_milliseconds("12:00"), 12 * 60 * 1000) 82 | 83 | def test_parse_duration_to_seconds(self): 84 | self.assertEqual(utils.parse_duration_to_seconds("12:00"), 12 * 60) 85 | self.assertEqual(utils.parse_duration_to_seconds("12:00.6"), 12 * 60 + 1) 86 | 87 | def test_positive_int(self): 88 | self.assertEqual(cli_utils.positive_int("1"), 1) 89 | 90 | with self.assertRaises(argparse.ArgumentTypeError): 91 | _ = cli_utils.positive_int("x") 92 | 93 | with self.assertRaises(argparse.ArgumentTypeError): 94 | _ = 
cli_utils.positive_int("-1") 95 | 96 | def test_valid_book_folder_file_format(self): 97 | self.assertEqual( 98 | cli_utils.valid_book_folder_file_format( 99 | "%(Author)s/%(Series)s/%(Title)s-%(Edition)s-%(ID)s" 100 | ), 101 | "%(Author)s/%(Series)s/%(Title)s-%(Edition)s-%(ID)s", 102 | ) 103 | 104 | with self.assertRaises(argparse.ArgumentTypeError) as context: 105 | _ = cli_utils.valid_book_folder_file_format("%(X)s") 106 | self.assertIn("Invalid field 'X'", str(context.exception)) 107 | 108 | def test_mimetypes_guess(self): 109 | for f in ( 110 | "a.xhtml", 111 | "a.html", 112 | "a.css", 113 | "a.png", 114 | "a.gif", 115 | "a.jpeg", 116 | "a.jpg", 117 | "a.otf", 118 | "a.ttf", 119 | "a.eot", 120 | "a.woff", 121 | "a.woff2", 122 | "x/a.svg", 123 | "http://localhost/x/a.ncx", 124 | ): 125 | with self.subTest(file_name=f): 126 | mime_type = utils.guess_mimetype(f) 127 | self.assertIsNotNone(mime_type, f"Unable to guess mimetype for {f}") 128 | -------------------------------------------------------------------------------- /tests/odmpy_shared_tests.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | from functools import cmp_to_key 3 | 4 | from odmpy.processing import shared 5 | from odmpy.processing.ebook import _sort_title_contents 6 | from tests.base import BaseTestCase 7 | 8 | 9 | class ProcessingSharedTests(BaseTestCase): 10 | def test_extract_authors_from_openbook(self): 11 | openbook_mock = { 12 | "creator": [ 13 | {"name": "A", "role": "author"}, 14 | {"name": "B", "role": "editor"}, 15 | ] 16 | } 17 | self.assertEqual(shared.extract_authors_from_openbook(openbook_mock), ["A"]) 18 | openbook_mock = { 19 | "creator": [ 20 | {"name": "B", "role": "editor"}, 21 | {"name": "B2", "role": "editor"}, 22 | {"name": "C", "role": "publisher"}, 23 | ] 24 | } 25 | self.assertEqual( 26 | shared.extract_authors_from_openbook(openbook_mock), ["B", "B2"] 27 | ) 28 | openbook_mock = { 29 | "creator": [ 30 | {"name": "C", "role": "publisher"}, 31 | ] 32 | } 33 | self.assertEqual(shared.extract_authors_from_openbook(openbook_mock), ["C"]) 34 | 35 | def test_extract_isbn(self): 36 | formats = [ 37 | { 38 | "identifiers": [ 39 | {"value": "9780000000000", "type": "ISBN"}, 40 | {"value": "tantor_audio#9780000000000", "type": "8"}, 41 | {"value": "9780000000001", "type": "LibraryISBN"}, 42 | ], 43 | "isbn": "9780000000001", 44 | "id": "audiobook-overdrive", 45 | }, 46 | { 47 | "identifiers": [ 48 | {"value": "9780000000000", "type": "ISBN"}, 49 | {"value": "tantor_audio#9780000000000", "type": "8"}, 50 | {"value": "9780000000001", "type": "LibraryISBN"}, 51 | ], 52 | "isbn": "9780000000001", 53 | "id": "audiobook-mp3", 54 | }, 55 | ] 56 | self.assertEqual( 57 | shared.extract_isbn(formats, ["audiobook-mp3"]), "9780000000001" 58 | ) 59 | formats = [ 60 | { 61 | "identifiers": [ 62 | {"value": "9780000000000", "type": "ISBN"}, 63 | {"value": "9780000000001", "type": "LibraryISBN"}, 64 | ], 65 | "id": "audiobook-mp3", 66 | } 67 | ] 68 | self.assertEqual( 69 | shared.extract_isbn(formats, ["audiobook-mp3"]), "9780000000001" 70 | ) 71 | formats = [ 72 | { 73 | "identifiers": [ 74 | {"value": "9780000000000", "type": "ISBN"}, 75 | {"value": "9780000000001", "type": "X"}, 76 | ], 77 | "id": "audiobook-mp3", 78 | } 79 | ] 80 | self.assertEqual( 81 | shared.extract_isbn(formats, ["audiobook-mp3"]), "9780000000000" 82 | ) 83 | 84 | def test_generate_names(self): 85 | args = argparse.Namespace( 86 | book_file_format="%(Title)s - %(Author)s", 87 | 
book_folder_format="%(Series)s - %(ReadingOrder)s", 88 | download_dir=str(self.test_downloads_dir), 89 | no_book_folder=False, 90 | remove_from_paths=None, 91 | ) 92 | book_folder, book_file_name = shared.generate_names( 93 | title="Test Title", 94 | series="Test Series", 95 | series_reading_order="3", 96 | authors=["Author1", "Author2"], 97 | edition="", 98 | title_id="", 99 | args=args, 100 | logger=self.logger, 101 | ) 102 | self.assertEqual(book_folder.stem, "Test Series - 3") 103 | self.assertEqual(book_file_name.name, "Test Title - Author1, Author2.mp3") 104 | 105 | args = argparse.Namespace( 106 | book_file_format="%(Title)s - %(Author)s", 107 | book_folder_format="%(Title)s - %(Author)s", 108 | download_dir=str(self.test_downloads_dir), 109 | no_book_folder=False, 110 | remove_from_paths=None, 111 | ) 112 | authors = [f"Test Author {i}" for i in range(1, 50)] 113 | book_folder, book_file_name = shared.generate_names( 114 | title="Test Title", 115 | series="Test Series", 116 | series_reading_order="3", 117 | authors=authors, 118 | edition="", 119 | title_id="", 120 | args=args, 121 | logger=self.logger, 122 | ) 123 | self.assertEqual(book_folder.stem, f"Test Title - {authors[0]}") 124 | self.assertEqual(book_file_name.name, f"Test Title - {authors[0]}.mp3") 125 | 126 | def test_sort_title_contents(self): 127 | entries = [ 128 | {"url": "http://localhost/assets/3.jpg"}, 129 | {"url": "http://localhost/assets/4.css"}, 130 | {"url": "http://localhost/pages/2.xhtml?cmpt=12345"}, 131 | {"url": "http://localhost/pages/1.xhtml?cmpt=12345"}, 132 | ] 133 | 134 | sorted_entries = sorted(entries, key=cmp_to_key(_sort_title_contents)) 135 | self.assertEqual( 136 | sorted_entries, 137 | [ 138 | {"url": "http://localhost/pages/1.xhtml?cmpt=12345"}, 139 | {"url": "http://localhost/pages/2.xhtml?cmpt=12345"}, 140 | {"url": "http://localhost/assets/3.jpg"}, 141 | {"url": "http://localhost/assets/4.css"}, 142 | ], 143 | ) 144 | -------------------------------------------------------------------------------- /.github/workflows/lint-test.yml: -------------------------------------------------------------------------------- 1 | name: "Lint and Test" 2 | 3 | on: 4 | push: 5 | branches: 6 | - '**' 7 | tags: 8 | - '!**' 9 | paths: 10 | - odmpy/** 11 | - tests/** 12 | - '*.py' 13 | - '.*' 14 | - run_tests.sh 15 | - 'requirements*.txt' 16 | - .github/workflows/lint-test.yml 17 | pull_request: 18 | branches: 19 | - '**' 20 | workflow_dispatch: 21 | 22 | jobs: 23 | lint: 24 | runs-on: ubuntu-latest 25 | strategy: 26 | matrix: 27 | python-version: [ "3.7", "3.8", "3.9", "3.10" , "3.11" ] 28 | steps: 29 | - uses: actions/checkout@v3 30 | with: 31 | submodules: true 32 | fetch-depth: 1 33 | - name: Set up Python ${{ matrix.python-version }} 34 | uses: actions/setup-python@v4 35 | with: 36 | python-version: ${{ matrix.python-version }} 37 | - name: Install dependencies 38 | run: | 39 | python -m pip install --upgrade pip 40 | pip -q install -r requirements.txt 41 | pip -q install -r requirements-dev.txt 42 | - name: Compile all 43 | run: | 44 | python -m compileall odmpy tests 45 | - name: Analysing the code with black 46 | run: | 47 | black --check setup.py odmpy tests 48 | # - name: Analysing the code with flake8 49 | # run: | 50 | # flake8 setup.py odmpy tests 51 | - name: Analysing the code with ruff 52 | run: | 53 | ruff check setup.py odmpy tests 54 | # keep pylint until https://github.com/astral-sh/ruff/issues/970 55 | - name: Analysing the code with pylint 56 | run: | 57 | pylint setup.py odmpy tests 58 | - 
name: Analysing the code with mypy 59 | run: | 60 | mypy --package odmpy --package tests 61 | 62 | tests: 63 | runs-on: ${{ matrix.os }} 64 | strategy: 65 | matrix: 66 | os: [ubuntu-latest, windows-latest] 67 | python-version: [ "3.7", "3.8", "3.9", "3.10" , "3.11" ] 68 | needs: lint 69 | steps: 70 | - uses: FedericoCarboni/setup-ffmpeg@v2 71 | id: setup-ffmpeg 72 | - uses: actions/checkout@v3 73 | with: 74 | submodules: true 75 | fetch-depth: 1 76 | - name: Set up Python ${{ matrix.python-version }} 77 | uses: actions/setup-python@v4 78 | with: 79 | python-version: ${{ matrix.python-version }} 80 | - name: Install dependencies 81 | # Installing wheel due to https://github.com/pypa/pip/issues/8559 82 | run: | 83 | python3 -m pip -q install --upgrade pip wheel 84 | python3 -m pip -q install -r requirements.txt --upgrade 85 | python3 -m pip -q install -r requirements-dev.txt --upgrade 86 | - name: Run tests on ${{ matrix.os }} with python ${{ matrix.python-version }} 87 | run: | 88 | cd ${GITHUB_WORKSPACE} 89 | sh run_tests.sh 90 | coverage lcov 91 | mv .coverage ".coverage.${{ matrix.os }}.${{ matrix.python-version }}" 92 | shell: bash 93 | - name: Test installation process 94 | run: | 95 | python3 setup.py install 96 | odmpy --version 97 | shell: bash 98 | - name: Upload coverage artifacts 99 | uses: actions/upload-artifact@v3 100 | with: 101 | name: coverage-results 102 | path: .coverage.* 103 | retention-days: 1 104 | - name: Coveralls Parallel 105 | uses: coverallsapp/github-action@v2 106 | with: 107 | path-to-lcov: "coverage.lcov" 108 | flag-name: run-${{ matrix.os }}-${{ matrix.python-version }} 109 | parallel: true 110 | 111 | coverage-report: 112 | runs-on: ubuntu-latest 113 | needs: tests 114 | steps: 115 | - name: Set up Python 116 | uses: actions/setup-python@v4 117 | with: 118 | python-version: "3.11" 119 | - name: Install dependencies 120 | run: | 121 | python3 -m pip install --upgrade pip 122 | python3 -m pip install coverage coverage-badge 123 | - name: Checkout source 124 | uses: actions/checkout@v3 125 | with: 126 | fetch-depth: 1 127 | path: "source" 128 | - name: Coveralls Finished 129 | uses: coverallsapp/github-action@v2 130 | with: 131 | parallel-finished: true 132 | - name: Download a single artifact 133 | uses: actions/download-artifact@v3 134 | with: 135 | name: coverage-results 136 | path: source/ 137 | - name: Merge and report 138 | run: | 139 | cd source/ 140 | coverage combine && coverage json && python cov2md.py 141 | cat 'coverage.md' >> $GITHUB_STEP_SUMMARY 142 | coverage html -d "$GITHUB_WORKSPACE/coverage/" --precision=1 --title="Coverage Report for ${GITHUB_SHA:0:7}" 143 | coverage-badge -o "$GITHUB_WORKSPACE/coverage/badge.svg" -f 144 | - name: Update coverage html report 145 | uses: actions/upload-artifact@v3 146 | with: 147 | name: coverage-report 148 | path: coverage/ 149 | retention-days: 14 150 | - name: Checkout gh-pages 151 | uses: actions/checkout@v3 152 | if: github.ref == 'refs/heads/master' 153 | with: 154 | ref: "gh-pages" 155 | fetch-depth: 1 156 | path: "pages" 157 | - name: Publish coverage to gh-pages 158 | if: github.ref == 'refs/heads/master' 159 | run: | 160 | cd "$GITHUB_WORKSPACE/pages/" 161 | rm -rf coverage/ 162 | rm -f ../coverage/.gitignore 163 | mv ../coverage . 
164 | git config user.name github-actions 165 | git config user.email github-actions@github.com 166 | git add -A coverage 167 | git status 168 | if [[ `git status --porcelain --untracked-files=no` ]]; then git commit -m "Updated coverage results from $GITHUB_SHA"; git push; fi 169 | -------------------------------------------------------------------------------- /tests/data/ebook/sync.json: -------------------------------------------------------------------------------- 1 | { 2 | "result": "synchronized", 3 | "cards": [ 4 | { 5 | "cardId": "123456789", 6 | "library": { 7 | "name": "Test Library" 8 | } 9 | } 10 | ], 11 | "loans": [ 12 | { 13 | "firstCreatorId": 999999, 14 | "renewableOn": "2023-03-01T00:00:00Z", 15 | "checkoutDate": "2023-03-01T00:00:00Z", 16 | "expireDate": "2023-03-31T00:00:00Z", 17 | "expires": "2023-03-31T00:00:00Z", 18 | "isLuckyDayCheckout": false, 19 | "isAdvantageFiltered": false, 20 | "isHoldable": true, 21 | "isOwned": true, 22 | "isAssigned": false, 23 | "isBundledChild": false, 24 | "isFormatLockedIn": false, 25 | "isReturnable": true, 26 | "luckyDayOwnedCopies": 0, 27 | "luckyDayAvailableCopies": 0, 28 | "ownedCopies": 0, 29 | "availableCopies": 0, 30 | "checkoutId": 9999999999, 31 | "otherFormats": [], 32 | "subjects": [ 33 | { 34 | "name": "Science", 35 | "id": "9999" 36 | } 37 | ], 38 | "type": { 39 | "name": "eBook", 40 | "id": "ebook" 41 | }, 42 | "covers": { 43 | "cover150Wide": { 44 | "width": 150, 45 | "height": 200, 46 | "href": "http://localhost/mock/cover.jpg" 47 | }, 48 | "cover300Wide": { 49 | "width": 300, 50 | "height": 400, 51 | "href": "http://localhost/mock/cover.jpg" 52 | }, 53 | "cover510Wide": { 54 | "width": 510, 55 | "height": 680, 56 | "href": "http://localhost/mock/cover.jpg" 57 | } 58 | }, 59 | "formats": [ 60 | { 61 | "identifiers": [ 62 | { 63 | "value": "9789999999999", 64 | "type": "ISBN" 65 | } 66 | ], 67 | "isLockedIn": false, 68 | "id": "ebook-overdrive", 69 | "name": "OverDrive Read", 70 | "fulfillmentType": "bifocal" 71 | }, 72 | { 73 | "identifiers": [ 74 | { 75 | "value": "9789999999999", 76 | "type": "ISBN" 77 | } 78 | ], 79 | "isLockedIn": false, 80 | "id": "ebook-epub-adobe", 81 | "name": "EPUB eBook", 82 | "isbn": "9789999999999" 83 | } 84 | ], 85 | "publisherAccount": { 86 | "name": "Tests", 87 | "id": "12345" 88 | }, 89 | "id": "9999999", 90 | "firstCreatorName": "Test Author", 91 | "firstCreatorSortName": "Author, Test", 92 | "title": "Test EBook", 93 | "sortTitle": "Test EBook", 94 | "edition": "Jan 20 2023", 95 | "publishDateText": "Jan 10 2023 7:00PM", 96 | "holdsCount": 0, 97 | "availabilityType": "always", 98 | "series": "Test EBook", 99 | "reserveId": "12345a67-b8c9-12de-a1b2-cd345678efa9", 100 | "websiteId": "99", 101 | "cardId": "123456789" 102 | }, 103 | { 104 | "firstCreatorId": 999999, 105 | "renewableOn": "2023-03-02T00:00:00Z", 106 | "checkoutDate": "2023-03-02T00:00:00Z", 107 | "expireDate": "2023-03-31T00:00:00Z", 108 | "expires": "2023-03-31T00:00:00Z", 109 | "isLuckyDayCheckout": false, 110 | "isAdvantageFiltered": false, 111 | "isHoldable": true, 112 | "isOwned": true, 113 | "isAssigned": false, 114 | "isBundledChild": false, 115 | "isFormatLockedIn": false, 116 | "isReturnable": true, 117 | "luckyDayOwnedCopies": 0, 118 | "luckyDayAvailableCopies": 0, 119 | "ownedCopies": 0, 120 | "availableCopies": 0, 121 | "checkoutId": 9999999999, 122 | "otherFormats": [], 123 | "subjects": [ 124 | { 125 | "name": "Science", 126 | "id": "9999" 127 | } 128 | ], 129 | "type": { 130 | "name": "eBook", 131 | "id": 
"ebook" 132 | }, 133 | "covers": { 134 | "cover150Wide": { 135 | "width": 150, 136 | "height": 200, 137 | "href": "http://localhost/mock/cover.jpg" 138 | }, 139 | "cover300Wide": { 140 | "width": 300, 141 | "height": 400, 142 | "href": "http://localhost/mock/cover.jpg" 143 | }, 144 | "cover510Wide": { 145 | "width": 510, 146 | "height": 680, 147 | "href": "http://localhost/mock/cover.jpg" 148 | } 149 | }, 150 | "formats": [ 151 | { 152 | "identifiers": [ 153 | { 154 | "value": "9789999999999", 155 | "type": "ISBN" 156 | } 157 | ], 158 | "isLockedIn": false, 159 | "id": "ebook-overdrive", 160 | "name": "OverDrive Read", 161 | "fulfillmentType": "bifocal" 162 | }, 163 | { 164 | "identifiers": [ 165 | { 166 | "value": "9789999999999", 167 | "type": "ISBN" 168 | } 169 | ], 170 | "isLockedIn": false, 171 | "id": "ebook-epub-adobe", 172 | "name": "EPUB eBook", 173 | "isbn": "9789999999999" 174 | }, 175 | { 176 | "identifiers": [ 177 | { 178 | "value": "9789999999999", 179 | "type": "ISBN" 180 | } 181 | ], 182 | "id": "ebook-epub-open", 183 | "name": "Open EPUB eBook", 184 | "isbn": "9789999999999" 185 | } 186 | ], 187 | "publisherAccount": { 188 | "name": "Tests", 189 | "id": "12345" 190 | }, 191 | "id": "9999990", 192 | "firstCreatorName": "Test Author", 193 | "firstCreatorSortName": "Author, Test", 194 | "title": "Test Open EBook", 195 | "sortTitle": "Test Open EBook", 196 | "edition": "Jan 20 2023", 197 | "publishDateText": "Jan 10 2023 7:00PM", 198 | "publishDate": "2023-01-10T19:00:00Z", 199 | "holdsCount": 0, 200 | "availabilityType": "always", 201 | "series": "Test EBook", 202 | "reserveId": "12345a67-b8c9-12de-a1b2-cd345678efa9", 203 | "websiteId": "99", 204 | "cardId": "123456789" 205 | } 206 | ] 207 | } -------------------------------------------------------------------------------- /odmpy/utils.py: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2021 github.com/ping 2 | # 3 | # This file is part of odmpy. 4 | # 5 | # odmpy is free software: you can redistribute it and/or modify 6 | # it under the terms of the GNU General Public License as published by 7 | # the Free Software Foundation, either version 3 of the License, or 8 | # (at your option) any later version. 9 | # 10 | # odmpy is distributed in the hope that it will be useful, 11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 | # GNU General Public License for more details. 14 | # 15 | # You should have received a copy of the GNU General Public License 16 | # along with odmpy. If not, see . 17 | # 18 | 19 | import os 20 | import platform 21 | import re 22 | import unicodedata 23 | import xml.etree.ElementTree as ET 24 | from mimetypes import guess_type 25 | from pathlib import Path 26 | from typing import Optional 27 | 28 | from mutagen.mp3 import MP3 # type: ignore[import] 29 | 30 | # 31 | # Small utility type functions used across the board 32 | # 33 | 34 | TIMESTAMP_RE = re.compile( 35 | r"^((?P
<hr>[0-9]+):)?(?P<min>[0-9]+):(?P<sec>[0-9]+)(\.(?P<ms>[0-9]+))?$" 36 | ) 37 | ILLEGAL_WIN_PATH_CHARS_RE = re.compile(r'[<>:"/\\|?*]') 38 | MIMETYPE_MAP = { 39 | ".xhtml": "application/xhtml+xml", 40 | ".html": "text/html", 41 | ".css": "text/css", 42 | ".png": "image/png", 43 | ".gif": "image/gif", 44 | ".jpeg": "image/jpeg", 45 | ".jpg": "image/jpeg", 46 | ".otf": "font/otf", 47 | ".ttf": "font/ttf", 48 | ".woff": "font/woff", 49 | ".woff2": "font/woff2", 50 | ".eot": "application/vnd.ms-fontobject", 51 | ".svg": "image/svg+xml", 52 | ".ncx": "application/x-dtbncx+xml", 53 | } 54 | 55 | 56 | def guess_mimetype(url: str) -> Optional[str]: 57 | """ 58 | Attempt to guess the mimetype for a given url 59 | 60 | :param url: 61 | :return: 62 | """ 63 | url_path = Path(url) 64 | mime_type, _ = guess_type(url_path.name, strict=False) 65 | if not mime_type: 66 | mime_type = MIMETYPE_MAP.get(url_path.suffix.lower(), None) 67 | return mime_type 68 | 69 | 70 | def is_windows() -> bool: 71 | """ 72 | Returns True if running on Windows. 73 | 74 | :return: 75 | """ 76 | return os.name == "nt" or platform.system().lower() == "windows" 77 | 78 | 79 | def plural_or_singular_noun( 80 | value: float, singular_noun: str, plural_noun: str = "" 81 | ) -> str: 82 | """ 83 | Returns the appropriate noun based on the value provided. 84 | 85 | :param value: 86 | :param singular_noun: 87 | :param plural_noun: 88 | :return: 89 | """ 90 | if not plural_noun: 91 | plural_noun = singular_noun + "s" 92 | return plural_noun if value != 1 else singular_noun 93 | 94 | 95 | def sanitize_path(text: str, sub_text: str = "-", exclude_chars: str = "") -> str: 96 | """ 97 | Strips invalid characters from a local file path component. 98 | 99 | :param text: 100 | :param sub_text: 101 | :param exclude_chars: 102 | :return: 103 | """ 104 | if not exclude_chars: 105 | exclude_chars = "" 106 | if os.name == "nt" or platform.system().lower() == "windows": 107 | # just replacing `os.sep` is not enough on Windows 108 | # ref https://github.com/ping/odmpy/issues/30 109 | text = ILLEGAL_WIN_PATH_CHARS_RE.sub(sub_text, text) 110 | for c in exclude_chars: 111 | # example, if "-" is in additional_exclude_chars, we can't use "-" as replacement, 112 | # so we'll just remove it 113 | text = text.replace( 114 | c, sub_text if sub_text and sub_text not in exclude_chars else "" 115 | ) 116 | 117 | text = text.replace(os.sep, sub_text) 118 | # also strip away non-printable chars just to be safe 119 | return "".join(c for c in text if c.isprintable()) 120 | 121 | 122 | def get_element_text(ele: Optional[ET.Element]) -> str: 123 | """ 124 | Returns the element text 125 | 126 | :param ele: 127 | :return: 128 | """ 129 | if (ele is not None) and ele.text: 130 | return ele.text or "" 131 | return "" 132 | 133 | 134 | def parse_duration_to_milliseconds(text: str) -> int: 135 | """ 136 | Converts a duration string into milliseconds 137 | 138 | :param text: A duration string, e.g.
"10:15", "10:15.300", "1:10:15" 139 | :return: 140 | """ 141 | mobj = TIMESTAMP_RE.match(text) 142 | if not mobj: 143 | raise ValueError(f"Invalid timestamp text: {text}") 144 | hours = int(mobj.group("hr") or 0) 145 | minutes = int(mobj.group("min") or 0) 146 | seconds = int(mobj.group("sec") or 0) 147 | milliseconds = int((mobj.group("ms") or "0").ljust(3, "0")) 148 | return hours * 60 * 60 * 1000 + minutes * 60 * 1000 + seconds * 1000 + milliseconds 149 | 150 | 151 | def parse_duration_to_seconds(text: str) -> int: 152 | """ 153 | Converts a duration string into seconds 154 | 155 | :param text: A duration string, e.g. "10:15", "10:15.300", "1:10:15" 156 | :return: 157 | """ 158 | return round(parse_duration_to_milliseconds(text) / 1000.0) 159 | 160 | 161 | def mp3_duration_ms(filename: Path) -> int: 162 | # Ref: https://github.com/ping/odmpy/pull/3 163 | # returns the length of the mp3 in ms 164 | 165 | # eyeD3's audio length function: 166 | # audiofile.info.time_secs 167 | # returns incorrect times due to its header computation 168 | # mutagen does not have this issue 169 | audio = MP3(filename) 170 | if not audio.info: 171 | raise ValueError(f"Unable to parse MP3 info from: {filename}") 172 | return int(round(audio.info.length * 1000)) 173 | 174 | 175 | # From django 176 | def slugify(value: str, allow_unicode: bool = False) -> str: 177 | """ 178 | Convert to ASCII if 'allow_unicode' is False. Convert spaces to hyphens. 179 | Remove characters that aren't alphanumerics, underscores, or hyphens. 180 | Convert to lowercase. Also strip leading and trailing whitespace. 181 | """ 182 | if allow_unicode: 183 | value = unicodedata.normalize("NFKC", value) 184 | value = re.sub(r"[^\w\s-]", "", value, flags=re.U).strip().lower() 185 | return re.sub(r"[-\s]+", "-", value, flags=re.U) 186 | value = ( 187 | unicodedata.normalize("NFKD", value).encode("ascii", "ignore").decode("ascii") 188 | ) 189 | value = re.sub(r"[^\w\s-]", "", value).strip().lower() 190 | return re.sub(r"[-\s]+", "-", value) 191 | -------------------------------------------------------------------------------- /tests/data/audiobook/odm/media.json: -------------------------------------------------------------------------------- 1 | {"firstCreatorId":411244,"youngAdultEligible":false,"juvenileEligible":false,"visitorEligible":false,"isPreReleaseTitle":false,"isPublicPerformanceAllowed":false,"isPublicDomain":false,"isBundledChild":false,"unitsSold":162,"popularity":908,"subjects":[{"name":"Fiction","id":"26"},{"name":"Science Fiction","id":"80"}],"bisacCodes":["FIC028000","FIC028100"],"bisac":[{"description":"Fiction / Science Fiction / General","code":"FIC028000"},{"description":"Fiction / Science Fiction / Cyberpunk","code":"FIC028100"}],"levels":[],"creators":[{"id":411244,"sortName":"Doctorow, Cory","role":"Author","name":"Cory Doctorow"},{"id":648330,"sortName":"Benson, Amber","role":"Narrator","name":"Amber Benson"}],"languages":[{"name":"English","id":"en"}],"ratings":{"maturityLevel":{"name":"General content","id":"generalcontent"},"naughtyScore":{"name":"General content","id":"GeneralContent"}},"reviewCounts":{"publisherSupplier":0,"premium":1},"publisher":{"name":"Cordoc-Co 
LLC","id":"250661"},"keywords":[],"sample":{"href":"https://samples.overdrive.com/?crid=0fef5121-bb1f-42a5-b62a-d9fded939d50&.epub-sample.overdrive.com"},"bundledContentChildrenTitleIds":[],"constraints":{"isDisneyEulaRequired":false},"type":{"name":"Audiobook","id":"audiobook"},"covers":{"cover150Wide":{"primaryColor":{"rgb":{"blue":255,"green":15,"red":0},"hex":"#000FFF"},"width":150,"height":200,"href":"https://img1.od-cdn.com/ImageType-150/7552-1/0FE/F51/21/{0FEF5121-BB1F-42A5-B62A-D9FDED939D50}Img150.jpg"},"cover300Wide":{"primaryColor":{"rgb":{"blue":255,"green":13,"red":1},"hex":"#010DFF"},"width":300,"height":400,"href":"https://img1.od-cdn.com/ImageType-400/7552-1/0FE/F51/21/{0FEF5121-BB1F-42A5-B62A-D9FDED939D50}Img400.jpg"},"cover510Wide":{"primaryColor":{"rgb":{"blue":255,"green":13,"red":1},"hex":"#010DFF"},"width":510,"height":680,"href":"https://img1.od-cdn.com/ImageType-100/7552-1/{0FEF5121-BB1F-42A5-B62A-D9FDED939D50}Img100.jpg"}},"formats":[{"isBundleParent":false,"hasAudioSynchronizedText":false,"identifiers":[{"value":"9781664913257","type":"ISBN"}],"rights":[],"bundledContent":[],"sample":{"href":"https://samples.overdrive.com/?crid=0fef5121-bb1f-42a5-b62a-d9fded939d50&.epub-sample.overdrive.com"},"id":"audiobook-overdrive","name":"OverDrive Listen audiobook","onSaleDateUtc":"2020-10-13T04:00:00Z","fileSize":432378560,"fulfillmentType":"bifocal","isbn":"9781664913257","duration":"15:00:47"},{"isBundleParent":false,"hasAudioSynchronizedText":false,"identifiers":[{"value":"9781664913257","type":"ISBN"}],"rights":[],"bundledContent":[],"sample":{"href":"https://excerpts.cdn.overdrive.com/FormatType-425/7552-1/5754614-AttackSurface.mp3"},"id":"audiobook-mp3","name":"MP3 audiobook","partCount":17,"onSaleDateUtc":"2020-10-13T04:00:00Z","fileSize":432437650,"fulfillmentType":"odm","isbn":"9781664913257","duration":"15:07:46"}],"publisherAccount":{"name":"Findaway","id":"38413"},"detailedSeries":{"rank":15,"seriesId":503067,"seriesName":"Little Brother"},"id":"5754614","firstCreatorName":"Cory Doctorow","firstCreatorSortName":"Doctorow, Cory","title":"Attack Surface","sortTitle":"Attack Surface","starRating":3.4,"starRatingCount":47,"edition":"Unabridged","publishDate":"2020-10-13T04:00:00Z","publishDateText":"10/13/2020","estimatedReleaseDate":"2020-10-13T04:00:00Z","isBundleChild":false,"description":"

Cory Doctorow's Attack Surface is a standalone novel set in the world of New York Times bestsellers Little Brother and Homeland.


Most days, Masha Maximow was sure she'd chosen the winning side.


In her day job as a counterterrorism wizard for an transnational cybersecurity firm, she made the hacks that allowed repressive regimes to spy on dissidents, and manipulate their every move. The perks were fantastic, and the pay was obscene.


Just for fun, and to piss off her masters, Masha sometimes used her mad skills to help those same troublemakers evade detection, if their cause was just. It was a dangerous game and a hell of a rush. But seriously self-destructive. And unsustainable.


When her targets were strangers in faraway police states, it was easy to compartmentalize, to ignore the collateral damage of murder, rape, and torture. But when it hits close to home, and the hacks and exploits she's devised are directed at her friends and family—including boy wonder Marcus Yallow, her old crush and archrival, and his entourage of naïve idealists—Masha realizes she has to choose.


And whatever choice she makes, someone is going to get hurt.

","fullDescription":"

Cory Doctorow's Attack Surface is a standalone novel set in the world of New York Times bestsellers Little Brother and Homeland.


Most days, Masha Maximow was sure she'd chosen the winning side.


In her day job as a counterterrorism wizard for an transnational cybersecurity firm, she made the hacks that allowed repressive regimes to spy on dissidents, and manipulate their every move. The perks were fantastic, and the pay was obscene.


Just for fun, and to piss off her masters, Masha sometimes used her mad skills to help those same troublemakers evade detection, if their cause was just. It was a dangerous game and a hell of a rush. But seriously self-destructive. And unsustainable.


When her targets were strangers in faraway police states, it was easy to compartmentalize, to ignore the collateral damage of murder, rape, and torture. But when it hits close to home, and the hacks and exploits she's devised are directed at her friends and family—including boy wonder Marcus Yallow, her old crush and archrival, and his entourage of naïve idealists—Masha realizes she has to choose.


And whatever choice she makes, someone is going to get hurt.

","shortDescription":"

Cory Doctorow's Attack Surface is a standalone novel set in the world of New York Times bestsellers Little Brother and Homeland.


Most days, Masha Maximow was sure she'd chosen the winning side.


In her day job as a counterterrorism wizard for an transnational cybersecurity firm, she made the hacks that allowed repressive regimes to spy on dissidents, and manipulate their every move. The perks were fantastic, and the pay was obscene.


Just for fun, and to piss off her masters, Masha sometimes used her mad skills to help those same troublemakers evade detection, if their cause was just. It was a dangerous game and a hell of a rush. But seriously self-destructive. And unsustainable.


When her targets were strangers in faraway police states, it was easy to compartmentalize, to ignore the collateral damage of murder, rape, and torture. But when it hits close to home, and the hacks and exploits she's devised are directed...","series":"Little Brother","reserveId":"0fef5121-bb1f-42a5-b62a-d9fded939d50"} -------------------------------------------------------------------------------- /tests/overdrive_tests.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | import requests 4 | 5 | from odmpy.overdrive import OverDriveClient 6 | from tests.base import BaseTestCase 7 | 8 | test_logger = logging.getLogger(__name__) 9 | test_logger.setLevel(logging.WARNING) 10 | client_logger = logging.getLogger(OverDriveClient.__module__) 11 | client_logger.setLevel(logging.WARNING) 12 | requests_logger = logging.getLogger("urllib3") 13 | requests_logger.setLevel(logging.WARNING) 14 | requests_logger.propagate = True 15 | 16 | 17 | class OverDriveClientTests(BaseTestCase): 18 | def setUp(self): 19 | super().setUp() 20 | self.client = OverDriveClient(retry=1) 21 | 22 | def tearDown(self) -> None: 23 | super().tearDown() 24 | self.client.session.close() 25 | 26 | def test_media(self): 27 | item = self.client.media("284716") 28 | for k in ( 29 | "id", 30 | "title", 31 | "sortTitle", 32 | "description", 33 | "fullDescription", 34 | "shortDescription", 35 | "publishDate", 36 | "type", 37 | "formats", 38 | "covers", 39 | "languages", 40 | "creators", 41 | "subjects", 42 | "starRating", 43 | "starRatingCount", 44 | "unitsSold", 45 | "popularity", 46 | ): 47 | with self.subTest(key=k): 48 | self.assertIn(k, item, msg=f'"{k}" not found') 49 | 50 | def test_media_bulk(self): 51 | items = self.client.media_bulk(["284716", "5704038"]) 52 | self.assertEqual(len(items), 2) 53 | for item in items: 54 | for k in ( 55 | "id", 56 | "title", 57 | "sortTitle", 58 | "description", 59 | "fullDescription", 60 | "shortDescription", 61 | "publishDate", 62 | "type", 63 | "formats", 64 | "covers", 65 | "languages", 66 | "creators", 67 | "subjects", 68 | "starRating", 69 | "starRatingCount", 70 | "unitsSold", 71 | "popularity", 72 | ): 73 | with self.subTest(key=k): 74 | self.assertIn(k, item, msg=f'"{k}" not found') 75 | 76 | def test_library(self): 77 | for library_key in ("lapl", "ocpl"): 78 | with self.subTest(library_key=library_key): 79 | library = self.client.library(library_key) 80 | for k in ( 81 | "recommendToLibraryEnabled", 82 | "lastModifiedDate", 83 | "allowAnonymousSampling", 84 | "allowDeepSearch", 85 | "isDemo", 86 | "areLuckyDayTitlesAllocated", 87 | "canAddLibrariesInSora", 88 | "isLuckyDayEnabled", 89 | "isLexisNexis", 90 | "isAuroraEnabled", 91 | "isInstantAccessEnabled", 92 | "hasAdvantageAccounts", 93 | "isAutocompleteEnabled", 94 | "allowRecommendToLibrary", 95 | "isConsortium", 96 | "accessId", 97 | "websiteId", 98 | "accounts", 99 | "settings", 100 | "links", 101 | "messages", 102 | "defaultLanguage", 103 | "supportedLanguages", 104 | "formats", 105 | "enabledPlatforms", 106 | "visitableLibraries", 107 | "luckyDayPreferredLendingPeriods", 108 | "visitorsHaveLowerHoldPriority", 109 | "visitorsCanRecommendTitles", 110 | "visitorsCanPlaceHolds", 111 | "isReadingHistoryEnabled", 112 | "parentCRAccessId", 113 | "showcaseTarget", 114 | "type", 115 | "status", 116 | "name", 117 | "fulfillmentId", 118 | "visitorKey", 119 | "preferredKey", 120 | "id", 121 | ): 122 | with self.subTest(key=k): 123 | self.assertIn(k, library, msg=f'"{k}" not found') 
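# A minimal sketch of how the repeated key-presence loops in these tests could
# be factored into a helper; ``assert_keys_present`` is an illustrative name,
# not an existing odmpy test helper.
from typing import Dict, Iterable
import unittest


def assert_keys_present(
    tc: unittest.TestCase, payload: Dict, expected_keys: Iterable[str]
) -> None:
    # each missing key is reported individually via subTest, mirroring the
    # assertion style used in the test methods above and below
    for key in expected_keys:
        with tc.subTest(key=key):
            tc.assertIn(key, payload, msg=f'"{key}" not found')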
124 | 125 | def test_library_media_availability(self): 126 | availability = self.client.library_media_availability("lapl", "1330527") 127 | for k in ( 128 | "isAdvantageFiltered", 129 | "youngAdultEligible", 130 | "juvenileEligible", 131 | "visitorEligible", 132 | "isRecommendableToLibrary", 133 | "isHoldable", 134 | "isOwned", 135 | "isFastlane", 136 | "isAvailable", 137 | "formats", 138 | "estimatedWaitDays", 139 | "holdsRatio", 140 | "holdsCount", 141 | "luckyDayOwnedCopies", 142 | "luckyDayAvailableCopies", 143 | "ownedCopies", 144 | "availableCopies", 145 | "availabilityType", 146 | "id", 147 | ): 148 | with self.subTest(key=k): 149 | self.assertIn(k, availability, msg=f'"{k}" not found') 150 | 151 | with self.assertRaises(requests.HTTPError) as context: 152 | self.client.library_media_availability("brooklyn", "2006069") 153 | self.assertEqual(context.exception.response.status_code, 404) 154 | 155 | def test_library_media(self): 156 | media = self.client.library_media("lapl", "7017021") 157 | 158 | for k in ( 159 | "id", 160 | "title", 161 | "sortTitle", 162 | "description", 163 | "publishDate", 164 | "type", 165 | "formats", 166 | "covers", 167 | "languages", 168 | "creators", 169 | "subjects", 170 | "isAdvantageFiltered", 171 | "youngAdultEligible", 172 | "juvenileEligible", 173 | "visitorEligible", 174 | "isRecommendableToLibrary", 175 | "isHoldable", 176 | "isOwned", 177 | "isFastlane", 178 | "isAvailable", 179 | "isBundledChild", 180 | ): 181 | with self.subTest(key=k): 182 | self.assertIn(k, media, msg=f'"{k}" not found') 183 | -------------------------------------------------------------------------------- /odmpy/constants.py: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2021 github.com/ping 2 | # 3 | # This file is part of odmpy. 4 | # 5 | # odmpy is free software: you can redistribute it and/or modify 6 | # it under the terms of the GNU General Public License as published by 7 | # the Free Software Foundation, either version 3 of the License, or 8 | # (at your option) any later version. 9 | # 10 | # odmpy is distributed in the hope that it will be useful, 11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 | # GNU General Public License for more details. 14 | # 15 | # You should have received a copy of the GNU General Public License 16 | # along with odmpy. If not, see . 
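# A minimal usage sketch, assuming the UNSUPPORTED_PARSER_ENTITIES map defined
# below is applied by expanding the named entities in raw ODM XML text before
# parsing; expand_entities() is an illustrative helper, not necessarily how
# odmpy itself wires the map in.
import re
import xml.etree.ElementTree as ET
from typing import Dict


def expand_entities(xml_text: str, entities: Dict[str, str]) -> str:
    # replace &name; with its mapped character; names not in the map
    # (e.g. the standard &amp;) are left untouched for the XML parser
    return re.sub(
        r"&([A-Za-z][A-Za-z0-9]*);",
        lambda m: entities.get(m.group(1), m.group(0)),
        xml_text,
    )


# ET.fromstring(expand_entities("<t>fianc&eacute;e</t>", UNSUPPORTED_PARSER_ENTITIES)).text
# evaluates to "fiancée"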
17 | # 18 | 19 | # 20 | # Constants mainly for use with the legacy ODM processing 21 | # 22 | 23 | OMC = "1.2.0" 24 | OS = "10.11.6" 25 | UA = "OverDrive Media Console" 26 | UA_LONG = "OverDrive Media Console/3.7.0.28 iOS/10.3.3" 27 | 28 | PERFORMER_FID = b"TPE3" 29 | LANGUAGE_FID = b"TLAN" 30 | 31 | # Ref: https://github.com/ping/odmpy/issues/19 32 | UNSUPPORTED_PARSER_ENTITIES = { 33 | # https://www.freeformatter.com/html-entities.html#iso88591-characters 34 | "Agrave": "À", 35 | "Aacute": "Á", 36 | "Acirc": "Â", 37 | "Atilde": "Ã", 38 | "Auml": "Ä", 39 | "Aring": "Å", 40 | "AElig": "Æ", 41 | "Ccedil": "Ç", 42 | "Egrave": "È", 43 | "Eacute": "É", 44 | "Ecirc": "Ê", 45 | "Euml": "Ë", 46 | "Igrave": "Ì", 47 | "Iacute": "Í", 48 | "Icirc": "Î", 49 | "Iuml": "Ï", 50 | "ETH": "Ð", 51 | "Ntilde": "Ñ", 52 | "Ograve": "Ò", 53 | "Oacute": "Ó", 54 | "Ocirc": "Ô", 55 | "Otilde": "Õ", 56 | "Ouml": "Ö", 57 | "Oslash": "Ø", 58 | "Ugrave": "Ù", 59 | "Uacute": "Ú", 60 | "Ucirc": "Û", 61 | "Uuml": "Ü", 62 | "Yacute": "Ý", 63 | "THORN": "Þ", 64 | "szlig": "ß", 65 | "agrave": "à", 66 | "aacute": "á", 67 | "acirc": "â", 68 | "atilde": "ã", 69 | "auml": "ä", 70 | "aring": "å", 71 | "aelig": "æ", 72 | "ccedil": "ç", 73 | "egrave": "è", 74 | "eacute": "é", 75 | "ecirc": "ê", 76 | "euml": "ë", 77 | "igrave": "ì", 78 | "iacute": "í", 79 | "icirc": "î", 80 | "iuml": "ï", 81 | "eth": "ð", 82 | "ntilde": "ñ", 83 | "ograve": "ò", 84 | "oacute": "ó", 85 | "ocirc": "ô", 86 | "otilde": "õ", 87 | "ouml": "ö", 88 | "oslash": "ø", 89 | "ugrave": "ù", 90 | "uacute": "ú", 91 | "ucirc": "û", 92 | "uuml": "ü", 93 | "yacute": "ý", 94 | "thorn": "þ", 95 | "yuml": "ÿ", 96 | # https://www.freeformatter.com/html-entities.html#iso88591-symbols 97 | "nbsp": " ", 98 | "iexcl": "¡", 99 | "cent": "¢", 100 | "pound": "£", 101 | "curren": "¤", 102 | "yen": "¥", 103 | "brvbar": "¦", 104 | "sect": "§", 105 | "uml": "¨", 106 | "copy": "©", 107 | "ordf": "ª", 108 | "laquo": "«", 109 | "not": "¬", 110 | "shy": "­", 111 | "reg": "®", 112 | "macr": "¯", 113 | "deg": "°", 114 | "plusmn": "±", 115 | "sup2": "²", 116 | "sup3": "³", 117 | "acute": "´", 118 | "micro": "µ", 119 | "para": "¶", 120 | "cedil": "¸", 121 | "sup1": "¹", 122 | "ordm": "º", 123 | "raquo": "»", 124 | "frac14": "¼", 125 | "frac12": "½", 126 | "frac34": "¾", 127 | "iquest": "¿", 128 | "times": "×", 129 | "divide": "÷", 130 | # https://www.freeformatter.com/html-entities.html#math-symbols 131 | "forall": "∀", 132 | "part": "∂", 133 | "exist": "∃", 134 | "empty": "∅", 135 | "nabla": "∇", 136 | "isin": "∈", 137 | "notin": "∉", 138 | "ni": "∋", 139 | "prod": "∏", 140 | "sum": "∑", 141 | "minus": "−", 142 | "lowast": "∗", 143 | "radic": "√", 144 | "prop": "∝", 145 | "infin": "∞", 146 | "ang": "∠", 147 | "and": "∧", 148 | "or": "∨", 149 | "cap": "∩", 150 | "cup": "∪", 151 | "int": "∫", 152 | "there4": "∴", 153 | "sim": "∼", 154 | "cong": "≅", 155 | "asymp": "≈", 156 | "ne": "≠", 157 | "equiv": "≡", 158 | "le": "≤", 159 | "ge": "≥", 160 | "sub": "⊂", 161 | "sup": "⊃", 162 | "nsub": "⊄", 163 | "sube": "⊆", 164 | "supe": "⊇", 165 | "oplus": "⊕", 166 | "otimes": "⊗", 167 | "perp": "⊥", 168 | "sdot": "⋅", 169 | # https://www.freeformatter.com/html-entities.html#greek-letters 170 | "Alpha": "Α", 171 | "Beta": "Β", 172 | "Gamma": "Γ", 173 | "Delta": "Δ", 174 | "Epsilon": "Ε", 175 | "Zeta": "Ζ", 176 | "Eta": "Η", 177 | "Theta": "Θ", 178 | "Iota": "Ι", 179 | "Kappa": "Κ", 180 | "Lambda": "Λ", 181 | "Mu": "Μ", 182 | "Nu": "Ν", 183 | "Xi": "Ξ", 184 | "Omicron": "Ο", 185 | "Pi": "Π", 186 | "Rho": 
"Ρ", 187 | "Sigma": "Σ", 188 | "Tau": "Τ", 189 | "Upsilon": "Υ", 190 | "Phi": "Φ", 191 | "Chi": "Χ", 192 | "Psi": "Ψ", 193 | "Omega": "Ω", 194 | "alpha": "α", 195 | "beta": "β", 196 | "gamma": "γ", 197 | "delta": "δ", 198 | "epsilon": "ε", 199 | "zeta": "ζ", 200 | "eta": "η", 201 | "theta": "θ", 202 | "iota": "ι", 203 | "kappa": "κ", 204 | "lambda": "λ", 205 | "mu": "μ", 206 | "nu": "ν", 207 | "xi": "ξ", 208 | "omicron": "ο", 209 | "pi": "π", 210 | "rho": "ρ", 211 | "sigmaf": "ς", 212 | "sigma": "σ", 213 | "tau": "τ", 214 | "upsilon": "υ", 215 | "phi": "φ", 216 | "chi": "χ", 217 | "psi": "ψ", 218 | "omega": "ω", 219 | "thetasym": "ϑ", 220 | "upsih": "ϒ", 221 | "piv": "ϖ", 222 | # https://www.freeformatter.com/html-entities.html#misc-html-entities 223 | "OElig": "Œ", 224 | "oelig": "œ", 225 | "Scaron": "Š", 226 | "scaron": "š", 227 | "Yuml": "Ÿ", 228 | "fnof": "ƒ", 229 | "circ": "ˆ", 230 | "tilde": "˜", 231 | "ensp": " ", 232 | "emsp": " ", 233 | "thinsp": " ", 234 | "zwnj": "‌", 235 | "zwj": "‍", 236 | "lrm": "‎", 237 | "rlm": "‏", # pylint: disable=(bidirectional-unicode 238 | "ndash": "–", 239 | "mdash": "—", 240 | "lsquo": "‘", 241 | "rsquo": "’", 242 | "sbquo": "‚", 243 | "ldquo": "“", 244 | "rdquo": "”", 245 | "bdquo": "„", 246 | "dagger": "†", 247 | "Dagger": "‡", 248 | "bull": "•", 249 | "hellip": "…", 250 | "permil": "‰", 251 | "prime": "′", 252 | "Prime": "″", 253 | "lsaquo": "‹", 254 | "rsaquo": "›", 255 | "oline": "‾", 256 | "euro": "€", 257 | "trade": "™", 258 | "larr": "←", 259 | "uarr": "↑", 260 | "rarr": "→", 261 | "darr": "↓", 262 | "harr": "↔", 263 | "crarr": "↵", 264 | "lceil": "⌈", 265 | "rceil": "⌉", 266 | "lfloor": "⌊", 267 | "rfloor": "⌋", 268 | "loz": "◊", 269 | "spades": "♠", 270 | "clubs": "♣", 271 | "hearts": "♥", 272 | "diams": "♦", 273 | } 274 | -------------------------------------------------------------------------------- /odmpy/overdrive.py: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2023 github.com/ping 2 | # 3 | # This file is part of odmpy. 4 | # 5 | # odmpy is free software: you can redistribute it and/or modify 6 | # it under the terms of the GNU General Public License as published by 7 | # the Free Software Foundation, either version 3 of the License, or 8 | # (at your option) any later version. 9 | # 10 | # odmpy is distributed in the hope that it will be useful, 11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 | # GNU General Public License for more details. 14 | # 15 | # You should have received a copy of the GNU General Public License 16 | # along with odmpy. If not, see . 17 | # 18 | 19 | import logging 20 | from typing import Optional, Dict, List 21 | from urllib.parse import urljoin 22 | 23 | import requests 24 | from requests.adapters import HTTPAdapter, Retry 25 | 26 | # 27 | # Basic skeletal client for the OverDrive Thunder API 28 | # 29 | 30 | USER_AGENT = ( 31 | "Mozilla/5.0 (Macintosh; Intel Mac OS X 11_1) AppleWebKit/605.1.15 (KHTML, like Gecko) " # noqa 32 | "Version/14.0.2 Safari/605.1.15" 33 | ) 34 | SITE_URL = "https://libbyapp.com" 35 | THUNDER_API_URL = "https://thunder.api.overdrive.com/v2/" 36 | CLIENT_ID = "dewey" 37 | 38 | 39 | class OverDriveClient(object): 40 | """ 41 | A really simplified OverDrive Thunder API client 42 | """ 43 | 44 | def __init__(self, **kwargs) -> None: 45 | """ 46 | Constructor. 
47 | 48 | :param kwargs: 49 | - user_agent: User Agent string for requests 50 | - timeout: The timeout interval for a network request. Default 15 (seconds). 51 | - retries: The number of times to retry a network request on failure. Default 0. 52 | """ 53 | self.logger = logging.getLogger(__name__) 54 | self.user_agent = kwargs.pop("user_agent", USER_AGENT) 55 | self.timeout = int(kwargs.pop("timeout", 15)) 56 | self.retries = int(kwargs.pop("retry", 0)) 57 | 58 | session = requests.Session() 59 | adapter = HTTPAdapter(max_retries=Retry(total=self.retries, backoff_factor=0.1)) 60 | # noinspection HttpUrlsUsage 61 | for prefix in ("http://", "https://"): 62 | session.mount(prefix, adapter) 63 | self.session = kwargs.pop("session", None) or session 64 | 65 | def default_headers(self) -> Dict: 66 | """ 67 | Default http request headers. 68 | 69 | :return: 70 | """ 71 | headers = { 72 | "User-Agent": self.user_agent, 73 | "Referer": SITE_URL + "/", 74 | "Origin": SITE_URL, 75 | "Cache-Control": "no-cache", 76 | "Pragma": "no-cache", 77 | } 78 | return headers 79 | 80 | def default_params(self) -> Dict: 81 | """ 82 | Default set of GET request parameters. 83 | 84 | :return: 85 | """ 86 | params = {"x-client-id": CLIENT_ID} 87 | return params 88 | 89 | def make_request( 90 | self, 91 | endpoint: str, 92 | params: Optional[Dict] = None, 93 | data: Optional[Dict] = None, 94 | headers: Optional[Dict] = None, 95 | method: Optional[str] = None, 96 | ): 97 | """ 98 | Sends an API request. 99 | 100 | :param endpoint: Relative path to endpoint 101 | :param params: URL query parameters 102 | :param data: POST data parameters 103 | :param method: HTTP method, e.g. 'PUT' 104 | :param headers: Custom headers 105 | :return: Union[List, Dict, str] 106 | """ 107 | endpoint_url = urljoin(THUNDER_API_URL, endpoint) 108 | headers = headers or self.default_headers() 109 | if not method: 110 | # try to set an HTTP method 111 | if data is not None: 112 | method = "POST" 113 | else: 114 | method = "GET" 115 | 116 | req = requests.Request( 117 | method, 118 | endpoint_url, 119 | headers=headers, 120 | params=params, 121 | data=data, 122 | ) 123 | res = self.session.send(self.session.prepare_request(req), timeout=self.timeout) 124 | self.logger.debug("body: %s", res.text) 125 | res.raise_for_status() 126 | 127 | if res.headers.get("content-type", "").startswith("application/json"): 128 | return res.json() 129 | return res.text 130 | 131 | def media(self, title_id: str, **kwargs) -> Dict: 132 | """ 133 | Retrieve a title. 134 | Title id can also be a reserve id. 135 | 136 | :param title_id: A unique id that identifies the content. 137 | :return: 138 | """ 139 | params = self.default_params() 140 | params.update(kwargs) 141 | return self.make_request(f"media/{title_id}", params=params) 142 | 143 | def media_bulk(self, title_ids: List[str], **kwargs) -> List[Dict]: 144 | """ 145 | Retrieve a list of titles. 146 | 147 | :param title_ids: The ids passed in this request can be titleIds or reserveIds. 148 | :return: 149 | """ 150 | params = self.default_params() 151 | params.update({"titleIds": ",".join(title_ids)}) 152 | params.update(kwargs) 153 | return self.make_request("media/bulk", params=params) 154 | 155 | def library(self, library_key: str, **kwargs) -> Dict: 156 | """ 157 | Get a library's configuration data. 158 | 159 | :param library_key: A unique key that identifies the library, e.g. 
lapl 160 | :param kwargs: 161 | :return: 162 | """ 163 | params = self.default_params() 164 | params.update(kwargs) 165 | return self.make_request(f"libraries/{library_key}", params=params) 166 | 167 | def library_media(self, library_key: str, title_id: str, **kwargs) -> dict: 168 | """ 169 | Get title. 170 | 171 | :param library_key: A unique key that identifies the library 172 | :param title_id: 173 | :return: 174 | """ 175 | params = self.default_params() 176 | params.update({"titleIds": title_id}) 177 | params.update(kwargs) 178 | return self.make_request( 179 | f"libraries/{library_key}/media/{title_id}", params=params 180 | ) 181 | 182 | def library_media_availability( 183 | self, library_key: str, title_id: str, **kwargs 184 | ) -> dict: 185 | """ 186 | Get title availability. 187 | 188 | :param library_key: A unique key that identifies the library, e.g. lapl 189 | :param title_id: 190 | :param kwargs: 191 | :return: 192 | """ 193 | params = self.default_params() 194 | params.update(kwargs) 195 | return self.make_request( 196 | f"libraries/{library_key}/media/{title_id}/availability", params=params 197 | ) 198 | -------------------------------------------------------------------------------- /tests/data/opf.schema.xml: -------------------------------------------------------------------------------- [RelaxNG schema for validating generated OPF 2.0 package files, used by test_opf in tests/odmpy_tests.py; the XML element markup was not preserved in this text dump]
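Illustrative sketch (not a file in this repository): the schema above can be used to validate an OPF file produced by `odmpy dl ... --opf` with lxml, in the same way test_opf does below. The OPF file name used here is only an assumed example.

from lxml import etree

# Load the RelaxNG schema shipped with the tests
relaxng = etree.RelaxNG(etree.parse("tests/data/opf.schema.xml"))
# Assumed example output of `odmpy dl ... --opf`
opf_doc = etree.parse("ceremonies-for-christmas.opf")
if not relaxng.validate(opf_doc):
    # error_log describes why validation failed
    print(relaxng.error_log)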
-------------------------------------------------------------------------------- /tests/odmpy_tests.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2020 https://github.com/ping 2 | # 3 | # This software is released under the MIT License. 4 | # https://opensource.org/licenses/MIT 5 | 6 | import json 7 | from http import HTTPStatus 8 | 9 | import responses 10 | from lxml import etree # type: ignore[import] 11 | 12 | from odmpy.errors import OdmpyRuntimeError 13 | from odmpy.odm import run 14 | from odmpy.overdrive import OverDriveClient 15 | from .base import BaseTestCase 16 | from .data import ( 17 | get_expected_result, 18 | ) 19 | 20 | 21 | # [i] USE run_tests.sh 22 | 23 | 24 | class OdmpyTests(BaseTestCase): 25 | def setUp(self): 26 | super().setUp() 27 | 28 | # test1.odm - book with ascii meta 29 | # test2.odm - book with non-ascii meta 30 | # test3.odm - Issue using mutagen, ref #17 31 | # test4.odm - HTML entities decoding, ref #19 32 | self.test_odms = ["test1.odm", "test2.odm", "test3.odm", "test4.odm"] 33 | 34 | def test_info(self): 35 | """ 36 | `odmpy info test.odm` 37 | """ 38 | for test_odm_file in self.test_odms: 39 | with self.subTest(odm=test_odm_file): 40 | expected_file = self.test_data_dir.joinpath( 41 | f"{test_odm_file}.info.expected.txt" 42 | ) 43 | with self.assertLogs(run.__module__, level="INFO") as context: 44 | run( 45 | [ 46 | "--noversioncheck", 47 | "info", 48 | str(self.test_data_dir.joinpath(test_odm_file)), 49 | ], 50 | be_quiet=True, 51 | ) 52 | with expected_file.open("r", encoding="utf-8") as expected: 53 | self.assertEqual( 54 | "\n".join([r.msg for r in context.records]) + "\n", 55 | expected.read(), 56 | ) 57 | 58 | def test_info_json(self): 59 | """ 60 | `odmpy info test.odm --format json` 61 | """ 62 | for test_odm_file in self.test_odms: 63 | with self.subTest(odm=test_odm_file): 64 | with self.assertLogs(run.__module__, level="INFO") as context: 65 | run( 66 | [ 67 | "--noversioncheck", 68 | "info", 69 | str(self.test_data_dir.joinpath(test_odm_file)), 70 | "--format", 71 | "json", 72 | ], 73 | be_quiet=True, 74 | ) 75 | info = json.loads("\n".join([r.msg for r in context.records])) 76 | for tag in [ 77 | "title", 78 | "creators", 79 | "publisher", 80 | "subjects", 81 | "languages", 82 | "description", 83 | "total_duration", 84 | ]: 85 | with self.subTest(tag=tag): 86 | self.assertTrue( 87 | info.get(tag), msg="'{}' is not set".format(tag) 88 | ) 89 | 90 | def _setup_common_responses(self): 91 | with self.test_data_dir.joinpath("audiobook", "cover.jpg").open("rb") as c: 92 | img_bytes = c.read() 93 | # cover from OD API 94 | responses.get( 95 | "https://ic.od-cdn.com/resize?type=auto&width=510&height=510&force=true&quality=80&url=%2Fodmpy%2Ftest_data%2Fcover.jpg", 96 | content_type="image/jpeg", 97 | body=img_bytes, 98 | ) 99 | responses.get( 100 |
"https://ic.od-cdn.com/resize?type=auto&width=510&height=510&force=true&quality=80&url=%2Fodmpy%2Ftest_data%2Fcover_NOTFOUND.jpg", 101 | status=404, 102 | ) 103 | 104 | odm_test_data_dir = self.test_data_dir.joinpath("audiobook", "odm") 105 | with odm_test_data_dir.joinpath("test.license").open( 106 | "r", encoding="utf-8" 107 | ) as license_file: 108 | responses.get( 109 | "https://ping.github.io/odmpy/test_data/test.license", 110 | content_type="application/xml", 111 | body=license_file.read(), 112 | ) 113 | for mp3 in ( 114 | "book1/ceremonies_herrick_bk_64kb.mp3", 115 | "book1/ceremonies_herrick_cjph_64kb.mp3", 116 | "book1/ceremonies_herrick_gg_64kb.mp3", 117 | "book3/01_ceremonies_herrick_cjph_64kb.mp3", 118 | ): 119 | with odm_test_data_dir.joinpath(mp3).open("rb") as m: 120 | responses.get( 121 | f"https://ping.github.io/odmpy/test_data/{mp3}", 122 | content_type="audio/mp3", 123 | body=m.read(), 124 | ) 125 | with odm_test_data_dir.joinpath("media.json").open("r", encoding="utf-8") as m: 126 | responses.get( 127 | "https://thunder.api.overdrive.com/v2/media/0fef5121-bb1f-42a5-b62a-d9fded939d50", 128 | content_type="application/json", 129 | body=m.read(), 130 | ) 131 | responses.get( 132 | "https://ping.github.io/odmpy/test_data/cover_NOTFOUND.jpg", 133 | status=HTTPStatus.NOT_FOUND, 134 | ) 135 | 136 | @responses.activate 137 | def test_cover_fail_ref24(self): 138 | """ 139 | Test for #24 error downloading cover 140 | """ 141 | self._setup_common_responses() 142 | test_odm_file = "test_ref24.odm" 143 | run( 144 | [ 145 | "--noversioncheck", 146 | "dl", 147 | str(self.test_data_dir.joinpath(test_odm_file)), 148 | "--downloaddir", 149 | str(self.test_downloads_dir), 150 | "--keepcover", 151 | "--hideprogress", 152 | ], 153 | be_quiet=True, 154 | ) 155 | expected_result = get_expected_result(self.test_downloads_dir, test_odm_file) 156 | self.assertTrue(expected_result.book_folder.is_dir()) 157 | for i in range(1, expected_result.total_parts + 1): 158 | book_file = expected_result.book_folder.joinpath( 159 | expected_result.mp3_name_format.format(i) 160 | ) 161 | self.assertTrue(book_file.exists()) 162 | self.assertFalse(expected_result.book_folder.joinpath("cover.jpg").exists()) 163 | 164 | @responses.activate 165 | def test_opf(self): 166 | """ 167 | `odmpy dl test.odm --opf` 168 | 169 | Test for #26 opf generation 170 | """ 171 | self._setup_common_responses() 172 | test_odm_file = "test1.odm" 173 | run( 174 | [ 175 | "--noversioncheck", 176 | "dl", 177 | str(self.test_data_dir.joinpath(test_odm_file)), 178 | "--downloaddir", 179 | str(self.test_downloads_dir), 180 | "--keepcover", 181 | "--opf", 182 | "--hideprogress", 183 | ], 184 | be_quiet=True, 185 | ) 186 | expected_result = get_expected_result(self.test_downloads_dir, test_odm_file) 187 | 188 | # schema file has been edited to remove the legacy toc attribute for spine 189 | schema_file = self.test_data_dir.joinpath("opf.schema.xml") 190 | test_file = expected_result.book_folder.joinpath("ceremonies-for-christmas.opf") 191 | self.assertTrue(test_file.exists()) 192 | 193 | with test_file.open("r", encoding="utf-8") as actual, schema_file.open( 194 | "r", encoding="utf-8" 195 | ) as schema: 196 | # pylint: disable=c-extension-no-member 197 | actual_opf = etree.parse(actual) 198 | relaxng = etree.RelaxNG(etree.parse(schema)) 199 | self.assertTrue(relaxng.validate(actual_opf)) 200 | 201 | root = actual_opf.getroot() 202 | metadata = root.find("metadata", root.nsmap) 203 | self.assertIsNotNone(metadata) 204 | 205 | metadata_nsmap 
= {k: v for k, v in metadata.nsmap.items() if k} 206 | 207 | overdrive_reserve_identifier = metadata.xpath( 208 | "//dc:identifier[@opf:scheme='OverDriveReserveId']", 209 | namespaces=metadata_nsmap, 210 | ) 211 | self.assertEqual(len(overdrive_reserve_identifier), 1) 212 | overdrive_reserve_id = overdrive_reserve_identifier[0].text 213 | self.assertTrue(overdrive_reserve_id) 214 | 215 | od = OverDriveClient() 216 | try: 217 | media_info = od.media(overdrive_reserve_id) 218 | 219 | # title 220 | self.assertEqual( 221 | metadata.find("dc:title", metadata_nsmap).text, media_info["title"] 222 | ) 223 | # language 224 | self.assertEqual( 225 | metadata.find("dc:language", metadata_nsmap).text, 226 | media_info["languages"][0]["id"], 227 | ) 228 | # publisher 229 | self.assertEqual( 230 | metadata.find("dc:publisher", metadata_nsmap).text, 231 | media_info["publisher"]["name"], 232 | ) 233 | # description 234 | self.assertEqual( 235 | metadata.find("dc:description", metadata_nsmap).text, 236 | media_info["description"], 237 | ) 238 | 239 | # pub date 240 | pub_date = metadata.find("dc:date", metadata_nsmap) 241 | self.assertIsNotNone(pub_date) 242 | self.assertEqual( 243 | pub_date.get(f"{{{metadata_nsmap['opf']}}}event"), "publication" 244 | ) 245 | self.assertEqual(pub_date.text, media_info["publishDate"]) 246 | 247 | # book ID, isbn 248 | self.assertEqual( 249 | metadata.xpath( 250 | "//dc:identifier[@id='publication-id']", 251 | namespaces=metadata_nsmap, 252 | )[0].text, 253 | [f for f in media_info["formats"] if f["id"] == "audiobook-mp3"][0][ 254 | "isbn" 255 | ], 256 | ) 257 | 258 | # authors 259 | authors = metadata.xpath( 260 | "//dc:creator[@opf:role='aut']", namespaces=metadata_nsmap 261 | ) 262 | authors_od = [ 263 | c for c in media_info["creators"] if c["role"] == "Author" 264 | ] 265 | self.assertTrue(len(authors), len(authors_od)) 266 | for author_opf, author_od in zip(authors, authors_od): 267 | self.assertEqual(author_opf.text, author_od["name"]) 268 | self.assertEqual( 269 | author_opf.get(f"{{{metadata_nsmap['opf']}}}file-as"), 270 | author_od["sortName"], 271 | ) 272 | 273 | # narrators 274 | narrators = metadata.xpath( 275 | "//dc:creator[@opf:role='nrt']", namespaces=metadata_nsmap 276 | ) 277 | narrators_od = [ 278 | c for c in media_info["creators"] if c["role"] == "Narrator" 279 | ] 280 | self.assertTrue(len(narrators), len(narrators_od)) 281 | for narrator_opf, narrator_od in zip(narrators, narrators_od): 282 | self.assertEqual(narrator_opf.text, narrator_od["name"]) 283 | self.assertEqual( 284 | narrator_opf.get(f"{{{metadata_nsmap['opf']}}}file-as"), 285 | narrator_od["sortName"], 286 | ) 287 | 288 | # manifest 289 | manifest = root.find("manifest", root.nsmap) 290 | self.assertIsNotNone(manifest) 291 | cover_ele = next( 292 | iter( 293 | [ 294 | i 295 | for i in manifest.findall("item", namespaces=manifest.nsmap) 296 | if i.get("id") == "cover" 297 | ] 298 | ), 299 | None, 300 | ) 301 | self.assertIsNotNone(cover_ele) 302 | self.assertEqual(cover_ele.get("href"), "cover.jpg") 303 | self.assertEqual(cover_ele.get("media-type"), "image/jpeg") 304 | manifest_audio_files = [ 305 | i 306 | for i in manifest.findall("item", namespaces=manifest.nsmap) 307 | if i.get("media-type") == "audio/mpeg" 308 | ] 309 | self.assertEqual( 310 | len(manifest_audio_files), 311 | expected_result.total_parts, 312 | ) 313 | 314 | # spine 315 | spine = root.find("spine", root.nsmap) 316 | self.assertIsNotNone(spine) 317 | sprine_audio_files = [ 318 | i for i in spine.findall("itemref", 
namespaces=spine.nsmap) 319 | ] 320 | self.assertEqual(len(sprine_audio_files), len(manifest_audio_files)) 321 | 322 | finally: 323 | # close this to prevent "ResourceWarning: unclosed socket" error 324 | od.session.close() 325 | 326 | @responses.activate 327 | def test_odm_return(self): 328 | """ 329 | `odmpy ret test.odm` 330 | """ 331 | responses.get("https://ping.github.io/odmpy/test_data") 332 | run( 333 | [ 334 | "--noversioncheck", 335 | "ret", 336 | str(self.test_data_dir.joinpath(self.test_odms[0])), 337 | ], 338 | be_quiet=True, 339 | ) 340 | 341 | @responses.activate 342 | def test_odm_return_fail(self): 343 | """ 344 | `odmpy ret test.odm` 345 | """ 346 | responses.get( 347 | "https://ping.github.io/odmpy/test_data", status=HTTPStatus.FORBIDDEN 348 | ) 349 | with self.assertLogs(run.__module__, level="INFO") as context: 350 | run( 351 | [ 352 | "--noversioncheck", 353 | "ret", 354 | str(self.test_data_dir.joinpath(self.test_odms[0])), 355 | ], 356 | be_quiet=True, 357 | ) 358 | self.assertIn( 359 | "Loan is probably already returned.", [r.msg for r in context.records] 360 | ) 361 | 362 | @responses.activate 363 | def test_odm_return_error(self): 364 | """ 365 | `odmpy ret test.odm` 366 | """ 367 | responses.get( 368 | "https://ping.github.io/odmpy/test_data", status=HTTPStatus.BAD_REQUEST 369 | ) 370 | with self.assertRaisesRegex(OdmpyRuntimeError, "HTTP error returning odm"): 371 | run( 372 | [ 373 | "--noversioncheck", 374 | "ret", 375 | str(self.test_data_dir.joinpath(self.test_odms[0])), 376 | ], 377 | be_quiet=True, 378 | ) 379 | -------------------------------------------------------------------------------- /tests/odmpy_dl_tests.py: -------------------------------------------------------------------------------- 1 | import json 2 | import shutil 3 | import subprocess 4 | 5 | import responses 6 | from mutagen.mp3 import MP3 7 | 8 | from odmpy.odm import run 9 | from .base import BaseTestCase 10 | from .data import ( 11 | part_title_formats, 12 | album_artists, 13 | markers, 14 | get_expected_result, 15 | ) 16 | 17 | 18 | # Test non-interactive options 19 | class OdmpyDlTests(BaseTestCase): 20 | def setUp(self) -> None: 21 | super().setUp() 22 | 23 | # test1.odm - book with ascii meta 24 | # test2.odm - book with non-ascii meta 25 | # test3.odm - Issue using mutagen, ref #17 26 | # test4.odm - HTML entities decoding, ref #19 27 | self.test_odms = ["test1.odm", "test2.odm", "test3.odm", "test4.odm"] 28 | 29 | def _setup_common_responses(self): 30 | with self.test_data_dir.joinpath("audiobook", "cover.jpg").open("rb") as c: 31 | img_bytes = c.read() 32 | # cover from OD API 33 | responses.get( 34 | "https://ic.od-cdn.com/resize?type=auto&width=510&height=510&force=true&quality=80&url=%2Fodmpy%2Ftest_data%2Fcover.jpg", 35 | content_type="image/jpeg", 36 | body=img_bytes, 37 | ) 38 | 39 | odm_test_data_dir = self.test_data_dir.joinpath("audiobook", "odm") 40 | with odm_test_data_dir.joinpath("test.license").open( 41 | "r", encoding="utf-8" 42 | ) as license_file: 43 | responses.get( 44 | "https://ping.github.io/odmpy/test_data/test.license", 45 | content_type="application/xml", 46 | body=license_file.read(), 47 | ) 48 | for mp3 in ( 49 | "book1/ceremonies_herrick_bk_64kb.mp3", 50 | "book1/ceremonies_herrick_cjph_64kb.mp3", 51 | "book1/ceremonies_herrick_gg_64kb.mp3", 52 | "book2/ceremonies_herrick_bk_64kb.mp3", 53 | "book2/ceremonies_herrick_cjph_64kb.mp3", 54 | "book2/ceremonies_herrick_gg_64kb.mp3", 55 | "book3/01_ceremonies_herrick_cjph_64kb.mp3", 56 | ): 57 | with 
odm_test_data_dir.joinpath(mp3).open("rb") as m: 58 | responses.get( 59 | f"https://ping.github.io/odmpy/test_data/{mp3}", 60 | content_type="audio/mp3", 61 | body=m.read(), 62 | ) 63 | 64 | @responses.activate 65 | def test_standard_download(self): 66 | """ 67 | `odmpy dl test.odm --keepcover` 68 | """ 69 | for test_odm_file in self.test_odms: 70 | with self.subTest(odm=test_odm_file): 71 | expected_result = get_expected_result( 72 | self.test_downloads_dir, test_odm_file 73 | ) 74 | self._setup_common_responses() 75 | 76 | run( 77 | [ 78 | "--noversioncheck", 79 | "dl", 80 | str(self.test_data_dir.joinpath(test_odm_file)), 81 | "--downloaddir", 82 | str(self.test_downloads_dir), 83 | "--keepcover", 84 | "--hideprogress", 85 | ], 86 | be_quiet=True, 87 | ) 88 | self.assertTrue(expected_result.book_folder.exists()) 89 | for i in range(1, expected_result.total_parts + 1): 90 | book_file = expected_result.book_folder.joinpath( 91 | expected_result.mp3_name_format.format(i) 92 | ) 93 | self.assertTrue(book_file.exists()) 94 | audio_file = MP3(book_file) 95 | self.assertTrue(audio_file.tags) 96 | self.assertEqual(audio_file.tags.version[1], 4) 97 | self.assertEqual(audio_file.tags["TLAN"].text[0], "eng") 98 | self.assertTrue(expected_result.book_folder.joinpath("cover.jpg").exists()) 99 | 100 | @responses.activate 101 | def test_add_chapters(self): 102 | """ 103 | `odmpy dl test.odm --chapters` 104 | """ 105 | for test_odm_file in self.test_odms: 106 | # clear remnant downloads 107 | if self.test_downloads_dir.exists(): 108 | shutil.rmtree(self.test_downloads_dir, ignore_errors=True) 109 | 110 | with self.subTest(odm=test_odm_file): 111 | expected_result = get_expected_result( 112 | self.test_downloads_dir, test_odm_file 113 | ) 114 | self._setup_common_responses() 115 | 116 | run( 117 | [ 118 | "--noversioncheck", 119 | "dl", 120 | str(self.test_data_dir.joinpath(test_odm_file)), 121 | "--downloaddir", 122 | str(self.test_downloads_dir), 123 | "--chapters", 124 | "--id3v2version", 125 | "3", 126 | "--hideprogress", 127 | ], 128 | be_quiet=True, 129 | ) 130 | for i in range(1, expected_result.total_parts + 1): 131 | book_file = expected_result.book_folder.joinpath( 132 | expected_result.mp3_name_format.format(i) 133 | ) 134 | audio_file = MP3(book_file) 135 | self.assertTrue(audio_file.tags) 136 | self.assertEqual(audio_file.tags.version[1], 3) 137 | self.assertTrue( 138 | audio_file.tags["TIT2"] 139 | .text[0] 140 | .startswith(part_title_formats[test_odm_file].format(i)) 141 | ) 142 | self.assertEqual( 143 | audio_file.tags["TALB"].text[0], "Ceremonies For Christmas" 144 | ) 145 | self.assertEqual(audio_file.tags["TLAN"].text[0], "eng") 146 | self.assertEqual(audio_file.tags["TPE1"].text[0], "Robert Herrick") 147 | self.assertEqual( 148 | audio_file.tags["TPE2"].text[0], 149 | album_artists[test_odm_file], 150 | ) 151 | self.assertEqual(audio_file.tags["TRCK"], str(i)) 152 | self.assertEqual(audio_file.tags["TPUB"].text[0], "Librivox") 153 | self.assertEqual( 154 | audio_file.tags["TPE3"].text[0], 155 | "LibriVox Volunteers", 156 | ) 157 | self.assertTrue(audio_file.tags["CTOC:toc"]) 158 | for j, chap_id in enumerate( 159 | audio_file.tags["CTOC:toc"].child_element_ids 160 | ): 161 | chap_tag = audio_file.tags[f"CHAP:{chap_id}"] 162 | self.assertTrue(chap_tag.sub_frames) 163 | self.assertEqual( 164 | chap_tag.sub_frames["TIT2"].text[0], 165 | markers[test_odm_file][j + i - 1], 166 | ) 167 | 168 | @responses.activate 169 | def test_merge_formats(self): 170 | """ 171 | `odmpy dl test.odm 
--merge` 172 | """ 173 | for test_odm_file in self.test_odms: 174 | with self.subTest(odm=test_odm_file): 175 | expected_result = get_expected_result( 176 | self.test_downloads_dir, test_odm_file 177 | ) 178 | self._setup_common_responses() 179 | 180 | run( 181 | [ 182 | "--noversioncheck", 183 | "dl", 184 | str(self.test_data_dir.joinpath(test_odm_file)), 185 | "--downloaddir", 186 | str(self.test_downloads_dir), 187 | "--merge", 188 | "--hideprogress", 189 | ], 190 | be_quiet=True, 191 | ) 192 | mp3_file = expected_result.book_folder.joinpath( 193 | f"{expected_result.merged_book_basename}.mp3" 194 | ) 195 | self.assertTrue(mp3_file.exists()) 196 | 197 | run( 198 | [ 199 | "--noversioncheck", 200 | "dl", 201 | str(self.test_data_dir.joinpath(test_odm_file)), 202 | "--downloaddir", 203 | str(self.test_downloads_dir), 204 | "--merge", 205 | "--mergeformat", 206 | "m4b", 207 | "--hideprogress", 208 | ], 209 | be_quiet=True, 210 | ) 211 | m4b_file = expected_result.book_folder.joinpath( 212 | f"{expected_result.merged_book_basename}.m4b" 213 | ) 214 | self.assertTrue(m4b_file.exists()) 215 | 216 | @responses.activate 217 | def test_merge_formats_add_chapters(self): 218 | """ 219 | `odmpy dl test.odm --merge --chapters` 220 | """ 221 | for test_odm_file in self.test_odms: 222 | with self.subTest(odm=test_odm_file): 223 | # clear remnant downloads 224 | if self.test_downloads_dir.exists(): 225 | shutil.rmtree(self.test_downloads_dir, ignore_errors=True) 226 | 227 | expected_result = get_expected_result( 228 | self.test_downloads_dir, test_odm_file 229 | ) 230 | self._setup_common_responses() 231 | 232 | run( 233 | [ 234 | "--noversioncheck", 235 | "dl", 236 | str(self.test_data_dir.joinpath(test_odm_file)), 237 | "--downloaddir", 238 | str(self.test_downloads_dir), 239 | "--merge", 240 | "--chapters", 241 | "--hideprogress", 242 | ], 243 | be_quiet=True, 244 | ) 245 | mp3_file = expected_result.book_folder.joinpath( 246 | f"{expected_result.merged_book_basename}.mp3" 247 | ) 248 | self.assertTrue(mp3_file.exists()) 249 | ffprobe_cmd = [ 250 | "ffprobe", 251 | "-v", 252 | "quiet", 253 | "-print_format", 254 | "json", 255 | "-show_format", 256 | "-show_streams", 257 | "-show_chapters", 258 | str(mp3_file), 259 | ] 260 | cmd_result = subprocess.run( 261 | ffprobe_cmd, 262 | capture_output=True, 263 | text=True, 264 | check=True, 265 | encoding="utf-8", 266 | ) 267 | meta = json.loads(str(cmd_result.stdout)) 268 | 269 | last_end = 0 270 | self.assertEqual( 271 | len(meta.get("chapters", [])), expected_result.total_chapters 272 | ) 273 | for i, ch in enumerate( 274 | sorted(meta["chapters"], key=lambda c: c["start"]) 275 | ): 276 | self.assertEqual(ch["tags"]["title"], markers[test_odm_file][i]) 277 | start = ch["start"] 278 | end = ch["end"] 279 | self.assertGreater(end, start) 280 | self.assertEqual(start, last_end) 281 | self.assertGreater(end, last_end) 282 | self.assertAlmostEqual( 283 | (end - start) / 1000.0, 284 | expected_result.chapter_durations_sec[i], 285 | 0, 286 | ) 287 | last_end = end 288 | for tag in [ 289 | "title", 290 | "album", 291 | "artist", 292 | "album_artist", 293 | "performer", 294 | "publisher", 295 | "track", 296 | ]: 297 | with self.subTest(tag=tag): 298 | self.assertTrue(meta["format"]["tags"].get(tag)) 299 | 300 | run( 301 | [ 302 | "--noversioncheck", 303 | "dl", 304 | str(self.test_data_dir.joinpath(test_odm_file)), 305 | "--downloaddir", 306 | str(self.test_downloads_dir), 307 | "--merge", 308 | "--mergeformat", 309 | "m4b", 310 | "--chapters", 311 | 
"--hideprogress", 312 | ], 313 | be_quiet=True, 314 | ) 315 | m4b_file = expected_result.book_folder.joinpath( 316 | f"{expected_result.merged_book_basename}.m4b" 317 | ) 318 | self.assertTrue(m4b_file.exists()) 319 | ffprobe_cmd = [ 320 | "ffprobe", 321 | "-v", 322 | "quiet", 323 | "-print_format", 324 | "json", 325 | "-show_format", 326 | "-show_streams", 327 | "-show_chapters", 328 | str(m4b_file), 329 | ] 330 | cmd_result = subprocess.run( 331 | ffprobe_cmd, 332 | capture_output=True, 333 | text=True, 334 | check=True, 335 | encoding="utf-8", 336 | ) 337 | 338 | last_end = 0 339 | meta = json.loads(str(cmd_result.stdout)) 340 | self.assertEqual( 341 | len(meta.get("chapters", [])), expected_result.total_chapters 342 | ) 343 | for i, ch in enumerate( 344 | sorted(meta["chapters"], key=lambda c: c["start"]) 345 | ): 346 | self.assertEqual(ch["tags"]["title"], markers[test_odm_file][i]) 347 | start = ch["start"] 348 | end = ch["end"] 349 | self.assertGreater(end, start) 350 | self.assertEqual(start, last_end) 351 | self.assertGreater(end, last_end) 352 | if ch["id"] > 0: 353 | # first chapter has a tiny bit difference for some reason 354 | # AssertionError: 66.467 != 67 within 0 places (0.5330000000000013 difference) 355 | self.assertAlmostEqual( 356 | (end - start) / 1000.0, 357 | expected_result.chapter_durations_sec[i], 358 | 0, 359 | ) 360 | last_end = end 361 | 362 | @responses.activate 363 | def test_nobook_folder(self): 364 | """ 365 | `odmpy dl test.odm --nobookfolder` 366 | """ 367 | for test_odm_file in self.test_odms: 368 | with self.subTest(odm=test_odm_file): 369 | expected_result = get_expected_result( 370 | self.test_downloads_dir, test_odm_file 371 | ) 372 | self._setup_common_responses() 373 | 374 | run( 375 | [ 376 | "--noversioncheck", 377 | "dl", 378 | str(self.test_data_dir.joinpath(test_odm_file)), 379 | "--downloaddir", 380 | str(self.test_downloads_dir), 381 | "--merge", 382 | "--nobookfolder", 383 | "--hideprogress", 384 | "--writejson", 385 | ], 386 | be_quiet=True, 387 | ) 388 | mp3_file = self.test_data_dir.joinpath( 389 | "downloads", f"{expected_result.merged_book_basename}.mp3" 390 | ) 391 | self.assertTrue(mp3_file.exists()) 392 | self.assertTrue( 393 | self.test_data_dir.joinpath("downloads", "debug.json").exists() 394 | ) 395 | -------------------------------------------------------------------------------- /.pylintrc: -------------------------------------------------------------------------------- 1 | [MASTER] 2 | 3 | # A comma-separated list of package or module names from where C extensions may 4 | # be loaded. Extensions are loading into the active Python interpreter and may 5 | # run arbitrary code 6 | extension-pkg-whitelist= 7 | 8 | # Add files or directories to the blacklist. They should be base names, not 9 | # paths. 10 | ignore=CVS 11 | 12 | # Add files or directories matching the regex patterns to the blacklist. The 13 | # regex matches against base names, not paths. 14 | ignore-patterns= 15 | 16 | # Python code to execute, usually for sys.path manipulation such as 17 | # pygtk.require(). 18 | #init-hook= 19 | 20 | # Use multiple processes to speed up Pylint. 21 | jobs=1 22 | 23 | # List of plugins (as comma separated values of python modules names) to load, 24 | # usually to register additional checkers. 25 | load-plugins= 26 | 27 | # Pickle collected data for later comparisons. 28 | persistent=yes 29 | 30 | # Specify a configuration file. 
31 | #rcfile= 32 | 33 | # When enabled, pylint would attempt to guess common misconfiguration and emit 34 | # user-friendly hints instead of false-positive error messages 35 | suggestion-mode=yes 36 | 37 | # Allow loading of arbitrary C extensions. Extensions are imported into the 38 | # active Python interpreter and may run arbitrary code. 39 | unsafe-load-any-extension=no 40 | 41 | 42 | [MESSAGES CONTROL] 43 | 44 | # Only show warnings with the listed confidence levels. Leave empty to show 45 | # all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED 46 | confidence= 47 | 48 | # Disable the message, report, category or checker with the given id(s). You 49 | # can either give multiple identifiers separated by comma (,) or put this 50 | # option multiple times (only on the command line, not in the configuration 51 | # file where it should appear only once).You can also use "--disable=all" to 52 | # disable everything first and then reenable specific checks. For example, if 53 | # you want to run only the similarities checker, you can use "--disable=all 54 | # --enable=similarities". If you want to run only the classes checker, but have 55 | # no Warning level messages displayed, use"--disable=all --enable=classes 56 | # --disable=W" 57 | #disable=print-statement, 58 | # parameter-unpacking, 59 | # unpacking-in-except, 60 | # old-raise-syntax, 61 | # backtick, 62 | # long-suffix, 63 | # old-ne-operator, 64 | # old-octal-literal, 65 | # import-star-module-level, 66 | # non-ascii-bytes-literal, 67 | # invalid-unicode-literal, 68 | # raw-checker-failed, 69 | # bad-inline-option, 70 | # locally-disabled, 71 | # locally-enabled, 72 | # file-ignored, 73 | # suppressed-message, 74 | # useless-suppression, 75 | # deprecated-pragma, 76 | # apply-builtin, 77 | # basestring-builtin, 78 | # buffer-builtin, 79 | # cmp-builtin, 80 | # coerce-builtin, 81 | # execfile-builtin, 82 | # file-builtin, 83 | # long-builtin, 84 | # raw_input-builtin, 85 | # reduce-builtin, 86 | # standarderror-builtin, 87 | # unicode-builtin, 88 | # xrange-builtin, 89 | # coerce-method, 90 | # delslice-method, 91 | # getslice-method, 92 | # setslice-method, 93 | # no-absolute-import, 94 | # old-division, 95 | # dict-iter-method, 96 | # dict-view-method, 97 | # next-method-called, 98 | # metaclass-assignment, 99 | # indexing-exception, 100 | # raising-string, 101 | # reload-builtin, 102 | # oct-method, 103 | # hex-method, 104 | # nonzero-method, 105 | # cmp-method, 106 | # input-builtin, 107 | # round-builtin, 108 | # intern-builtin, 109 | # unichr-builtin, 110 | # map-builtin-not-iterating, 111 | # zip-builtin-not-iterating, 112 | # range-builtin-not-iterating, 113 | # filter-builtin-not-iterating, 114 | # using-cmp-argument, 115 | # eq-without-hash, 116 | # div-method, 117 | # idiv-method, 118 | # rdiv-method, 119 | # exception-message-attribute, 120 | # invalid-str-codec, 121 | # sys-max-int, 122 | # bad-python3-import, 123 | # deprecated-string-function, 124 | # deprecated-str-translate-call, 125 | # deprecated-itertools-function, 126 | # deprecated-types-field, 127 | # next-method-defined, 128 | # dict-items-not-iterating, 129 | # dict-keys-not-iterating, 130 | # dict-values-not-iterating, 131 | # deprecated-operator-function, 132 | # deprecated-urllib-function, 133 | # xreadlines-attribute, 134 | # deprecated-sys-function, 135 | # exception-escape, 136 | # comprehension-escape 137 | disable=R,C, 138 | logging-format-interpolation, 139 | logging-fstring-interpolation, 140 | unnecessary-pass, 141 | redefined-builtin, 
142 | self-assigning-variable, 143 | raise-missing-from, 144 | c-extension-no-member 145 | 146 | # Enable the message, report, category or checker with the given id(s). You can 147 | # either give multiple identifier separated by comma (,) or put this option 148 | # multiple time (only on the command line, not in the configuration file where 149 | # it should appear only once). See also the "--disable" option for examples. 150 | #enable=c-extension-no-member 151 | 152 | 153 | [REPORTS] 154 | 155 | # Python expression which should return a note less than 10 (10 is the highest 156 | # note). You have access to the variables errors warning, statement which 157 | # respectively contain the number of errors / warnings messages and the total 158 | # number of statements analyzed. This is used by the global evaluation report 159 | # (RP0004). 160 | evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) 161 | 162 | # Template used to display messages. This is a python new-style format string 163 | # used to format the message information. See doc for all details 164 | #msg-template= 165 | 166 | # Set the output format. Available formats are text, parseable, colorized, json 167 | # and msvs (visual studio).You can also give a reporter class, eg 168 | # mypackage.mymodule.MyReporterClass. 169 | output-format=text 170 | 171 | # Tells whether to display a full report or only the messages 172 | reports=no 173 | 174 | # Activate the evaluation score. 175 | score=yes 176 | 177 | 178 | [REFACTORING] 179 | 180 | # Maximum number of nested blocks for function / method body 181 | max-nested-blocks=5 182 | 183 | # Complete name of functions that never returns. When checking for 184 | # inconsistent-return-statements if a never returning function is called then 185 | # it will be considered as an explicit return statement and no message will be 186 | # printed. 187 | never-returning-functions=optparse.Values,sys.exit 188 | 189 | 190 | [LOGGING] 191 | 192 | # Logging modules to check that the string format arguments are in logging 193 | # function parameter format 194 | logging-modules=logging 195 | 196 | 197 | [SPELLING] 198 | 199 | # Limits count of emitted suggestions for spelling mistakes 200 | max-spelling-suggestions=4 201 | 202 | # Spelling dictionary name. Available dictionaries: none. To make it working 203 | # install python-enchant package. 204 | spelling-dict= 205 | 206 | # List of comma separated words that should not be checked. 207 | spelling-ignore-words= 208 | 209 | # A path to a file that contains private dictionary; one word per line. 210 | spelling-private-dict-file= 211 | 212 | # Tells whether to store unknown words to indicated private dictionary in 213 | # --spelling-private-dict-file option instead of raising a message. 214 | spelling-store-unknown-words=no 215 | 216 | 217 | [MISCELLANEOUS] 218 | 219 | # List of note tags to take in consideration, separated by a comma. 220 | notes=FIXME, 221 | XXX, 222 | TODO 223 | 224 | 225 | [SIMILARITIES] 226 | 227 | # Ignore comments when computing similarities. 228 | ignore-comments=yes 229 | 230 | # Ignore docstrings when computing similarities. 231 | ignore-docstrings=yes 232 | 233 | # Ignore imports when computing similarities. 234 | ignore-imports=no 235 | 236 | # Minimum lines number of a similarity. 237 | min-similarity-lines=4 238 | 239 | 240 | [TYPECHECK] 241 | 242 | # List of decorators that produce context managers, such as 243 | # contextlib.contextmanager. 
Add to this list to register other decorators that 244 | # produce valid context managers. 245 | contextmanager-decorators=contextlib.contextmanager 246 | 247 | # List of members which are set dynamically and missed by pylint inference 248 | # system, and so shouldn't trigger E1101 when accessed. Python regular 249 | # expressions are accepted. 250 | generated-members= 251 | 252 | # Tells whether missing members accessed in mixin class should be ignored. A 253 | # mixin class is detected if its name ends with "mixin" (case insensitive). 254 | ignore-mixin-members=yes 255 | 256 | # This flag controls whether pylint should warn about no-member and similar 257 | # checks whenever an opaque object is returned when inferring. The inference 258 | # can return multiple potential results while evaluating a Python object, but 259 | # some branches might not be evaluated, which results in partial inference. In 260 | # that case, it might be useful to still emit no-member and other checks for 261 | # the rest of the inferred objects. 262 | ignore-on-opaque-inference=yes 263 | 264 | # List of class names for which member attributes should not be checked (useful 265 | # for classes with dynamically set attributes). This supports the use of 266 | # qualified names. 267 | ignored-classes=optparse.Values,thread._local,_thread._local 268 | 269 | # List of module names for which member attributes should not be checked 270 | # (useful for modules/projects where namespaces are manipulated during runtime 271 | # and thus existing member attributes cannot be deduced by static analysis. It 272 | # supports qualified module names, as well as Unix pattern matching. 273 | ignored-modules= 274 | 275 | # Show a hint with possible names when a member name was not found. The aspect 276 | # of finding the hint is based on edit distance. 277 | missing-member-hint=yes 278 | 279 | # The minimum edit distance a name should have in order to be considered a 280 | # similar match for a missing member name. 281 | missing-member-hint-distance=1 282 | 283 | # The total number of similar names that should be taken in consideration when 284 | # showing a hint for a missing member. 285 | missing-member-max-choices=1 286 | 287 | 288 | [VARIABLES] 289 | 290 | # List of additional names supposed to be defined in builtins. Remember that 291 | # you should avoid to define new builtins when possible. 292 | additional-builtins= 293 | 294 | # Tells whether unused global variables should be treated as a violation. 295 | allow-global-unused-variables=yes 296 | 297 | # List of strings which can identify a callback function by name. A callback 298 | # name must start or end with one of those strings. 299 | callbacks=cb_, 300 | _cb 301 | 302 | # A regular expression matching the name of dummy variables (i.e. expectedly 303 | # not used). 304 | dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ 305 | 306 | # Argument names that match this expression will be ignored. Default to name 307 | # with leading underscore 308 | ignored-argument-names=_.*|^ignored_|^unused_ 309 | 310 | # Tells whether we should check for unused import in __init__ files. 311 | init-import=no 312 | 313 | # List of qualified module names which can have objects that can redefine 314 | # builtins. 315 | redefining-builtins-modules=six.moves,past.builtins,future.builtins,io,builtins 316 | 317 | 318 | [FORMAT] 319 | 320 | # Expected format of line ending, e.g. empty (any line ending), LF or CRLF. 
321 | expected-line-ending-format= 322 | 323 | # Regexp for a line that is allowed to be longer than the limit. 324 | ignore-long-lines=^\s*(# )??$ 325 | 326 | # Number of spaces of indent required inside a hanging or continued line. 327 | indent-after-paren=4 328 | 329 | # String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 330 | # tab). 331 | indent-string=' ' 332 | 333 | # Maximum number of characters on a single line. 334 | max-line-length=120 335 | 336 | # Maximum number of lines in a module 337 | max-module-lines=1000 338 | 339 | # List of optional constructs for which whitespace checking is disabled. `dict- 340 | # separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}. 341 | # `trailing-comma` allows a space between comma and closing bracket: (a, ). 342 | # `empty-line` allows space-only lines. 343 | #no-space-check=trailing-comma, 344 | # dict-separator 345 | 346 | # Allow the body of a class to be on the same line as the declaration if body 347 | # contains single statement. 348 | single-line-class-stmt=no 349 | 350 | # Allow the body of an if to be on the same line as the test if there is no 351 | # else. 352 | single-line-if-stmt=no 353 | 354 | 355 | [BASIC] 356 | 357 | # Naming style matching correct argument names 358 | argument-naming-style=snake_case 359 | 360 | # Regular expression matching correct argument names. Overrides argument- 361 | # naming-style 362 | #argument-rgx= 363 | 364 | # Naming style matching correct attribute names 365 | attr-naming-style=snake_case 366 | 367 | # Regular expression matching correct attribute names. Overrides attr-naming- 368 | # style 369 | #attr-rgx= 370 | 371 | # Bad variable names which should always be refused, separated by a comma 372 | bad-names=foo, 373 | bar, 374 | baz, 375 | toto, 376 | tutu, 377 | tata 378 | 379 | # Naming style matching correct class attribute names 380 | class-attribute-naming-style=any 381 | 382 | # Regular expression matching correct class attribute names. Overrides class- 383 | # attribute-naming-style 384 | #class-attribute-rgx= 385 | 386 | # Naming style matching correct class names 387 | class-naming-style=PascalCase 388 | 389 | # Regular expression matching correct class names. Overrides class-naming-style 390 | #class-rgx= 391 | 392 | # Naming style matching correct constant names 393 | const-naming-style=UPPER_CASE 394 | 395 | # Regular expression matching correct constant names. Overrides const-naming- 396 | # style 397 | #const-rgx= 398 | 399 | # Minimum line length for functions/classes that require docstrings, shorter 400 | # ones are exempt. 401 | docstring-min-length=-1 402 | 403 | # Naming style matching correct function names 404 | function-naming-style=snake_case 405 | 406 | # Regular expression matching correct function names. Overrides function- 407 | # naming-style 408 | #function-rgx= 409 | 410 | # Good variable names which should always be accepted, separated by a comma 411 | good-names=i, 412 | j, 413 | k, 414 | ex, 415 | Run, 416 | _ 417 | 418 | # Include a hint for the correct naming format with invalid-name 419 | include-naming-hint=no 420 | 421 | # Naming style matching correct inline iteration names 422 | inlinevar-naming-style=any 423 | 424 | # Regular expression matching correct inline iteration names. Overrides 425 | # inlinevar-naming-style 426 | #inlinevar-rgx= 427 | 428 | # Naming style matching correct method names 429 | method-naming-style=snake_case 430 | 431 | # Regular expression matching correct method names. 
Overrides method-naming- 432 | # style 433 | #method-rgx= 434 | 435 | # Naming style matching correct module names 436 | module-naming-style=snake_case 437 | 438 | # Regular expression matching correct module names. Overrides module-naming- 439 | # style 440 | #module-rgx= 441 | 442 | # Colon-delimited sets of names that determine each other's naming style when 443 | # the name regexes allow several styles. 444 | name-group= 445 | 446 | # Regular expression which should only match function or class names that do 447 | # not require a docstring. 448 | no-docstring-rgx=^_ 449 | 450 | # List of decorators that produce properties, such as abc.abstractproperty. Add 451 | # to this list to register other decorators that produce valid properties. 452 | property-classes=abc.abstractproperty 453 | 454 | # Naming style matching correct variable names 455 | variable-naming-style=snake_case 456 | 457 | # Regular expression matching correct variable names. Overrides variable- 458 | # naming-style 459 | #variable-rgx= 460 | 461 | 462 | [DESIGN] 463 | 464 | # Maximum number of arguments for function / method 465 | max-args=5 466 | 467 | # Maximum number of attributes for a class (see R0902). 468 | max-attributes=7 469 | 470 | # Maximum number of boolean expressions in a if statement 471 | max-bool-expr=5 472 | 473 | # Maximum number of branch for function / method body 474 | max-branches=12 475 | 476 | # Maximum number of locals for function / method body 477 | max-locals=15 478 | 479 | # Maximum number of parents for a class (see R0901). 480 | max-parents=7 481 | 482 | # Maximum number of public methods for a class (see R0904). 483 | max-public-methods=20 484 | 485 | # Maximum number of return / yield for function / method body 486 | max-returns=6 487 | 488 | # Maximum number of statements in function / method body 489 | max-statements=50 490 | 491 | # Minimum number of public methods for a class (see R0903). 492 | min-public-methods=2 493 | 494 | 495 | [CLASSES] 496 | 497 | # List of method names used to declare (i.e. assign) instance attributes. 498 | defining-attr-methods=__init__, 499 | __new__, 500 | setUp 501 | 502 | # List of member names, which should be excluded from the protected access 503 | # warning. 504 | exclude-protected=_asdict, 505 | _fields, 506 | _replace, 507 | _source, 508 | _make 509 | 510 | # List of valid names for the first argument in a class method. 511 | valid-classmethod-first-arg=cls 512 | 513 | # List of valid names for the first argument in a metaclass class method. 514 | valid-metaclass-classmethod-first-arg=mcs 515 | 516 | 517 | [IMPORTS] 518 | 519 | # Allow wildcard imports from modules that define __all__. 520 | allow-wildcard-with-all=no 521 | 522 | # Analyse import fallback blocks. This can be used to support both Python 2 and 523 | # 3 compatible code, which means that the block might have code that exists 524 | # only in one or another interpreter, leading to false positives when analysed. 525 | analyse-fallback-blocks=no 526 | 527 | # Deprecated modules which should not be used, separated by a comma 528 | deprecated-modules=regsub, 529 | TERMIOS, 530 | Bastion, 531 | rexec 532 | 533 | # Create a graph of external dependencies in the given file (report RP0402 must 534 | # not be disabled) 535 | ext-import-graph= 536 | 537 | # Create a graph of every (i.e. 
internal and external) dependencies in the 538 | # given file (report RP0402 must not be disabled) 539 | import-graph= 540 | 541 | # Create a graph of internal dependencies in the given file (report RP0402 must 542 | # not be disabled) 543 | int-import-graph= 544 | 545 | # Force import order to recognize a module as part of the standard 546 | # compatibility libraries. 547 | known-standard-library= 548 | 549 | # Force import order to recognize a module as part of a third party library. 550 | known-third-party=enchant 551 | 552 | 553 | [EXCEPTIONS] 554 | 555 | # Exceptions that will emit a warning when being caught. Defaults to 556 | # "Exception" 557 | overgeneral-exceptions=builtins.Exception 558 | -------------------------------------------------------------------------------- /odmpy/processing/audiobook.py: -------------------------------------------------------------------------------- 1 | # Copyright (C) 2023 github.com/ping 2 | # 3 | # This file is part of odmpy. 4 | # 5 | # odmpy is free software: you can redistribute it and/or modify 6 | # it under the terms of the GNU General Public License as published by 7 | # the Free Software Foundation, either version 3 of the License, or 8 | # (at your option) any later version. 9 | # 10 | # odmpy is distributed in the hope that it will be useful, 11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 | # GNU General Public License for more details. 14 | # 15 | # You should have received a copy of the GNU General Public License 16 | # along with odmpy. If not, see . 17 | # 18 | 19 | import argparse 20 | import datetime 21 | import json 22 | import logging 23 | import shutil 24 | from typing import Optional, Any, Dict, List 25 | from typing import OrderedDict as OrderedDictType 26 | 27 | import eyed3 # type: ignore[import] 28 | import requests 29 | from eyed3.id3 import ID3_DEFAULT_VERSION, ID3_V2_3, ID3_V2_4 # type: ignore[import] 30 | from requests.exceptions import HTTPError, ConnectionError 31 | from termcolor import colored 32 | from tqdm import tqdm 33 | 34 | from .shared import ( 35 | generate_names, 36 | write_tags, 37 | generate_cover, 38 | remux_mp3, 39 | merge_into_mp3, 40 | convert_to_m4b, 41 | create_opf, 42 | get_best_cover_url, 43 | extract_isbn, 44 | ) 45 | from ..errors import OdmpyRuntimeError 46 | from ..libby import USER_AGENT, merge_toc, PartMeta, LibbyFormats 47 | from ..overdrive import OverDriveClient 48 | from ..utils import slugify, plural_or_singular_noun as ps 49 | 50 | 51 | # 52 | # Main processing logic for libby direct audiobook loans 53 | # 54 | 55 | 56 | def process_audiobook_loan( 57 | loan: Dict, 58 | openbook: Dict, 59 | parsed_toc: OrderedDictType[str, PartMeta], 60 | session: requests.Session, 61 | args: argparse.Namespace, 62 | logger: logging.Logger, 63 | ) -> None: 64 | """ 65 | Download the audiobook loan directly via Libby without the use of 66 | an odm file 67 | 68 | :param loan: 69 | :param openbook: 70 | :param parsed_toc: 71 | :param session: From `LibbyClient.libby_session` because it contains a needed auth cookie 72 | :param args: 73 | :param logger: 74 | :return: 75 | """ 76 | 77 | ffmpeg_loglevel = "info" if logger.level == logging.DEBUG else "fatal" 78 | id3v2_version = ID3_DEFAULT_VERSION 79 | if args.id3v2_version == 3: 80 | id3v2_version = ID3_V2_3 81 | if args.id3v2_version == 4: 82 | id3v2_version = ID3_V2_4 83 | 84 | title = loan["title"] 85 | overdrive_media_id = loan["id"] 86 | sub_title = 
loan.get("subtitle", None) 87 | cover_url = get_best_cover_url(loan) 88 | authors = [ 89 | c["name"] for c in openbook.get("creator", []) if c.get("role", "") == "author" 90 | ] 91 | if not authors: 92 | authors = [ 93 | c["name"] 94 | for c in openbook.get("creator", []) 95 | if c.get("role", "") == "editor" 96 | ] 97 | if not authors: 98 | authors = [c["name"] for c in openbook.get("creator", [])] 99 | narrators = [ 100 | c["name"] 101 | for c in openbook.get("creator", []) 102 | if c.get("role", "") == "narrator" 103 | ] 104 | languages: Optional[List[str]] = ( 105 | [str(openbook.get("language"))] if openbook.get("language") else [] 106 | ) 107 | subjects = [subj["name"] for subj in loan.get("subjects", []) if subj.get("name")] 108 | publish_date = loan.get("publishDate", None) 109 | publisher = loan.get("publisherAccount", {}).get("name", "") or "" 110 | series = loan.get("series", "") 111 | series_reading_order = loan.get("detailedSeries", {}).get("readingOrder", "") 112 | description = ( 113 | openbook.get("description", {}).get("full", "") 114 | or openbook.get("description", {}).get("short") 115 | or "" 116 | ) 117 | debug_meta: Dict[str, Any] = { 118 | "meta": { 119 | "title": title, 120 | "coverUrl": cover_url, 121 | "authors": authors, 122 | "publisher": publisher, 123 | "description": description, 124 | } 125 | } 126 | 127 | download_parts: List[PartMeta] = list(parsed_toc.values()) # noqa 128 | debug_meta["download_parts"] = [] 129 | for p in download_parts: 130 | chapters = [ 131 | {"title": m.title, "start": m.start_second, "end": m.end_second} 132 | for m in p["chapters"] 133 | ] 134 | debug_meta["download_parts"].append( 135 | { 136 | "url": p["url"], 137 | "audio-duration": p["audio-duration"], 138 | "file-length": p["file-length"], 139 | "spine-position": p["spine-position"], 140 | "chapters": chapters, 141 | } 142 | ) 143 | 144 | logger.info( 145 | f'Downloading "{colored(title, "blue", attrs=["bold"])}" ' 146 | f'by "{colored(", ".join(authors), "blue", attrs=["bold"])}" ' 147 | f'in {len(download_parts)} {ps(len(download_parts), "part")}...' 
148 | ) 149 | 150 | book_folder, book_filename = generate_names( 151 | title=title, 152 | series=series, 153 | series_reading_order=series_reading_order, 154 | authors=authors, 155 | edition=loan.get("edition") or "", 156 | title_id=loan["id"], 157 | args=args, 158 | logger=logger, 159 | ) 160 | book_m4b_filename = book_filename.with_suffix(".m4b") 161 | 162 | # check early if a merged file is already saved 163 | if ( 164 | args.merge_output 165 | and ( 166 | book_filename if args.merge_format == "mp3" else book_m4b_filename 167 | ).exists() 168 | ): 169 | logger.warning( 170 | 'Already saved "%s"', 171 | colored( 172 | str(book_filename if args.merge_format == "mp3" else book_m4b_filename), 173 | "magenta", 174 | ), 175 | ) 176 | return 177 | 178 | if args.is_debug_mode: 179 | with book_folder.joinpath("loan.json").open("w", encoding="utf-8") as f: 180 | json.dump(loan, f, indent=2) 181 | 182 | with book_folder.joinpath("openbook.json").open("w", encoding="utf-8") as f: 183 | json.dump(openbook, f, indent=2) 184 | 185 | cover_filename, cover_bytes = generate_cover( 186 | book_folder=book_folder, 187 | cover_url=cover_url, 188 | session=session, 189 | timeout=args.timeout, 190 | logger=logger, 191 | ) 192 | 193 | keep_cover = args.always_keep_cover 194 | file_tracks = [] 195 | audio_bitrate = 0 196 | for p in download_parts: 197 | part_number = p["spine-position"] + 1 198 | part_filename = book_folder.joinpath( 199 | f"{slugify(f'{title} - Part {part_number:02d}', allow_unicode=True)}.mp3" 200 | ) 201 | part_tmp_filename = part_filename.with_suffix(".part") 202 | part_file_size = p["file-length"] 203 | part_download_url = p["url"] 204 | 205 | if part_filename.exists(): 206 | logger.warning("Already saved %s", colored(str(part_filename), "magenta")) 207 | else: 208 | try: 209 | already_downloaded_len = 0 210 | if part_tmp_filename.exists(): 211 | already_downloaded_len = part_tmp_filename.stat().st_size 212 | 213 | part_download_res = session.get( 214 | part_download_url, 215 | headers={ 216 | "User-Agent": USER_AGENT, 217 | "Range": f"bytes={already_downloaded_len}-" 218 | if already_downloaded_len 219 | else None, 220 | }, 221 | timeout=args.timeout, 222 | stream=True, 223 | ) 224 | part_download_res.raise_for_status() 225 | 226 | with tqdm.wrapattr( 227 | part_download_res.raw, 228 | "read", 229 | total=part_file_size, 230 | initial=already_downloaded_len, 231 | desc=f"Part {part_number:2d}", 232 | disable=args.hide_progress, 233 | ) as res_raw: 234 | with part_tmp_filename.open( 235 | "ab" if already_downloaded_len else "wb" 236 | ) as outfile: 237 | shutil.copyfileobj(res_raw, outfile) 238 | 239 | # try to remux file to remove mp3 lame tag errors 240 | remux_mp3( 241 | part_tmp_filename=part_tmp_filename, 242 | part_filename=part_filename, 243 | ffmpeg_loglevel=ffmpeg_loglevel, 244 | logger=logger, 245 | ) 246 | 247 | except HTTPError as he: 248 | logger.error(f"HTTPError: {str(he)}") 249 | logger.debug(he.response.content) 250 | raise OdmpyRuntimeError("HTTP Error while downloading part file.") 251 | 252 | except ConnectionError as ce: 253 | logger.error(f"ConnectionError: {str(ce)}") 254 | raise OdmpyRuntimeError("Connection Error while downloading part file.") 255 | 256 | # Save id3 info only on new download, ref #42 257 | # This also makes handling of part files consistent with merged files 258 | try: 259 | # Fill id3 info for mp3 part 260 | audiofile = eyed3.load(part_filename) 261 | variable_bitrate, audio_bitrate = audiofile.info.bit_rate 262 | if variable_bitrate: 263 | # don't 
    for p in download_parts:
        part_number = p["spine-position"] + 1
        part_filename = book_folder.joinpath(
            f"{slugify(f'{title} - Part {part_number:02d}', allow_unicode=True)}.mp3"
        )
        part_tmp_filename = part_filename.with_suffix(".part")
        part_file_size = p["file-length"]
        part_download_url = p["url"]

        if part_filename.exists():
            logger.warning("Already saved %s", colored(str(part_filename), "magenta"))
        else:
            try:
                already_downloaded_len = 0
                if part_tmp_filename.exists():
                    already_downloaded_len = part_tmp_filename.stat().st_size

                part_download_res = session.get(
                    part_download_url,
                    headers={
                        "User-Agent": USER_AGENT,
                        "Range": f"bytes={already_downloaded_len}-"
                        if already_downloaded_len
                        else None,
                    },
                    timeout=args.timeout,
                    stream=True,
                )
                part_download_res.raise_for_status()

                with tqdm.wrapattr(
                    part_download_res.raw,
                    "read",
                    total=part_file_size,
                    initial=already_downloaded_len,
                    desc=f"Part {part_number:2d}",
                    disable=args.hide_progress,
                ) as res_raw:
                    with part_tmp_filename.open(
                        "ab" if already_downloaded_len else "wb"
                    ) as outfile:
                        shutil.copyfileobj(res_raw, outfile)

                # try to remux file to remove mp3 lame tag errors
                remux_mp3(
                    part_tmp_filename=part_tmp_filename,
                    part_filename=part_filename,
                    ffmpeg_loglevel=ffmpeg_loglevel,
                    logger=logger,
                )

            except HTTPError as he:
                logger.error(f"HTTPError: {str(he)}")
                logger.debug(he.response.content)
                raise OdmpyRuntimeError("HTTP Error while downloading part file.")

            except ConnectionError as ce:
                logger.error(f"ConnectionError: {str(ce)}")
                raise OdmpyRuntimeError("Connection Error while downloading part file.")

            # Save id3 info only on new download, ref #42
            # This also makes handling of part files consistent with merged files
            try:
                # Fill id3 info for mp3 part
                audiofile = eyed3.load(part_filename)
                variable_bitrate, audio_bitrate = audiofile.info.bit_rate
                if variable_bitrate:
                    # don't use vbr
                    audio_bitrate = 0
                write_tags(
                    audiofile=audiofile,
                    title=title,
                    sub_title=sub_title,
                    authors=authors,
                    narrators=narrators,
                    publisher=publisher,
                    description=description,
                    cover_bytes=cover_bytes,
                    genres=subjects,
                    languages=languages,
                    published_date=publish_date,
                    series=series,
                    part_number=part_number,
                    total_parts=len(download_parts),
                    overdrive_id=overdrive_media_id,
                    isbn=extract_isbn(
                        loan.get("formats", []), [LibbyFormats.AudioBookMP3]
                    ),
                    always_overwrite=args.overwrite_tags,
                    delimiter=args.tag_delimiter,
                )
                audiofile.tag.save(version=id3v2_version)

                if (
                    args.add_chapters
                    and not args.merge_output
                    and (args.overwrite_tags or not audiofile.tag.table_of_contents)
                ):
                    if args.overwrite_tags and audiofile.tag.table_of_contents:
                        # Clear existing toc to prevent "There may only be one top-level table of contents.
                        # Toc 'b'toc'' is current top-level." error
                        for f in list(audiofile.tag.table_of_contents):
                            audiofile.tag.table_of_contents.remove(f.element_id)  # type: ignore[attr-defined]

                    toc = audiofile.tag.table_of_contents.set(
                        "toc".encode("ascii"),
                        toplevel=True,
                        ordered=True,
                        child_ids=[],
                        description="Table of Contents",
                    )
                    chapter_marks = p["chapters"]
                    for i, m in enumerate(chapter_marks):
                        title_frameset = eyed3.id3.frames.FrameSet()
                        title_frameset.setTextFrame(eyed3.id3.frames.TITLE_FID, m.title)
                        chap = audiofile.tag.chapters.set(
                            f"ch{i:02d}".encode("ascii"),
                            times=(
                                round(m.start_second * 1000),
                                round(m.end_second * 1000),
                            ),
                            sub_frames=title_frameset,
                        )
                        toc.child_ids.append(chap.element_id)
                        start_time = datetime.timedelta(seconds=m.start_second)
                        end_time = datetime.timedelta(seconds=m.end_second)
                        logger.debug(
                            'Added chap tag => %s: %s-%s "%s" to "%s"',
                            colored(f"ch{i:02d}", "cyan"),
                            start_time,
                            end_time,
                            colored(m.title, "cyan"),
                            colored(str(part_filename), "blue"),
                        )
                    audiofile.tag.save(version=id3v2_version)

            except Exception as e:  # pylint: disable=broad-except
                logger.warning(
                    "Error saving ID3: %s", colored(str(e), "red", attrs=["bold"])
                )
                keep_cover = True

            logger.info('Saved "%s"', colored(str(part_filename), "magenta"))

        file_tracks.append({"file": part_filename})
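
    # If args.merge_output is set, the downloaded parts are combined into a single
    # file: merge_into_mp3() joins the part MP3s, the merged file is re-tagged,
    # chapter markers are rebuilt from the merged table of contents, and, when
    # args.merge_format is "m4b", convert_to_m4b() produces the final M4B. The
    # per-part MP3s are then deleted unless args.keep_mp3 is set.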
    debug_meta["file_tracks"] = [{"file": str(ft["file"])} for ft in file_tracks]
    if args.merge_output:
        logger.info(
            'Generating "%s"...',
            colored(
                str(book_filename if args.merge_format == "mp3" else book_m4b_filename),
                "magenta",
            ),
        )

        merge_into_mp3(
            book_filename=book_filename,
            file_tracks=file_tracks,
            audio_bitrate=audio_bitrate,
            ffmpeg_loglevel=ffmpeg_loglevel,
            hide_progress=args.hide_progress,
            logger=logger,
        )

        audiofile = eyed3.load(book_filename)
        write_tags(
            audiofile=audiofile,
            title=title,
            sub_title=sub_title,
            authors=authors,
            narrators=narrators,
            publisher=publisher,
            description=description,
            cover_bytes=cover_bytes,
            genres=subjects,
            languages=languages,
            published_date=publish_date,
            series=series,
            part_number=0,
            total_parts=0,
            overdrive_id=overdrive_media_id,
            isbn=extract_isbn(loan.get("formats", []), [LibbyFormats.AudioBookMP3]),
            always_overwrite=args.overwrite_tags,
            delimiter=args.tag_delimiter,
        )

        if args.add_chapters and (
            args.overwrite_tags or not audiofile.tag.table_of_contents
        ):
            if args.overwrite_tags and audiofile.tag.table_of_contents:
                # Clear existing toc to prevent "There may only be one top-level table of contents.
                # Toc 'b'toc'' is current top-level." error
                for f in list(audiofile.tag.table_of_contents):
                    audiofile.tag.table_of_contents.remove(f.element_id)  # type: ignore[attr-defined]

            toc = audiofile.tag.table_of_contents.set(
                "toc".encode("ascii"),
                toplevel=True,
                ordered=True,
                child_ids=[],
                description="Table of Contents",
            )
            merged_markers = merge_toc(parsed_toc)
            debug_meta["merged_markers"] = [
                {"title": m.title, "start": m.start_second, "end": m.end_second}
                for m in merged_markers
            ]

            for i, m in enumerate(merged_markers):
                title_frameset = eyed3.id3.frames.FrameSet()
                title_frameset.setTextFrame(eyed3.id3.frames.TITLE_FID, m.title)
                chap = audiofile.tag.chapters.set(
                    f"ch{i}".encode("ascii"),
                    times=(round(m.start_second * 1000), round(m.end_second * 1000)),
                    sub_frames=title_frameset,
                )
                toc.child_ids.append(chap.element_id)
                start_time = datetime.timedelta(seconds=m.start_second)
                end_time = datetime.timedelta(seconds=m.end_second)
                logger.debug(
                    'Added chap tag => %s: %s-%s "%s" to "%s"',
                    colored(f"ch{i}", "cyan"),
                    start_time,
                    end_time,
                    colored(m.title, "cyan"),
                    colored(str(book_filename), "blue"),
                )

        audiofile.tag.save(version=id3v2_version)

        if args.merge_format == "mp3":
            logger.info(
                'Merged files into "%s"',
                colored(
                    str(
                        book_filename
                        if args.merge_format == "mp3"
                        else book_m4b_filename
                    ),
                    "magenta",
                ),
            )

        if args.merge_format == "m4b":
            convert_to_m4b(
                book_filename=book_filename,
                book_m4b_filename=book_m4b_filename,
                cover_filename=cover_filename,
                merge_codec=args.merge_codec,
                audio_bitrate=audio_bitrate,
                ffmpeg_loglevel=ffmpeg_loglevel,
                hide_progress=args.hide_progress,
                logger=logger,
            )

        if not args.keep_mp3:
            for file_track in file_tracks:
                try:
                    file_track["file"].unlink()
                except Exception as e:  # pylint: disable=broad-except
                    logger.warning(f'Error deleting "{file_track["file"]}": {str(e)}')

    if not keep_cover and cover_filename.exists():
        try:
            cover_filename.unlink()
        except Exception as e:  # pylint: disable=broad-except
            logger.warning(f'Error deleting "{cover_filename}": {str(e)}')
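
    # When args.generate_opf is set, an OPF metadata file is written next to the
    # audiobook. Media details are fetched via OverDriveClient.media() to feed
    # create_opf(); an existing .opf is left untouched.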
    if args.generate_opf:
        if args.merge_output:
            opf_file_path = book_filename.with_suffix(".opf")
        else:
            opf_file_path = book_folder.joinpath(
                f"{slugify(title, allow_unicode=True)}.opf"
            )
        if not opf_file_path.exists():
            od_client = OverDriveClient(
                user_agent=USER_AGENT, timeout=args.timeout, retry=args.retries
            )
            media_info = od_client.media(loan["id"])
            create_opf(
                media_info,
                cover_filename if keep_cover else None,
                file_tracks
                if not args.merge_output
                else [
                    {
                        "file": book_filename
                        if args.merge_format == "mp3"
                        else book_m4b_filename
                    }
                ],
                opf_file_path,
                logger,
            )
        else:
            logger.info("Already saved %s", colored(str(opf_file_path), "magenta"))

    if args.write_json:
        with book_folder.joinpath("debug.json").open("w", encoding="utf-8") as outfile:
            json.dump(debug_meta, outfile, indent=2)

    if not args.is_debug_mode:
        # clean up
        for file_name in (
            "openbook.json",
            "loan.json",
        ):
            target = book_folder.joinpath(file_name)
            if target.exists():
                target.unlink()

--------------------------------------------------------------------------------