├── .gitignore
├── LICENSE
├── README.md
├── omnivore-export.py
├── omnivore-summary.py
└── omnivore-to-wallabag.py

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# PyCharm project settings
.idea

# VS Code project settings
.vscode

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2023 Christoph Zwerschke

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# omnivore-export

This is an export script for the
[Omnivore](https://github.com/omnivore-app/omnivore)
read-it-later solution, written in Python.

Its main purpose was to create a backup of all links saved in Omnivore
back when the Omnivore app still lacked an export function.

Since Omnivore also lacked a way to show the number of saved articles,
this repository additionally provides a script that prints these numbers.

In the meantime, Omnivore added a way to export the links in the web UI
via the menu option "Settings" - "Export Data", so the export script
is no longer strictly needed.

Unfortunately, the Omnivore team then decided to
[shut down](https://blog.omnivore.app/p/details-on-omnivore-shutting-down)
the service in November 2024, recommending
[Readwise](https://readwise.io/) (a paid service) or
[Wallabag](https://wallabag.org/) (open source) as replacements.

While Readwise released
an [Omnivore importer](https://x.com/ReadwiseReader/status/1851426417684193527),
Wallabag initially lacked such a feature.

Therefore, this repository also contains a script that converts the exported
links from Omnivore to the import format of Wallabag v2.

In the meantime, Wallabag has also
[integrated](https://github.com/wallabag/wallabag/pull/7754) a function
to import from Omnivore, which will ship with Wallabag version 2.6.10
and can already be used in the hosted solution,
so this script is no longer needed either.

## Export and summary scripts

### Prerequisites

To run these scripts, you need to install
[gql](https://github.com/graphql-python/gql) with httpx support first:

```sh
pip install --pre -U gql[httpx]
```

Next, you must
[create an API key for Omnivore](https://omnivore.app/settings/api).

Then, change the global variable `API_KEY`
in the script `omnivore-export.py`,
or set the environment variable `OMNIVORE_API_KEY`.

If you're not using the free hosted Omnivore,
you must also change the global variable `API_URL`
or set the environment variable `OMNIVORE_API_URL`.

### Other options

You can change the path for the exported data
with the global variable `BACKUP_PATH`
or the environment variable `OMNIVORE_BACKUP_PATH`.
The current date is automatically added to the backup filename,
unless you change the global variable `WITH_DATE` to `False`
or set the environment variable `OMNIVORE_WITH_DATE` to `no`.

There are some more global variables that you can change in the script:

- `SEARCH = "in:all"` - change if you don't want to export everything
  (environment variable: `OMNIVORE_QUERY`)
- `LIMIT = 100` - the batch size when querying the API (max. 100)
- `TIMEOUT = 30` - the request timeout in seconds when querying the API
- `WITH_CONTENT = False` - change if you want to export the content as well
  (environment variable: `OMNIVORE_WITH_CONTENT`)

### Running the script

Finally, just run the script via Python:

```sh
python omnivore-export.py
```

### Data store summary

This repository also contains a script `omnivore-summary.py`
that can be used to print a summary of the data stored in Omnivore.

After configuring it in the same way as the export script, run:

```sh
python omnivore-summary.py
```

### Command line options

Instead of setting parameters in the script or via environment variables,
you can also pass them as options on the command line. You can show the
exact command line syntax by running a script with the `--help` option.

## Data conversion script

### Prerequisites

In order to convert your Omnivore links to Wallabag, you need to export them
first using the function "Settings" - "Export Data" in the Omnivore web UI.

Then you need to unpack the downloaded zip file into a sub-directory named
"export" that should be in the same directory as the conversion script
`omnivore-to-wallabag.py`. You can also specify a different input directory
by changing the global variable `OMNIVORE_EXPORT_DIR` in the script.

### Running the script

Then simply run the script:

```sh
python omnivore-to-wallabag.py
```

It will produce an output file with the name `wallabag.json`.
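
Each entry in that file follows the Wallabag v2 import format. For
illustration, a single converted article looks roughly like this
(all field values here are hypothetical):

```json
{
  "title": "Some Article",
  "url": "https://example.com/some-article",
  "tags": ["news", "python"],
  "is_archived": 1,
  "is_starred": 0,
  "mimetype": "text/html; charset=utf-8",
  "created_at": "2024-10-30T12:34:56.000Z",
  "updated_at": "2024-10-30T12:34:56.000Z",
  "published_at": "2024-10-29T08:00:00.000Z",
  "preview_picture": "https://example.com/thumbnail.jpg",
  "content": "<div>...the article HTML, if included in the export...</div>"
}
```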

If your export from Omnivore contains more than 500 links, it will instead
produce multiple enumerated output files, because Wallabag might run out of
memory when the imported files are too large. You can change the batch size
via the global variable `BATCH_SIZE` in the script.

### Re-importing the data

Finally, import the JSON files generated by the script (in the correct order)
into Wallabag. You can find the import function in the web UI under
"My account" - "Import" - "Wallabag v2" - "Import contents" - "Upload file".

If you imported the wrong files and want to start from scratch, you can use
the function "Config" - "Reset area" - "Remove all entries" in Wallabag.

Note that the script currently does not convert highlights, just the links
and the content.

--------------------------------------------------------------------------------
/omnivore-export.py:
--------------------------------------------------------------------------------
#!/usr/bin/python3.12

"""Export links from Omnivore.

More info at https://github.com/Cito/omnivore-export
"""

import argparse
import sys
from datetime import date
from json import dump
from os import environ
from typing import Any

from gql import Client, gql
from gql.transport.httpx import HTTPXTransport

API_URL = "https://api-prod.omnivore.app/api/graphql"
API_KEY = "XXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"

BACKUP_PATH = "omnivore_backup.json"
WITH_DATE = True

SEARCH = "in:all"
LIMIT = 100
TIMEOUT = 30
WITH_CONTENT = False

QUERY_EXPORT = """
query Export($search: String!,
        $limit: Int!, $after: String,
        $withContent: Boolean!) {
    search(query: $search,
            first: $limit, after: $after,
            includeContent: $withContent) {
        ... on SearchSuccess {
            edges {
                node {
                    title
                    slug
                    url
                    pageType
                    contentReader
                    createdAt
                    updatedAt
                    isArchived
                    readingProgressPercent
                    readingProgressTopPercent
                    readingProgressAnchorIndex
                    author
                    image
                    description
                    publishedAt
                    ownedByViewer
                    originalArticleUrl
                    uploadFileId
                    labels {
                        name
                    }
                    pageId
                    shortId
                    quote
                    annotation
                    state
                    siteName
                    subscription
                    readAt
                    savedAt
                    wordsCount
                    content
                    archivedAt
                }
                cursor
            }
        }
        ... on SearchError {
            errorCodes
        }
    }
}
"""


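# A note on paging (a sketch of the API contract as used by get_all below):
# the search query returns at most LIMIT edges per request, and each edge
# carries a cursor; passing the cursor of the last edge as "after" in the
# next request continues with the following page, and an empty page signals
# that all links have been fetched.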
def get_all(
    url: str, key: str, search: str, with_content: bool
) -> list[dict[str, Any]]:
    print("Reading data...")

    headers = {"Authorization": key}
    transport = HTTPXTransport(url=url, headers=headers, timeout=TIMEOUT)
    client = Client(transport=transport)
    query = gql(QUERY_EXPORT)
    variables = {
        "search": search,
        "limit": LIMIT,
        "after": None,
        "withContent": with_content,
    }

    all_nodes: list[Any] = []
    while True:
        print(".", end="", flush=True)
        result = client.execute(query, variables)
        edges = result["search"]["edges"]
        if not edges:
            break
        # continue after the last edge of this page
        variables["after"] = edges[-1]["cursor"]
        nodes = [edge["node"] for edge in edges]
        all_nodes += nodes
    print()
    return all_nodes


def save_backup(data: Any, path: str) -> None:
    print("Saving data...")

    with open(path, "w", encoding="utf-8") as backup:
        print(f"Dumping at {path}")
        dump(data, backup)


def main():
    parser = argparse.ArgumentParser(description="Export links from Omnivore API")
    parser.add_argument(
        "--url",
        default=environ.get("OMNIVORE_API_URL", API_URL),
        help="the Omnivore API URL",
    )
    parser.add_argument(
        "--key",
        default=environ.get("OMNIVORE_API_KEY", API_KEY),
        help="the Omnivore API key",
    )
    parser.add_argument(
        "--search",
        default=environ.get("OMNIVORE_QUERY", SEARCH),
        help="the Omnivore search query",
    )
    with_content = environ.get("OMNIVORE_WITH_CONTENT", WITH_CONTENT)
    if isinstance(with_content, str):
        # interpret empty strings and common "false" spellings as False
        with_content = with_content.lower() not in ("", "0", "no", "false")
    parser.add_argument(
        "--with-content",
        action="store_true",
        default=with_content,
        help="include page content in the backup",
    )
    parser.add_argument(
        "--path",
        default=environ.get("OMNIVORE_BACKUP_PATH", BACKUP_PATH),
        help="the backup file path",
    )
    with_date = environ.get("OMNIVORE_WITH_DATE", WITH_DATE)
    if isinstance(with_date, str):
        with_date = with_date.lower() not in ("", "0", "no", "false")
    parser.add_argument(
        "--without-date",
        action="store_true",
        default=not with_date,
        help="do not add the current date to the backup path",
    )

    args = parser.parse_args()
    url = args.url
    key = args.key
    search = args.search
    with_content = args.with_content
    path = args.path
    with_date = not args.without_date

    if not key or "X" in key:  # reject the placeholder key
        print("Please specify your Omnivore API key.")
        sys.exit(1)

    nodes = get_all(url, key, search, with_content)
    print("Number of links:", len(nodes))
    if with_date:
        # insert the date before the file extension, splitting at the last
        # dot so that dots in directory names do not break the path
        stem, dot, ext = path.rpartition(".")
        today = date.today().isoformat()
        path = f"{stem}-{today}{dot}{ext}" if dot else f"{path}-{today}"
    save_backup(nodes, path)


if __name__ == "__main__":
    main()

--------------------------------------------------------------------------------
/omnivore-summary.py:
--------------------------------------------------------------------------------
#!/usr/bin/python3.12

"""Summarize links from Omnivore.

More info at https://github.com/Cito/omnivore-export
"""

import argparse
import sys
from collections import Counter
from os import environ
from typing import Any

from gql import Client, gql
from gql.transport.httpx import HTTPXTransport

API_URL = "https://api-prod.omnivore.app/api/graphql"
API_KEY = "XXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"

SEARCH = "in:all"
LIMIT = 100
TIMEOUT = 30

QUERY_SUMMARIZE = """
query Summarize($search: String!,
        $limit: Int!, $after: String) {
    search(query: $search,
            first: $limit, after: $after,
            includeContent: false) {
        ... on SearchSuccess {
            edges {
                node {
                    pageType
                    contentReader
                    createdAt
                    isArchived
                    readingProgressPercent
                    readingProgressTopPercent
                    readingProgressAnchorIndex
                    labels {
                        name
                    }
                    state
                    readAt
                    savedAt
                }
                cursor
            }
        }
        ... on SearchError {
            errorCodes
        }
    }
}
"""


def get_all(url: str, key: str, search: str) -> list[dict[str, Any]]:
    print("Reading data...")

    headers = {"Authorization": key}
    transport = HTTPXTransport(url=url, headers=headers, timeout=TIMEOUT)
    client = Client(transport=transport)
    query = gql(QUERY_SUMMARIZE)
    variables = {"search": search, "limit": LIMIT, "after": None}

    all_nodes: list[Any] = []
    while True:
        print(".", end="", flush=True)
        result = client.execute(query, variables)
        edges = result["search"]["edges"]
        if not edges:
            break
        # continue after the last edge of this page
        variables["after"] = edges[-1]["cursor"]
        nodes = [edge["node"] for edge in edges]
        all_nodes += nodes
    print()

    return all_nodes


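# show_table prints counts in a compact multi-column layout. For example,
# {"Article": 120, "File": 3, "Website": 45} at the default width of 80
# would come out on one line roughly like this (counts are hypothetical):
#
#   Article: 120 | File   :   3 | Website:  45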
def show_table(data: dict[str, Any], width: int = 80) -> None:
    if not data:  # nothing to show (e.g. no labels assigned at all)
        return
    max_key_len = max(len(key) for key in data)
    max_val_len = max(len(str(val)) for val in data.values())
    cols = width // (max_key_len + max_val_len + 5)

    row: list[str] = []
    for key, val in sorted(data.items()):
        row.append(f"{key:<{max_key_len}}: {val:>{max_val_len}}")
        if len(row) >= cols:
            print(" | ".join(row))
            row = []
    if row:
        print(" | ".join(row))


def summarize(nodes: list[dict[str, Any]]) -> None:
    print("Summarizing...")

    num_archived = num_inbox = 0
    page_type_counter = Counter[str]()
    label_counter = Counter[str]()
    for node in nodes:
        if node["isArchived"]:
            num_archived += 1
        else:
            num_inbox += 1
        page_type = node["pageType"]
        if page_type:
            page_type_counter.update([page_type.capitalize()])
        labels = node["labels"]
        if labels:
            label_counter.update(label["name"] for label in labels)

    print()
    print("* Inbox:", num_inbox)
    print("* Archive:", num_archived)
    print()
    print("* Page types:")
    show_table(page_type_counter)
    print()
    print("* Labels:")
    show_table(label_counter)
    print()


def main():
    parser = argparse.ArgumentParser(description="Summarize links from Omnivore API")
    parser.add_argument(
        "--url",
        default=environ.get("OMNIVORE_API_URL", API_URL),
        help="the Omnivore API URL",
    )
    parser.add_argument(
        "--key",
        default=environ.get("OMNIVORE_API_KEY", API_KEY),
        help="the Omnivore API key",
    )
    parser.add_argument(
        "--search",
        default=environ.get("OMNIVORE_QUERY", SEARCH),
        help="the Omnivore search query",
    )

    args = parser.parse_args()

    url = args.url
    key = args.key
    search = args.search

    if not key or "X" in key:  # reject the placeholder key
        print("Please specify your Omnivore API key.")
        sys.exit(1)

    nodes = get_all(url, key, search)
    summarize(nodes)


if __name__ == "__main__":
    main()

--------------------------------------------------------------------------------
/omnivore-to-wallabag.py:
--------------------------------------------------------------------------------
#!/usr/bin/python3.12

"""Convert an Omnivore export to the Wallabag v2 import format.

More info at https://github.com/Cito/omnivore-export
"""

import json
from pathlib import Path
from typing import Any

OMNIVORE_EXPORT_DIR = './export'
WALLABAG_IMPORT_FILE = 'wallabag.json'

BATCH_SIZE = 500
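
# The script expects the unpacked Omnivore export under OMNIVORE_EXPORT_DIR,
# laid out as sketched below (file names and number ranges are examples
# inferred from the patterns used in this script):
#
#   export/
#       metadata_0_to_99.json
#       metadata_100_to_199.json
#       content/
#           <slug>.html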


def get_json_file_number(json_file: Path) -> int:
    """Get the first number from a file name like metadata_100_to_199.json."""
    return int(json_file.stem.split('_', 2)[1])


def convert_tag(tag: str) -> str:
    """Convert an Omnivore label to a Wallabag tag."""
    return tag.lower()


def convert_data(data: dict[str, Any]) -> dict[str, Any]:
    """Convert one Omnivore article record to a Wallabag entry."""
    slug = data['slug']
    data = {
        'title': data['title'],
        'url': data['url'],
        'tags': [convert_tag(label) for label in data['labels']],
        'is_archived': 1 if data['state'] == 'Archived' else 0,
        'is_starred': 0,
        'mimetype': 'text/html; charset=utf-8',
        'created_at': data['savedAt'],
        'updated_at': data['updatedAt'],
        'published_at': data['publishedAt'],
        'preview_picture': data['thumbnail']
    }
    if slug:
        # add the article content if it was part of the export
        content_file = Path(
            OMNIVORE_EXPORT_DIR).joinpath("content", slug + ".html")
        if content_file.exists():
            content = content_file.read_text(encoding='utf-8')
            data['content'] = content
    return data


def write_output(output: list[dict[str, Any]], suffix: str) -> None:
    import_file = WALLABAG_IMPORT_FILE
    if suffix:
        import_file = import_file.replace('.json', f'_{suffix}.json')
    print("Writing output to", import_file)
    with open(import_file, 'w', encoding='utf-8') as f:
        json.dump(output, f, ensure_ascii=False, indent=None)


def main():
    print("Reading input from", OMNIVORE_EXPORT_DIR)
    json_files = sorted(
        Path(OMNIVORE_EXPORT_DIR).glob('metadata_*_to_*.json'),
        key=get_json_file_number)
    if not json_files:
        print("No metadata files found.")
        return
    if BATCH_SIZE:
        # infer the total number of articles from the last file name
        # to determine how many enumerated output files will be needed
        num_articles = int(
            str(json_files[-1]).split('_', 3)[3].split('.', 1)[0]) + 1
        num_parts = num_articles // BATCH_SIZE + 1
        num_digits = len(str(num_parts)) if num_parts > 1 else 0
    else:
        num_digits = 0
    num_batch = 1
    output: list[dict[str, Any]] = []
    for json_file in json_files:
        print("Converting", json_file)
        with open(json_file, encoding='utf-8') as f:
            articles = json.load(f)
        for article in articles:
            output.append(convert_data(article))
        # write a batch as soon as it exceeds the batch size
        # (when BATCH_SIZE is 0, everything goes into one file)
        if BATCH_SIZE and len(output) > BATCH_SIZE:
            write_output(
                output,
                f"{num_batch:0{num_digits}}" if num_digits else '')
            num_batch += 1
            output.clear()
    if output:
        write_output(
            output,
            f"{num_batch:0{num_digits}}" if num_digits else '')
    print("Conversion finished.")


if __name__ == "__main__":
    main()

--------------------------------------------------------------------------------