├── .gitignore ├── LICENSE ├── README.md └── doctr ├── dealing_with_rotations.ipynb ├── export_as_pdfa.ipynb ├── quicktour.ipynb └── using_standalone_predictors.ipynb /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. 
For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 
134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 
193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Notebooks 2 | Home to Jupyter notebooks for Mindee OSS projects 3 | 4 | 5 | ## Using notebooks 6 | 7 | This repository was made to store Jupyter notebooks, but users are expected to access them through the corresponding projects. For instance, in [docTR](https://github.com/mindee/doctr), you can find a reference to the dedicated notebooks [here](https://github.com/mindee/doctr/tree/main/notebooks), which is automatically deployed to the documentation over [there](https://mindee.github.io/doctr/latest/notebooks.html). 8 | 9 | This will provide you with better ways to interact with them, such as [Google Colab](https://research.google.com/colaboratory/). 10 | 11 | 12 | ## Adding/modifying notebooks 13 | 14 | Should you wish to add a Jupyter notebook here, please update the corresponding documentation references to those notebooks in each project. 15 | 16 | For instance, if you modify a notebook in `doctr/`, you might consider editing its [README](https://github.com/mindee/doctr/blob/main/notebooks/README.md). 17 | 18 | 19 | 20 | ### Notebook guidelines 21 | 22 | Here are a few things we try to keep uniform across all notebooks for a clearer understanding by viewers: 23 | 24 | 1. **Picking a good title**: it shouldn't be too long but should remain explicit. The notebook will be referenced in the documentation "Notebook tutorials" section, so there is no need to include "tutorial" in it. 25 | 2. **Picking the file name**: as the number of notebooks grows, we need to keep a sane naming convention that reflects the notebook title (`export_as_pdfa.ipynb` for instance). 26 | 3. **A description goes a long way**: while the title should be informative, please add a description right after it. Your description should explain the purpose of this notebook tutorial. 27 | 4. **Title/section hierarchy**: just like markdown, use `#` for the notebook title, `##` for section titles, `###` for subsections and so on. 28 | 5. **Cell ordering**: please move all imports up into a single cell, and ensure that the notebook runs successfully with the cells in their given order. 29 | 6. **Asset accessibility**: if you use some assets (image, etc.) in your notebook, provide the code to download the corresponding asset. This means that all assets have to be publicly accessible, and that you have the right to use them. 30 | 7. **Use text cells**: your content will help other users who have different coding skills, so please don't refrain from adding explanations in text cells (hyperlinks if necessary, etc.). 31 | 8. **Mind your grammar**: watch out for your grammar; for instance, sentences should begin with an uppercase letter and end with a period. 32 | 9. **Credit where it's due**: the golden rule of open source; remember to credit all external works that you use. 33 | 34 | 35 | 36 | ## License 37 | 38 | Distributed under the Apache 2.0 License. See [`LICENSE`](LICENSE) for more information. 
39 | -------------------------------------------------------------------------------- /doctr/dealing_with_rotations.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": { 6 | "id": "XvNBvlRI0kVk" 7 | }, 8 | "source": [ 9 | "# Dealing with rotated documents\n", 10 | "\n", 11 | "Sometimes, you have to deal with page-level rotations or multi-oriented text inside a document. This notebook shows how to deal with such cases using the `docTR` library." 12 | ] 13 | }, 14 | { 15 | "cell_type": "code", 16 | "execution_count": null, 17 | "metadata": { 18 | "id": "CniRhIKD0kVl" 19 | }, 20 | "outputs": [], 21 | "source": [ 22 | "# First we have to uninstall the preinstalled tensorflow version if we want to work with PyTorch as the backend\n", 23 | "# because the env variables USE_TORCH=1 / USE_TF=1 don't have an effect in Colab\n", 24 | "!pip uninstall -y tensorflow\n", 25 | "# Install doctr\n", 26 | "#!pip install python-doctr[torch,viz]\n", 27 | "# From source\n", 28 | "!pip install python-doctr[torch,viz]@git+https://github.com/mindee/doctr.git" 29 | ] 30 | }, 31 | { 32 | "cell_type": "code", 33 | "execution_count": 2, 34 | "metadata": { 35 | "id": "YVMnvP9K0kVl" 36 | }, 37 | "outputs": [], 38 | "source": [ 39 | "# Imports\n", 40 | "import requests\n", 41 | "import cv2\n", 42 | "\n", 43 | "from doctr.io import DocumentFile\n", 44 | "from doctr.models import ocr_predictor" 45 | ] 46 | }, 47 | { 48 | "cell_type": "markdown", 49 | "metadata": { 50 | "id": "xR4uUzqU0kVm" 51 | }, 52 | "source": [ 53 | "Let's load one such example and see how we can deal with it." 54 | ] 55 | }, 56 | { 57 | "cell_type": "code", 58 | "execution_count": null, 59 | "metadata": { 60 | "vscode": { 61 | "languageId": "plaintext" 62 | }, 63 | "id": "hqw2mLEf0kVm" 64 | }, 65 | "outputs": [], 66 | "source": [ 67 | "# Download a sample\n", 68 | "!wget https://github.com/mindee/doctr/releases/download/v0.1.0/back_cover.jpg\n", 69 | "\n", 70 | "# Display the image with matplotlib\n", 71 | "import matplotlib.pyplot as plt\n", 72 | "\n", 73 | "img = plt.imread('back_cover.jpg')\n", 74 | "plt.imshow(img); plt.axis('off'); plt.show()" 75 | ] 76 | }, 77 | { 78 | "cell_type": "markdown", 79 | "source": [ 80 | "As we can see, our document is slightly rotated.\n", 81 | "\n", 82 | "We have several options to deal with it.\n", 83 | "\n", 84 | "First, we should set `assume_straight_pages` to `False` to indicate that the predictor has to deal with possible rotations.\n", 85 | "Second, we should set `detect_orientation` to `True` to get the orientation appended to our results.\n", 86 | "\n", 87 | "If we only deal with small rotations in the range of ~ -45 to 45 degrees, we can additionally disable the page orientation classification by setting `disable_page_orientation` to `True`, and do the same with `disable_crop_orientation` if our document contains only horizontal text, to speed up the pipeline." 
88 | ], 89 | "metadata": { 90 | "id": "kJ7vxtF11C7f" 91 | } 92 | }, 93 | { 94 | "cell_type": "code", 95 | "execution_count": null, 96 | "metadata": { 97 | "vscode": { 98 | "languageId": "plaintext" 99 | }, 100 | "id": "g4hn2SQ20kVm" 101 | }, 102 | "outputs": [], 103 | "source": [ 104 | "doc = DocumentFile.from_images(['back_cover.jpg'])\n", 105 | "predictor = ocr_predictor(\n", 106 | " pretrained=True,\n", 107 | " det_arch=\"fast_base\",\n", 108 | " reco_arch=\"parseq\",\n", 109 | " assume_straight_pages=False,\n", 110 | " detect_orientation=True,\n", 111 | " disable_crop_orientation=True,\n", 112 | " disable_page_orientation=True,\n", 113 | " straighten_pages=False\n", 114 | ") # .cuda().half() uncomment this line if you run on a GPU\n", 115 | "result = predictor(doc)\n", 116 | "\n", 117 | "# Visualize the result\n", 118 | "result.show()\n", 119 | "\n", 120 | "# Export the result to a JSON-like dictionary\n", 121 | "json_export = result.export()\n", 122 | "print(f\"Detected orientation: {json_export['pages'][0]['orientation']['value']} degrees\")" 123 | ] 124 | }, 125 | { 126 | "cell_type": "markdown", 127 | "source": [ 128 | "Let's see how it looks if we have to deal with larger rotations and page orientation classification enabled." 129 | ], 130 | "metadata": { 131 | "id": "U1sQWv7f4gjd" 132 | } 133 | }, 134 | { 135 | "cell_type": "code", 136 | "source": [ 137 | "from doctr.utils.geometry import rotate_image\n", 138 | "\n", 139 | "doc = DocumentFile.from_images(['back_cover.jpg'])\n", 140 | "# Let's rotate the document by 180 degrees\n", 141 | "doc = [rotate_image(doc[0], 180, expand=False)]\n", 142 | "\n", 143 | "predictor = ocr_predictor(\n", 144 | " pretrained=True,\n", 145 | " det_arch=\"fast_base\",\n", 146 | " reco_arch=\"parseq\",\n", 147 | " assume_straight_pages=False,\n", 148 | " detect_orientation=True,\n", 149 | " disable_crop_orientation=False,\n", 150 | " disable_page_orientation=False,\n", 151 | " straighten_pages=False\n", 152 | ") # .cuda().half() uncomment this line if you run on a GPU\n", 153 | "result = predictor(doc)\n", 154 | "\n", 155 | "# Visualize the result\n", 156 | "result.show()\n", 157 | "\n", 158 | "# Export the result to a JSON-like dictionary\n", 159 | "json_export = result.export()\n", 160 | "print(f\"Detected orientation: {json_export['pages'][0]['orientation']['value']} degrees\")" 161 | ], 162 | "metadata": { 163 | "id": "GRNPmIDO4xBh" 164 | }, 165 | "execution_count": null, 166 | "outputs": [] 167 | }, 168 | { 169 | "cell_type": "markdown", 170 | "source": [ 171 | "Now let's correct this by setting `straighten_pages` to `True`." 
172 | ], 173 | "metadata": { 174 | "id": "3U63axmt67R2" 175 | } 176 | }, 177 | { 178 | "cell_type": "code", 179 | "source": [ 180 | "from doctr.utils.geometry import rotate_image\n", 181 | "\n", 182 | "doc = DocumentFile.from_images(['back_cover.jpg'])\n", 183 | "# Let's rotate the document by 180 degrees\n", 184 | "doc = [rotate_image(doc[0], 180, expand=False)]\n", 185 | "\n", 186 | "predictor = ocr_predictor(\n", 187 | " pretrained=True,\n", 188 | " det_arch=\"fast_base\",\n", 189 | " reco_arch=\"parseq\",\n", 190 | " assume_straight_pages=False,\n", 191 | " detect_orientation=True,\n", 192 | " disable_crop_orientation=False,\n", 193 | " disable_page_orientation=False,\n", 194 | " straighten_pages=True\n", 195 | ") # .cuda().half() uncomment this line if you run on a GPU\n", 196 | "result = predictor(doc)\n", 197 | "\n", 198 | "# Visualize the result\n", 199 | "result.show()\n", 200 | "\n", 201 | "# Export the result to a JSON-like dictionary\n", 202 | "json_export = result.export()\n", 203 | "print(f\"Detected orientation: {json_export['pages'][0]['orientation']['value']} degrees\")\n", 204 | "print()\n", 205 | "print(f\"Extracted text:\\n{result.render()}\")" 206 | ], 207 | "metadata": { 208 | "id": "JBXWDkZo6vUl" 209 | }, 210 | "execution_count": null, 211 | "outputs": [] 212 | } 213 | ], 214 | "metadata": { 215 | "language_info": { 216 | "name": "python" 217 | }, 218 | "colab": { 219 | "provenance": [] 220 | }, 221 | "kernelspec": { 222 | "name": "python3", 223 | "display_name": "Python 3" 224 | } 225 | }, 226 | "nbformat": 4, 227 | "nbformat_minor": 0 228 | } -------------------------------------------------------------------------------- /doctr/export_as_pdfa.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": { 6 | "id": "ID2g9XHMvAXO" 7 | }, 8 | "source": [ 9 | "# Generate PDF/A files from docTR output\n", 10 | "\n", 11 | "These files also have a readable text layer on top of the image, which can be used to search the document with any PDF viewer." 12 | ] 13 | }, 14 | { 15 | "cell_type": "code", 16 | "execution_count": null, 17 | "metadata": { 18 | "id": "sPMz5UYUvAXQ" 19 | }, 20 | "outputs": [], 21 | "source": [ 22 | "# Install the most up-to-date version from GitHub\n", 23 | "\n", 24 | "# TensorFlow\n", 25 | "# !pip install python-doctr[tf,viz]@git+https://github.com/mindee/doctr.git\n", 26 | "\n", 27 | "# PyTorch\n", 28 | "# First we have to uninstall the preinstalled tensorflow version if we want to work with PyTorch as the backend\n", 29 | "# because the env variables USE_TORCH=1 / USE_TF=1 don't have an effect in Colab\n", 30 | "!pip uninstall -y tensorflow\n", 31 | "!pip install python-doctr[torch,viz]@git+https://github.com/mindee/doctr.git\n", 32 | "\n", 33 | "# Install additional requirements (for hOCR conversion and PDF file merging)\n", 34 | "!pip install ocrmypdf\n", 35 | "!pip install PyPDF2" 36 | ] 37 | }, 38 | { 39 | "cell_type": "markdown", 40 | "source": [ 41 | "We will use `ocrmypdf` to parse our exported XML tree in hOCR format ([hOCR convention](https://github.com/kba/hocr-spec/blob/master/1.2/spec.md)) and `PyPDF2` to merge the individually generated PDF/A files into one." 
42 | ], 43 | "metadata": { 44 | "id": "165zmKJeC5a4" 45 | } 46 | }, 47 | { 48 | "cell_type": "code", 49 | "execution_count": 34, 50 | "metadata": { 51 | "id": "ZC8sIbEZvAXS" 52 | }, 53 | "outputs": [], 54 | "source": [ 55 | "# Imports\n", 56 | "import os\n", 57 | "import base64\n", 58 | "import re\n", 59 | "from tempfile import TemporaryDirectory\n", 60 | "\n", 61 | "from PyPDF2 import PdfMerger, PdfReader\n", 62 | "from doctr.io import DocumentFile\n", 63 | "from doctr.models import ocr_predictor, from_hub\n", 64 | "from PIL import Image\n", 65 | "from ocrmypdf.hocrtransform import HocrTransform" 66 | ] 67 | }, 68 | { 69 | "cell_type": "markdown", 70 | "metadata": { 71 | "id": "hzpt5nHFvAXe" 72 | }, 73 | "source": [ 74 | "## OCR the files and show the results\n", 75 | "\n", 76 | "Now we are ready to start the OCR process and show the results." 77 | ] 78 | }, 79 | { 80 | "cell_type": "code", 81 | "execution_count": null, 82 | "metadata": { 83 | "id": "HjhRaZ6tvAXe" 84 | }, 85 | "outputs": [], 86 | "source": [ 87 | "# Download a sample\n", 88 | "!wget https://github.com/mindee/doctr/releases/download/v0.1.0/Versicherungsbedingungen-08-2021.pdf" 89 | ] 90 | }, 91 | { 92 | "cell_type": "code", 93 | "execution_count": null, 94 | "metadata": { 95 | "id": "ytK2KRfR_XIg" 96 | }, 97 | "outputs": [], 98 | "source": [ 99 | "# Read the file\n", 100 | "docs = DocumentFile.from_pdf(\"Versicherungsbedingungen-08-2021.pdf\")\n", 101 | "# The document contains German text, so let's use a multilingual fine-tuned model from the Hugging Face hub.\n", 102 | "reco_model = from_hub(\"Felix92/doctr-torch-parseq-multilingual-v1\")\n", 103 | "model = ocr_predictor(det_arch='fast_base', reco_arch=reco_model, pretrained=True)\n", 104 | "# We will grab only the first two pages of the PDF for demonstration\n", 105 | "result = model(docs[:2])\n", 106 | "result.show()" 107 | ] 108 | }, 109 | { 110 | "cell_type": "markdown", 111 | "metadata": { 112 | "id": "MiA8N21GvAXf" 113 | }, 114 | "source": [ 115 | "## Export as PDF/A\n", 116 | "In this section we will export our documents as PDF/A files.\n", 117 | "\n", 118 | "We show three possible options for this." 119 | ] 120 | }, 121 | { 122 | "cell_type": "markdown", 123 | "metadata": { 124 | "id": "3LgvG3p7_XIh" 125 | }, 126 | "source": [ 127 | "### Each page as a single PDF/A file\n", 128 | "Each page will be saved as a separate PDF/A file." 
129 | ] 130 | }, 131 | { 132 | "cell_type": "code", 133 | "execution_count": 37, 134 | "metadata": { 135 | "id": "2dDy5t1UvAXf" 136 | }, 137 | "outputs": [], 138 | "source": [ 139 | "# returns: list of tuples where the first element is the (bytes) XML string and the second is the ElementTree\n", 140 | "xml_outputs = result.export_as_xml()\n", 141 | "\n", 142 | "# Iterate through the XML outputs and images and export to PDF/A\n", 143 | "# The image is optional; otherwise you can set invisible_text=False and the text will be printed on a blank page\n", 144 | "with TemporaryDirectory() as tmpdir:\n", 145 | " for i, (xml, img) in enumerate(zip(xml_outputs, docs)):\n", 146 | " # Write the image to a temp file\n", 147 | " Image.fromarray(img).save(os.path.join(tmpdir, f\"{i}.jpg\"))\n", 148 | " # Write the XML content to a temp file\n", 149 | " with open(os.path.join(tmpdir, f\"{i}.xml\"), \"w\") as f:\n", 150 | " f.write(xml_outputs[i][0].decode())\n", 151 | " # Init hOCR transformer\n", 152 | " hocr = HocrTransform(hocr_filename=os.path.join(tmpdir, f\"{i}.xml\"), dpi=300)\n", 153 | " # Save as PDF/A\n", 154 | " hocr.to_pdf(out_filename=f\"{i}.pdf\", image_filename=os.path.join(tmpdir, f\"{i}.jpg\"))" 155 | ] 156 | }, 157 | { 158 | "cell_type": "markdown", 159 | "metadata": { 160 | "id": "VMgD73uO_XIh" 161 | }, 162 | "source": [ 163 | "### All merged into one PDF/A file\n", 164 | "All PDF/A files will be merged into one PDF/A file." 165 | ] 166 | }, 167 | { 168 | "cell_type": "code", 169 | "execution_count": 38, 170 | "metadata": { 171 | "id": "SkEZrL-hvAXg" 172 | }, 173 | "outputs": [], 174 | "source": [ 175 | "# returns: list of tuples where the first element is the (bytes) XML string and the second is the ElementTree\n", 176 | "xml_outputs = result.export_as_xml()\n", 177 | "\n", 178 | "# You can also merge multiple PDFs into one\n", 179 | "\n", 180 | "merger = PdfMerger()\n", 181 | "\n", 182 | "with TemporaryDirectory() as tmpdir:\n", 183 | " for i, (xml, img) in enumerate(zip(xml_outputs, docs)):\n", 184 | " # Write the image to a temp file\n", 185 | " Image.fromarray(img).save(os.path.join(tmpdir, f\"{i}.jpg\"))\n", 186 | " # Write the XML content to a temp file\n", 187 | " with open(os.path.join(tmpdir, f\"{i}.xml\"), \"w\") as f:\n", 188 | " f.write(xml_outputs[i][0].decode())\n", 189 | " # Init hOCR transformer\n", 190 | " hocr = HocrTransform(hocr_filename=os.path.join(tmpdir, f\"{i}.xml\"), dpi=300)\n", 191 | " # Save as PDF/A\n", 192 | " hocr.to_pdf(out_filename=os.path.join(tmpdir, f\"{i}.pdf\"), image_filename=os.path.join(tmpdir, f\"{i}.jpg\"))\n", 193 | " # Append to merger\n", 194 | " merger.append(f'{tmpdir}/{i}.pdf')\n", 195 | " # Save as combined PDF\n", 196 | " merger.write('docTR-PDF.pdf')" 197 | ] 198 | }, 199 | { 200 | "cell_type": "markdown", 201 | "metadata": { 202 | "id": "jaupVFSW_XIi" 203 | }, 204 | "source": [ 205 | "### All as base64 encoded PDF/A files\n", 206 | "All PDF/A files will be saved as base64 strings in a list.\n",
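"\n", "If a client receives such a string (e.g. from a REST API), it can be turned back into a PDF file with the standard library alone. A minimal sketch, where `b64_pdf` is a stand-in name for one entry of the list produced below:\n", "\n", "```python\n", "import base64\n", "\n", "# Decode the base64 string back into PDF bytes and write them to disk\n", "with open(\"decoded.pdf\", \"wb\") as f:\n", " f.write(base64.b64decode(b64_pdf))\n", "```"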
207 | ] 208 | }, 209 | { 210 | "cell_type": "code", 211 | "execution_count": null, 212 | "metadata": { 213 | "id": "owWVIPcKvAXg" 214 | }, 215 | "outputs": [], 216 | "source": [ 217 | "# returns: list of tuples where the first element is the (bytes) XML string and the second is the ElementTree\n", 218 | "xml_outputs = result.export_as_xml()\n", 219 | "\n", 220 | "# Or encode the PDFs into base64 (REST API usage)\n", 221 | "base64_encoded_pdfs = []\n", 222 | "\n", 223 | "with TemporaryDirectory() as tmpdir:\n", 224 | " for i, (xml, img) in enumerate(zip(xml_outputs, docs)):\n", 225 | " # Write the image to a temp file\n", 226 | " Image.fromarray(img).save(os.path.join(tmpdir, f\"{i}.jpg\"))\n", 227 | " # Write the XML content to a temp file\n", 228 | " with open(os.path.join(tmpdir, f\"{i}.xml\"), \"w\") as f:\n", 229 | " f.write(xml_outputs[i][0].decode())\n", 230 | " # Init hOCR transformer\n", 231 | " hocr = HocrTransform(hocr_filename=os.path.join(tmpdir, f\"{i}.xml\"), dpi=300)\n", 232 | " # Save as PDF/A\n", 233 | " hocr.to_pdf(out_filename=os.path.join(tmpdir, f\"{i}.pdf\"), image_filename=os.path.join(tmpdir, f\"{i}.jpg\"))\n", 234 | " with open(os.path.join(tmpdir, f\"{i}.pdf\"), \"rb\") as f:\n", 235 | " base64_encoded_pdfs.append(base64.b64encode(f.read()))\n", 236 | "\n", 237 | "print(f'{len(base64_encoded_pdfs)} PDFs encoded')" 238 | ] 239 | }, 240 | { 241 | "cell_type": "markdown", 242 | "metadata": { 243 | "id": "Fd7UAqBu_XIj" 244 | }, 245 | "source": [ 246 | "## How can I use a PDF/A?\n", 247 | "You can open the saved PDFs with any PDF viewer and type some words you are searching for in the document.\n", 248 | "\n", 249 | "Matches will be highlighted in the text layer.\n", 250 | "\n", 251 | "Or you can use Python to search for words in the text layer, for example:" 
252 | ] 253 | }, 254 | { 255 | "cell_type": "code", 256 | "execution_count": null, 257 | "metadata": { 258 | "id": "w6enw9Rz_XIj" 259 | }, 260 | "outputs": [], 261 | "source": [ 262 | "# Search for specific words in the PDF and print all matches\n", 263 | "pattern = \"Allianz\"\n", 264 | "file_name = \"docTR-PDF.pdf\"\n", 265 | "\n", 266 | "reader = PdfReader(file_name)\n", 267 | "num_pages = len(reader.pages)\n", 268 | "\n", 269 | "for i in range(num_pages):\n", 270 | " page = reader.pages[i]\n", 271 | " text = page.extract_text()\n", 272 | "\n", 273 | " for match in re.finditer(pattern, text):\n", 274 | " print(f'Page no: {i} | Match: {match}')" 275 | ] 276 | }, 277 | { 278 | "cell_type": "markdown", 279 | "metadata": { 280 | "id": "zAe03C8w_XIj" 281 | }, 282 | "source": [ 283 | "## To go further\n", 284 | "[Wikipedia PDF/A](https://en.wikipedia.org/wiki/PDF/A)\n", 285 | "\n", 286 | "[Difference between PDF/A and PDF](https://askanydifference.com/difference-between-pdf-a-and-pdf/)\n", 287 | "\n", 288 | "### Happy Coding :)" 289 | ] 290 | } 291 | ], 292 | "metadata": { 293 | "colab": { 294 | "provenance": [] 295 | }, 296 | "interpreter": { 297 | "hash": "103ff19777622dbbd1c8370264e2f56c4c49c75e119dc12fd0a9f41eba03d66f" 298 | }, 299 | "kernelspec": { 300 | "display_name": "Python 3", 301 | "language": "python", 302 | "name": "python3" 303 | }, 304 | "language_info": { 305 | "codemirror_mode": { 306 | "name": "ipython", 307 | "version": 3 308 | }, 309 | "file_extension": ".py", 310 | "mimetype": "text/x-python", 311 | "name": "python", 312 | "nbconvert_exporter": "python", 313 | "pygments_lexer": "ipython3", 314 | "version": "3.8.8" 315 | } 316 | }, 317 | "nbformat": 4, 318 | "nbformat_minor": 0 319 | } 320 | -------------------------------------------------------------------------------- /doctr/quicktour.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": { 6 | "id": "OOb64_drQ8HZ" 7 | }, 8 | "source": [ 9 | "Looking for some examples on how to use docTR for OCR-related tasks? You've come to the right place 😀" 10 | ] 11 | }, 12 | { 13 | "cell_type": "markdown", 14 | "metadata": { 15 | "id": "0eiwDT8qIh4X" 16 | }, 17 | "source": [ 18 | "# Installation\n", 19 | "\n", 20 | "Install all the dependencies to make the most out of docTR. The project provides two main [installation](https://mindee.github.io/doctr/latest/installing.html) streams: one for stable releases, and one for developer mode." 21 | ] 22 | }, 23 | { 24 | "cell_type": "markdown", 25 | "metadata": { 26 | "id": "Bh8uHvOVIvFW" 27 | }, 28 | "source": [ 29 | "## Latest stable release\n", 30 | "\n", 31 | "This will install the latest stable release published by our team on PyPI. It is expected to provide a clean and bug-free experience for all users." 
32 | ] 33 | }, 34 | { 35 | "cell_type": "code", 36 | "execution_count": null, 37 | "metadata": { 38 | "id": "43tpfKq1IxQg" 39 | }, 40 | "outputs": [], 41 | "source": [ 42 | "# TensorFlow\n", 43 | "# !pip install python-doctr[tf,viz]\n", 44 | "\n", 45 | "# PyTorch\n", 46 | "# First we have to uninstall the preinstalled tensorflow version if we want to work with PyTorch as the backend\n", 47 | "# because the env variables USE_TORCH=1 / USE_TF=1 don't have an effect in Colab\n", 48 | "!pip uninstall -y tensorflow\n", 49 | "!pip install python-doctr[torch,viz]" 50 | ] 51 | }, 52 | { 53 | "cell_type": "markdown", 54 | "metadata": { 55 | "id": "zUbhZiRiIxbN" 56 | }, 57 | "source": [ 58 | "## From source\n", 59 | "\n", 60 | "Before being staged for a stable release, we constantly iterate on community feedback to improve the library. Bug fixes and performance improvements are regularly pushed to the project Git repository. Using this installation method, you will access all the latest features that have not yet made their way to a PyPI release!" 61 | ] 62 | }, 63 | { 64 | "cell_type": "code", 65 | "execution_count": null, 66 | "metadata": { 67 | "id": "AJZgLM_CIzKf" 68 | }, 69 | "outputs": [], 70 | "source": [ 71 | "# Install the most up-to-date version from GitHub\n", 72 | "\n", 73 | "# TensorFlow\n", 74 | "# !pip install python-doctr[tf,viz]@git+https://github.com/mindee/doctr.git\n", 75 | "\n", 76 | "# PyTorch\n", 77 | "# First we have to uninstall the preinstalled tensorflow version if we want to work with PyTorch as the backend\n", 78 | "# because the env variables USE_TORCH=1 / USE_TF=1 don't have an effect in Colab\n", 79 | "!pip uninstall -y tensorflow\n", 80 | "!pip install python-doctr[torch,viz]@git+https://github.com/mindee/doctr.git" 81 | ] 82 | }, 83 | { 84 | "cell_type": "markdown", 85 | "metadata": { 86 | "id": "r2mgRuCaJY4F" 87 | }, 88 | "source": [ 89 | "# Basic usage\n", 90 | "\n", 91 | "We're going to review the main features of docTR 🎁\n", 92 | "To give you a proper overview of its capabilities, we will need some free fonts for output visualization:" 93 | ] 94 | }, 95 | { 96 | "cell_type": "code", 97 | "execution_count": null, 98 | "metadata": { 99 | "id": "2Qhrp88TPDZi" 100 | }, 101 | "outputs": [], 102 | "source": [ 103 | "# Install some free fonts for result rendering\n", 104 | "!sudo apt-get install fonts-freefont-ttf -y" 105 | ] 106 | }, 107 | { 108 | "cell_type": "markdown", 109 | "metadata": { 110 | "id": "Fe7KuocKSWX9" 111 | }, 112 | "source": [ 113 | "Let's take care of all the imports directly" 114 | ] 115 | }, 116 | { 117 | "cell_type": "code", 118 | "execution_count": 4, 119 | "metadata": { 120 | "id": "xSoeo0hRJbnU" 121 | }, 122 | "outputs": [], 123 | "source": [ 124 | "%matplotlib inline\n", 125 | "import matplotlib.pyplot as plt\n", 126 | "\n", 127 | "from doctr.io import DocumentFile\n", 128 | "from doctr.models import ocr_predictor" 129 | ] 130 | }, 131 | { 132 | "cell_type": "markdown", 133 | "metadata": { 134 | "id": "7wwEmHqZSaLF" 135 | }, 136 | "source": [ 137 | "For the next steps, we will need a proper PDF document that will be used to showcase the library's features" 138 | ] 139 | }, 140 | { 141 | "cell_type": "code", 142 | "execution_count": null, 143 | "metadata": { 144 | "id": "UIBwFsVuJocy" 145 | }, 146 | "outputs": [], 147 | "source": [ 148 | "# Download a sample\n", 149 | "!wget https://eforms.com/download/2019/01/Cash-Payment-Receipt-Template.pdf\n", 150 | "# Read the file\n", 151 | "doc = 
DocumentFile.from_pdf(\"Cash-Payment-Receipt-Template.pdf\")\n", 152 | "print(f\"Number of pages: {len(doc)}\")" 153 | ] 154 | }, 155 | { 156 | "cell_type": "markdown", 157 | "metadata": { 158 | "id": "cM6PPiWpSmb0" 159 | }, 160 | "source": [ 161 | "docTR is, under the hood, running Deep Learning models to perform the different tasks it supports. Those models were built and trained with very popular frameworks for maximum compatibility (you will be pleased to know that you can switch from [PyTorch](https://pytorch.org/) to [TensorFlow](https://www.tensorflow.org/) without noticing any difference). By default, our high-level API sets the best default values so that you get high-performing models without having to know anything about them. All of this is wrapped in a `Predictor` object, which will take care of pre-processing, model inference and post-processing for you ⚡\n", 162 | "\n", 163 | "Let's instantiate one!" 164 | ] 165 | }, 166 | { 167 | "cell_type": "code", 168 | "execution_count": null, 169 | "metadata": { 170 | "id": "WAoUcVQbKIfT" 171 | }, 172 | "outputs": [], 173 | "source": [ 174 | "# Instantiate a pretrained model\n", 175 | "predictor = ocr_predictor(pretrained=True)" 176 | ] 177 | }, 178 | { 179 | "cell_type": "markdown", 180 | "metadata": { 181 | "id": "PKKYSs0ET0XQ" 182 | }, 183 | "source": [ 184 | "By default, PyTorch provides a nice visual description of a model, which is handy when it comes to debugging or knowing what you just created. We also added a similar feature for the TensorFlow backend so that you don't miss out on this nice assistance.\n", 185 | "\n", 186 | "Let's dive into this model 🕵" 187 | ] 188 | }, 189 | { 190 | "cell_type": "code", 191 | "execution_count": null, 192 | "metadata": { 193 | "id": "urR0KyUYOT-B" 194 | }, 195 | "outputs": [], 196 | "source": [ 197 | "# Display the architecture\n", 198 | "print(predictor)" 199 | ] 200 | }, 201 | { 202 | "cell_type": "markdown", 203 | "metadata": { 204 | "id": "dlrtkNzTURAo" 205 | }, 206 | "source": [ 207 | "Here we are inspecting the most complex (and high-level) object of the docTR API: an OCR predictor. Since docTR achieves Optical Character Recognition by first localizing textual elements (Text Detection), then extracting the corresponding text from each location (Text Recognition), the OCR Predictor wraps two sub-predictors: one for text detection, and the other for text recognition." 208 | ] 209 | }, 210 | { 211 | "cell_type": "markdown", 212 | "metadata": { 213 | "id": "uyVfbzhUL-Rw" 214 | }, 215 | "source": [ 216 | "## Basic inference\n", 217 | "\n", 218 | "It looks quite complex, doesn't it?\n", 219 | "Well, that will not prevent you from easily getting nice results. See for yourself:" 220 | ] 221 | }, 222 | { 223 | "cell_type": "code", 224 | "execution_count": 8, 225 | "metadata": { 226 | "id": "RLPsdCj3PWzI" 227 | }, 228 | "outputs": [], 229 | "source": [ 230 | "result = predictor(doc)" 231 | ] 232 | }, 233 | { 234 | "cell_type": "markdown", 235 | "metadata": { 236 | "id": "eFAwQaKuPYXG" 237 | }, 238 | "source": [ 239 | "## Prediction visualization\n", 240 | "\n", 241 | "If you rightfully prefer to see the results with your eyes, docTR includes a few visualization features. 
We will first overlay our predictions on the original document:" 242 | ] 243 | }, 244 | { 245 | "cell_type": "code", 246 | "execution_count": null, 247 | "metadata": { 248 | "id": "sOcqPNnPMAE7" 249 | }, 250 | "outputs": [], 251 | "source": [ 252 | "result.show()" 253 | ] 254 | }, 255 | { 256 | "cell_type": "markdown", 257 | "metadata": { 258 | "id": "EnJERph1VXgN" 259 | }, 260 | "source": [ 261 | "Looks accurate!\n", 262 | "But we can go further: if the extracted information is correctly structured, we should be able to recreate the page entirely. So let's do this 🎨" 263 | ] 264 | }, 265 | { 266 | "cell_type": "code", 267 | "execution_count": null, 268 | "metadata": { 269 | "id": "QdoSNYzROeRv" 270 | }, 271 | "outputs": [], 272 | "source": [ 273 | "synthetic_pages = result.synthesize()\n", 274 | "plt.imshow(synthetic_pages[0]); plt.axis('off'); plt.show()" 275 | ] 276 | }, 277 | { 278 | "cell_type": "markdown", 279 | "metadata": { 280 | "id": "mEabainyPeV8" 281 | }, 282 | "source": [ 283 | "## Exporting results\n", 284 | "\n", 285 | "OK, so the predictions are relevant, but how would you integrate this into your own document processing pipeline? Perhaps you're not using Python at all?\n", 286 | "\n", 287 | "Well, if you happen to be using JSON or XML exports, they are already supported 🤗" 288 | ] 289 | }, 290 | { 291 | "cell_type": "code", 292 | "execution_count": null, 293 | "metadata": { 294 | "id": "mhIolyjQOqLp" 295 | }, 296 | "outputs": [], 297 | "source": [ 298 | "# JSON export\n", 299 | "json_export = result.export()\n", 300 | "print(json_export)" 301 | ] 302 | }, 303 | { 304 | "cell_type": "code", 305 | "execution_count": null, 306 | "metadata": { 307 | "id": "3RsuKIlYPnNf" 308 | }, 309 | "outputs": [], 310 | "source": [ 311 | "# XML export\n", 312 | "xml_output = result.export_as_xml()\n", 313 | "print(xml_output[0][0])" 314 | ] 315 | }, 316 | { 317 | "cell_type": "markdown", 318 | "source": [ 319 | "Or if you only need the extracted plain text" 320 | ], 321 | "metadata": { 322 | "id": "1jMtYYupC_h5" 323 | } 324 | }, 325 | { 326 | "cell_type": "code", 327 | "source": [ 328 | "string_result = result.render()\n", 329 | "print(string_result)" 330 | ], 331 | "metadata": { 332 | "id": "mPkaooRnDlkQ" 333 | }, 334 | "execution_count": null, 335 | "outputs": [] 336 | } 337 | ], 338 | "metadata": { 339 | "colab": { 340 | "provenance": [] 341 | }, 342 | "kernelspec": { 343 | "display_name": "Python 3", 344 | "language": "python", 345 | "name": "python3" 346 | }, 347 | "language_info": { 348 | "codemirror_mode": { 349 | "name": "ipython", 350 | "version": 3 351 | }, 352 | "file_extension": ".py", 353 | "mimetype": "text/x-python", 354 | "name": "python", 355 | "nbconvert_exporter": "python", 356 | "pygments_lexer": "ipython3", 357 | "version": "3.8.8" 358 | } 359 | }, 360 | "nbformat": 4, 361 | "nbformat_minor": 0 362 | } 363 | -------------------------------------------------------------------------------- /doctr/using_standalone_predictors.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": { 6 | "id": "s-75rJZasyjG" 7 | }, 8 | "source": [ 9 | "# Using docTR's standalone predictors\n", 10 | "\n", 11 | "docTR’s `ocr_predictor` acts as a modular wrapper for its individual prediction models.\n", 12 | "This notebook shows how to work with these models independently, which can be helpful if you don't need all the features of the `ocr_predictor`.\n", 13 | "For instance, you might want to 
pair docTR's `detection_predictor` with a different recognition model.\n" 14 | ] 15 | }, 16 | { 17 | "cell_type": "code", 18 | "execution_count": null, 19 | "metadata": { 20 | "collapsed": true, 21 | "id": "LH5a8koqsyjI" 22 | }, 23 | "outputs": [], 24 | "source": [ 25 | "# First we have to uninstall the preinstalled tensorflow version if we want to work with PyTorch as the backend\n", 26 | "# because the env variables USE_TORCH=1 / USE_TF=1 don't have an effect in Colab\n", 27 | "!pip uninstall -y tensorflow\n", 28 | "# Install doctr\n", 29 | "#!pip install python-doctr[torch,viz]\n", 30 | "# From source\n", 31 | "!pip install python-doctr[torch,viz]@git+https://github.com/mindee/doctr.git" 32 | ] 33 | }, 34 | { 35 | "cell_type": "code", 36 | "execution_count": null, 37 | "metadata": { 38 | "id": "RI0SCGXTsyjK" 39 | }, 40 | "outputs": [], 41 | "source": [ 42 | "# Imports\n", 43 | "import requests\n", 44 | "import numpy as np\n", 45 | "from matplotlib import pyplot as plt\n", 46 | "import cv2\n", 47 | "\n", 48 | "from doctr.models import detection_predictor, recognition_predictor, page_orientation_predictor, crop_orientation_predictor\n", 49 | "from doctr.io import DocumentFile\n", 50 | "from doctr.utils.geometry import detach_scores" 51 | ] 52 | }, 53 | { 54 | "cell_type": "code", 55 | "execution_count": null, 56 | "metadata": { 57 | "id": "riI2cFwRsyjK" 58 | }, 59 | "outputs": [], 60 | "source": [ 61 | "# Define sample image URLs\n", 62 | "\n", 63 | "# Image of receipt\n", 64 | "receipt = requests.get(\"https://github.com/mindee/doctr/releases/download/v0.3.0/mock_receipt.jpeg\").content\n", 65 | "receipt_image = cv2.imdecode(np.frombuffer(receipt, np.uint8), cv2.IMREAD_COLOR)\n", 66 | "\n", 67 | "# Image of a word crop\n", 68 | "word_crop = requests.get(\"https://github.com/mindee/doctr/releases/download/v0.5.1/word-crop.png\").content\n", 69 | "word_crop_image = cv2.imdecode(np.frombuffer(word_crop, np.uint8), cv2.IMREAD_COLOR)" 70 | ] 71 | }, 72 | { 73 | "cell_type": "markdown", 74 | "metadata": { 75 | "id": "S9mSG_bGsyjK" 76 | }, 77 | "source": [ 78 | "## Detection predictor\n", 79 | "\n", 80 | "The detection predictor can be used to detect text in an image.\n",
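"\n", "With `assume_straight_pages=True`, each page's result is a dictionary whose `\"words\"` entry holds one row per detected box: the four relative box coordinates with the confidence score appended as the last value (this layout is an assumption about current versions, so double-check it against your installed release). A small self-contained sketch of reading one such row:\n", "\n", "```python\n", "import numpy as np\n", "\n", "# Toy stand-in for one page's detection output (relative coords + confidence)\n", "res = {\"words\": np.array([[0.1, 0.2, 0.5, 0.3, 0.98]])}\n", "xmin, ymin, xmax, ymax, conf = res[\"words\"][0]\n", "print(f\"box: ({xmin}, {ymin}) -> ({xmax}, {ymax}), score: {conf}\")\n", "```\n", "\n", "The cell below runs the real predictor and draws the detected boxes on our sample receipt."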
81 | ] 82 | }, 83 | { 84 | "cell_type": "code", 85 | "execution_count": null, 86 | "metadata": { 87 | "id": "PvY10EFJsyjL" 88 | }, 89 | "outputs": [], 90 | "source": [ 91 | "# Helper function to convert relative coordinates to absolute pixel values\n", 92 | "def _to_absolute(geom, img_shape: tuple[int, int]) -> list[list[int]]:\n", 93 | " h, w = img_shape\n", 94 | " if len(geom) == 2: # Assume straight pages = True -> [[xmin, ymin], [xmax, ymax]]\n", 95 | " (xmin, ymin), (xmax, ymax) = geom\n", 96 | " xmin, xmax = int(round(w * xmin)), int(round(w * xmax))\n", 97 | " ymin, ymax = int(round(h * ymin)), int(round(h * ymax))\n", 98 | " return [[xmin, ymin], [xmax, ymin], [xmax, ymax], [xmin, ymax]]\n", 99 | " else: # For polygons, convert each point to absolute coordinates\n", 100 | " return [[int(point[0] * w), int(point[1] * h)] for point in geom]\n", 101 | "\n", 102 | "# Define the detection predictor\n", 103 | "det_predictor = detection_predictor(\n", 104 | " arch=\"db_resnet50\",\n", 105 | " pretrained=True,\n", 106 | " assume_straight_pages=True,\n", 107 | " symmetric_pad=True,\n", 108 | " preserve_aspect_ratio=True,\n", 109 | " batch_size=1,\n", 110 | ") # .cuda().half() # Uncomment this line if you have a GPU\n", 111 | "\n", 112 | "# Define the postprocessing parameters (optional)\n", 113 | "det_predictor.model.postprocessor.bin_thresh = 0.3\n", 114 | "det_predictor.model.postprocessor.box_thresh = 0.1\n", 115 | "\n", 116 | "# Load the document image\n", 117 | "docs = DocumentFile.from_images([receipt])\n", 118 | "results = det_predictor(docs)\n", 119 | "\n", 120 | "for doc, res in zip(docs, results):\n", 121 | " img_shape = (doc.shape[0], doc.shape[1])\n", 122 | " # Detach the probability scores from the results\n", 123 | " detached_coords, prob_scores = detach_scores([res.get(\"words\")])\n", 124 | "\n", 125 | " for i, coords in enumerate(detached_coords[0]):\n", 126 | " coords = coords.reshape(2, 2).tolist() if coords.shape == (4,) else coords.tolist()\n", 127 | "\n", 128 | " # Convert relative to absolute pixel coordinates\n", 129 | " points = np.array(_to_absolute(coords, img_shape), dtype=np.int32).reshape((-1, 1, 2))\n", 130 | "\n", 131 | " # Draw the bounding box on the image\n", 132 | " cv2.polylines(receipt_image, [points], isClosed=True, color=(255, 0, 0), thickness=2)\n", 133 | "\n", 134 | "%matplotlib inline\n", 135 | "plt.imshow(cv2.cvtColor(receipt_image, cv2.COLOR_BGR2RGB)); plt.axis('off'); plt.show()" 136 | ] 137 | }, 138 | { 139 | "cell_type": "markdown", 140 | "metadata": { 141 | "id": "QXNJNmlosyjL" 142 | }, 143 | "source": [ 144 | "## Recognition predictor\n", 145 | "\n", 146 | "The recognition predictor is used to recognize text from pre-cropped word images.\n",
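"\n", "Because the boxes come back as relative coordinates, you can pair the detection predictor above with any recognition model by cropping the detected boxes out of the page and passing the crops along. A minimal sketch, assuming the detection cell above has been run (so `docs` and `detached_coords` exist) and that pages are straight:\n", "\n", "```python\n", "# Crop each detected word box from the original page (relative -> absolute)\n", "page = docs[0]\n", "h, w = page.shape[:2]\n", "crops = []\n", "for xmin, ymin, xmax, ymax in detached_coords[0]:\n", " crops.append(page[int(ymin * h) : int(ymax * h), int(xmin * w) : int(xmax * w)])\n", "\n", "# Here docTR's own recognition predictor stands in for any\n", "# recognition model of your choice\n", "rec = recognition_predictor(arch=\"parseq\", pretrained=True)\n", "words = rec(crops)  # list of (text, confidence) tuples\n", "```"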
147 | ] 148 | }, 149 | { 150 | "cell_type": "code", 151 | "execution_count": null, 152 | "metadata": { 153 | "id": "7-uEr62MsyjL" 154 | }, 155 | "outputs": [], 156 | "source": [ 157 | "\n", 158 | "# Load the word crop image\n", 159 | "doc = DocumentFile.from_images([word_crop])\n", 160 | "# Define the recognition predictor\n", 161 | "rec_predictor = recognition_predictor(arch=\"parseq\", pretrained=True, symmetric_pad=True, batch_size=1) # .cuda().half() # Uncomment this line if you have a GPU\n", 162 | "result = rec_predictor(doc)\n", 163 | "\n", 164 | "%matplotlib inline\n", 165 | "plt.imshow(cv2.cvtColor(word_crop_image, cv2.COLOR_BGR2RGB)); plt.axis('off'); plt.show()\n", 166 | "print(f\"Recognized text: {result[0][0]} \\nwith confidence: {result[0][1]:.2f}\")\n", 167 | "\n" 168 | ] 169 | }, 170 | { 171 | "cell_type": "markdown", 172 | "metadata": { 173 | "id": "AsXdh-NCsyjL" 174 | }, 175 | "source": [ 176 | "## Orientation predictors\n", 177 | "\n", 178 | "The orientation predictors can detect the **overall** orientation of a document image or word crop.\n", 179 | "They return the general orientation —[0, 90, 180, -90 (270)]— along with the corresponding confidence score." 180 | ] 181 | }, 182 | { 183 | "cell_type": "code", 184 | "execution_count": null, 185 | "metadata": { 186 | "id": "YDL5ari7syjM" 187 | }, 188 | "outputs": [], 189 | "source": [ 190 | "\n", 191 | "docs = DocumentFile.from_images([receipt])\n", 192 | "page_orient_predictor = page_orientation_predictor(pretrained=True, batch_size=1) # .cuda().half() # Uncomment this line if you have a GPU\n", 193 | "result = page_orient_predictor(docs)\n", 194 | "print(f\"general page orientation: {result[1][0]} with confidence: {result[2][0]:.2f}\")\n", 195 | "\n", 196 | "crop = DocumentFile.from_images([word_crop])\n", 197 | "crop_orient_predictor = crop_orientation_predictor(pretrained=True, batch_size=1) # .cuda().half() # Uncomment this line if you have a GPU\n", 198 | "result = crop_orient_predictor(crop)\n", 199 | "print(f\"general crop orientation: {result[1][0]} with confidence: {result[2][0]:.2f}\")" 200 | ] 201 | } 202 | ], 203 | "metadata": { 204 | "colab": { 205 | "provenance": [] 206 | }, 207 | "kernelspec": { 208 | "display_name": "doctr-torch-notebooks", 209 | "language": "python", 210 | "name": "python3" 211 | }, 212 | "language_info": { 213 | "codemirror_mode": { 214 | "name": "ipython", 215 | "version": 3 216 | }, 217 | "file_extension": ".py", 218 | "mimetype": "text/x-python", 219 | "name": "python", 220 | "nbconvert_exporter": "python", 221 | "pygments_lexer": "ipython3", 222 | "version": "3.10.15" 223 | } 224 | }, 225 | "nbformat": 4, 226 | "nbformat_minor": 0 227 | } 228 | --------------------------------------------------------------------------------