├── document-understanding-solution.png ├── test_images ├── patient_intake_form_sample.jpg └── patient_intake_form_sample-checkpoint.json ├── environment.yml ├── .gitignore ├── launch_app.ipynb ├── LICENSE ├── README.md ├── end_to_end_app.py └── app.py /document-understanding-solution.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/machinelearnear/amazon-textract-workbench/main/document-understanding-solution.png -------------------------------------------------------------------------------- /test_images/patient_intake_form_sample.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/machinelearnear/amazon-textract-workbench/main/test_images/patient_intake_form_sample.jpg -------------------------------------------------------------------------------- /test_images/patient_intake_form_sample-checkpoint.json: -------------------------------------------------------------------------------- 1 | [{"ID_0_START_STR": "Sample text", "ID_0_END_STR": "", "ID_0_PREFIX": "PREFIX_0"}, {"ID_1_START_STR": "Sample text", "ID_1_END_STR": "", "ID_1_PREFIX": "PREFIX_1"}, {"ID_2_START_STR": "Sample text", "ID_2_END_STR": "adasdasd", "ID_2_PREFIX": "PREFIX_2"}, {"ID_3_START_STR": "Sample text", "ID_3_END_STR": "asdasd", "ID_3_PREFIX": "PREFIX_3"}] -------------------------------------------------------------------------------- /environment.yml: -------------------------------------------------------------------------------- 1 | # To use: 2 | # $ conda env create -f environment.yml 3 | # $ conda activate 4 | name: machinelearnear-textract 5 | dependencies: 6 | - python=3.7 # problems with py39 7 | - pip 8 | - nb_conda_kernels 9 | - ipykernel 10 | - ipywidgets 11 | - gh 12 | - opencv 13 | - poppler 14 | - pip: 15 | - streamlit 16 | - gradio 17 | - boto3 18 | - awscli 19 | - sagemaker 20 | - amazon-textract-helper 21 | - amazon-textract-geofinder 22 | - amazon-textract-caller 23 | - amazon-textract-response-parser 24 | - transformers 25 | - pandas 26 | - parse 27 | - tensorflow==2.5.0 28 | - torch 29 | - torchvision 30 | - scipy 31 | - streamlit_image_comparison 32 | - thop 33 | - einops 34 | - timm 35 | - tabulate 36 | - pdf2image 37 | - spacy 38 | - spacy_streamlit -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | dependencies/ 2 | temp/ 3 | 4 | # Byte-compiled / optimized / DLL files 5 | __pycache__/ 6 | *.py[cod] 7 | *$py.class 8 | 9 | # C extensions 10 | *.so 11 | 12 | # Distribution / packaging 13 | .Python 14 | build/ 15 | develop-eggs/ 16 | dist/ 17 | downloads/ 18 | eggs/ 19 | .eggs/ 20 | lib/ 21 | lib64/ 22 | parts/ 23 | sdist/ 24 | var/ 25 | wheels/ 26 | pip-wheel-metadata/ 27 | share/python-wheels/ 28 | *.egg-info/ 29 | .installed.cfg 30 | *.egg 31 | MANIFEST 32 | 33 | # PyInstaller 34 | # Usually these files are written by a python script from a template 35 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
36 | *.manifest 37 | *.spec 38 | 39 | # Installer logs 40 | pip-log.txt 41 | pip-delete-this-directory.txt 42 | 43 | # Unit test / coverage reports 44 | htmlcov/ 45 | .tox/ 46 | .nox/ 47 | .coverage 48 | .coverage.* 49 | .cache 50 | nosetests.xml 51 | coverage.xml 52 | *.cover 53 | *.py,cover 54 | .hypothesis/ 55 | .pytest_cache/ 56 | 57 | # Translations 58 | *.mo 59 | *.pot 60 | 61 | # Django stuff: 62 | *.log 63 | local_settings.py 64 | db.sqlite3 65 | db.sqlite3-journal 66 | 67 | # Flask stuff: 68 | instance/ 69 | .webassets-cache 70 | 71 | # Scrapy stuff: 72 | .scrapy 73 | 74 | # Sphinx documentation 75 | docs/_build/ 76 | 77 | # PyBuilder 78 | target/ 79 | 80 | # Jupyter Notebook 81 | .ipynb_checkpoints 82 | 83 | # IPython 84 | profile_default/ 85 | ipython_config.py 86 | 87 | # pyenv 88 | .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 98 | __pypackages__/ 99 | 100 | # Celery stuff 101 | celerybeat-schedule 102 | celerybeat.pid 103 | 104 | # SageMath parsed files 105 | *.sage.py 106 | 107 | # Environments 108 | .env 109 | .venv 110 | env/ 111 | venv/ 112 | ENV/ 113 | env.bak/ 114 | venv.bak/ 115 | 116 | # Spyder project settings 117 | .spyderproject 118 | .spyproject 119 | 120 | # Rope project settings 121 | .ropeproject 122 | 123 | # mkdocs documentation 124 | /site 125 | 126 | # mypy 127 | .mypy_cache/ 128 | .dmypy.json 129 | dmypy.json 130 | 131 | # Pyre type checker 132 | .pyre/ 133 | -------------------------------------------------------------------------------- /launch_app.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "261a11ab-2952-4901-a5ee-99a090dc13ed", 6 | "metadata": { 7 | "tags": [] 8 | }, 9 | "source": [ 10 | "# Amazon Textract Workbench Demo" 11 | ] 12 | }, 13 | { 14 | "cell_type": "markdown", 15 | "id": "4568332b-7641-4fe6-a1ee-b7b41587b62a", 16 | "metadata": { 17 | "tags": [] 18 | }, 19 | "source": [ 20 | "## Configure session" 21 | ] 22 | }, 23 | { 24 | "cell_type": "code", 25 | "execution_count": 11, 26 | "id": "f4f42126-3a34-4aef-bf71-29aee7243c32", 27 | "metadata": {}, 28 | "outputs": [], 29 | "source": [ 30 | "domain = 'luzdatd5d7fmka0'\n", 31 | "region = 'us-east-2'" 32 | ] 33 | }, 34 | { 35 | "cell_type": "code", 36 | "execution_count": 12, 37 | "id": "2142d894-5ea6-47f7-bc1f-a4a77db71c9d", 38 | "metadata": {}, 39 | "outputs": [], 40 | "source": [ 41 | "fname = 'app.py'\n", 42 | "# fname = 'end_to_end_app.py'" 43 | ] 44 | }, 45 | { 46 | "cell_type": "markdown", 47 | "id": "092616ee-b36a-47e5-9ba1-26abb676b69a", 48 | "metadata": { 49 | "tags": [] 50 | }, 51 | "source": [ 52 | "## Launch demo" 53 | ] 54 | }, 55 | { 56 | "cell_type": "code", 57 | "execution_count": 13, 58 | "id": "e6bb77c0-c7e7-4df9-87d7-a7738f2b0f5f", 59 | "metadata": {}, 60 | "outputs": [], 61 | "source": [ 62 | "def launch_app(fname, domain, region):\n", 63 | " print('Wait a few seconds, then click the below to open your Streamlit app')\n", 64 | " print(f'https://{domain}.studio.{region}.sagemaker.aws/studiolab/default/jupyter/proxy/6006/')\n", 65 | " !streamlit run $fname 
--server.port 6006;" 66 | ] 67 | }, 68 | { 69 | "cell_type": "code", 70 | "execution_count": null, 71 | "id": "befe6cbd-a73f-4597-b1c1-a0e9e8406f5b", 72 | "metadata": {}, 73 | "outputs": [ 74 | { 75 | "name": "stdout", 76 | "output_type": "stream", 77 | "text": [ 78 | "Wait a few seconds, then click the below to open your Streamlit app\n", 79 | "https://luzdatd5d7fmka0.studio.us-east-2.sagemaker.aws/studiolab/default/jupyter/proxy/6006/\n", 80 | "\u001b[0m\n", 81 | "\u001b[34m\u001b[1m You can now view your Streamlit app in your browser.\u001b[0m\n", 82 | "\u001b[0m\n", 83 | "\u001b[34m Network URL: \u001b[0m\u001b[1mhttp://169.254.255.2:6006\u001b[0m\n", 84 | "\u001b[34m External URL: \u001b[0m\u001b[1mhttp://3.132.111.50:6006\u001b[0m\n", 85 | "\u001b[0m\n", 86 | "2022-05-05 18:12:56.151174: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libcudart.so.11.0'; dlerror: libcudart.so.11.0: cannot open shared object file: No such file or directory\n", 87 | "2022-05-05 18:12:56.151212: I tensorflow/stream_executor/cuda/cudart_stub.cc:29] Ignore above cudart dlerror if you do not have a GPU set up on your machine.\n" 88 | ] 89 | } 90 | ], 91 | "source": [ 92 | "launch_app(fname, domain, region)" 93 | ] 94 | }, 95 | { 96 | "cell_type": "code", 97 | "execution_count": null, 98 | "id": "c3fdfa2e-578d-41d4-aefd-9e150136bcda", 99 | "metadata": {}, 100 | "outputs": [], 101 | "source": [] 102 | } 103 | ], 104 | "metadata": { 105 | "kernelspec": { 106 | "display_name": "machinelearnear-textract:Python", 107 | "language": "python", 108 | "name": "conda-env-machinelearnear-textract-py" 109 | }, 110 | "language_info": { 111 | "codemirror_mode": { 112 | "name": "ipython", 113 | "version": 3 114 | }, 115 | "file_extension": ".py", 116 | "mimetype": "text/x-python", 117 | "name": "python", 118 | "nbconvert_exporter": "python", 119 | "pygments_lexer": "ipython3", 120 | "version": "3.7.12" 121 | } 122 | }, 123 | "nbformat": 4, 124 | "nbformat_minor": 5 125 | } 126 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 
29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. 
Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Amazon Textract Workbench 2 | 3 | Quote from [the official website](https://aws.amazon.com/textract): 4 | 5 | > Amazon Textract is a machine learning (ML) service that automatically extracts text, handwriting, and data from scanned documents. It goes beyond simple optical character recognition (OCR) to identify, understand, and extract data from forms and tables. Today, many companies manually extract data from scanned documents such as PDFs, images, tables, and forms, or through simple OCR software that requires manual configuration (which often must be updated when the form changes). To overcome these manual and expensive processes, Textract uses ML to read and process any type of document, accurately extracting text, handwriting, tables, and other data with no manual effort. You can quickly automate document processing and act on the information extracted, whether you’re automating loans processing or extracting information from invoices and receipts. Textract can extract the data in minutes instead of hours or days. Additionally, you can add human reviews with Amazon Augmented AI to provide oversight of your models and check sensitive data. 6 | 7 | This repo is aimed at giving you a place to experiment with the tooling and will show you a step by step tutorial on how to take advantage of the geometric context detected in an image to make the tagging of key and value pairs easier and more accurate with [Amazon Textract](https://aws.amazon.com/textract/). We are running this application on top of [SageMaker Studio Lab](https://studiolab.sagemaker.aws/) combining multiple libraries such as [Hugging Face](https://huggingface.co/), [SpaCy](https://spacy.io/), and [Textractor](https://github.com/aws-samples/amazon-textract-textractor). We will also make use of the recently launched ["Queries"](https://aws.amazon.com/blogs/machine-learning/specify-and-extract-information-from-documents-using-the-new-queries-feature-in-amazon-textract/) functionality in Textract. 8 | 9 | https://user-images.githubusercontent.com/78419164/167004138-8129b1a3-9362-4adc-a017-ec4f08ee6842.mov 10 | 11 | ## Getting started 12 | - [SageMaker Studio Lab Explainer Video](https://www.youtube.com/watch?v=FUEIwAsrMP4) 13 | - [How to access AWS resources from Studio Lab](https://github.com/aws/studio-lab-examples/blob/main/connect-to-aws/Access_AWS_from_Studio_Lab.ipynb) 14 | - [Amazon Textract Developer Guide](https://docs.aws.amazon.com/textract/latest/dg/API_Operations.html) 15 | - [Amazon Textract Response Parser](https://github.com/aws-samples/amazon-textract-response-parser/tree/master/src-python) 16 | - [Amazon Textractor Library](https://github.com/aws-samples/amazon-textract-textractor) 17 | - [Intelligent Document Processing (IDP) Workshop by AWS](https://catalog.us-east-1.prod.workshops.aws/workshops/c2af04b2-54ab-4b3d-be73-c7dd39074b20/en-US/) 18 | - [Streamlit - "A faster way to build and share data apps"](https://streamlit.io/) 19 | 20 | ## Requirements 21 | - [SageMaker Studio Lab](https://studiolab.sagemaker.aws/) account. See this [explainer video](https://www.youtube.com/watch?v=FUEIwAsrMP4) to learn more about this. 
22 | - `Python==3.7` 23 | - `Streamlit` 24 | - `TensorFlow==2.5.0` 25 | - `PyTorch>=1.10` 26 | - `Hugging Face Transformers` 27 | - Other libraries (see `environment.yml`) 28 | 29 | ## Step by step tutorial 30 | 31 | ### Clone repo, install dependencies, and launch your app 32 | 33 | Follow the steps shown in `launch_app.ipynb` [![Open In SageMaker Studio Lab](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/machinelearnear/amazon-textract-workbench/blob/main/launch_app.ipynb) Click on `Copy to project` in the top right corner. This will open the Studio Lab web interface and ask you whether you want to clone the entire repo or just the Notebook. Clone the entire repo and click `Yes` when asked about building the `Conda` environment automatically. You will now be running on top of a `Python` environment with `Streamlit` and `Gradio` already installed along with other libraries. 34 | 35 | Your Streamlit app will be running on `f'https://{studiolab_domain}.studio.{studiolab_region}.sagemaker.aws/studiolab/default/jupyter/proxy/6006/'` 36 | 37 | ### Pre-process image and compare modified vs original 38 | 39 | This is example code to implement [SauvolaNet](https://github.com/Leedeng/SauvolaNet), an end-to-end document binarization solution: 40 | 41 | ```python 42 | from os.path import exists as path_exists 43 | path_repo_sauvolanet = 'dependencies/SauvolaNet' 44 | if not path_exists(path_repo_sauvolanet): 45 | os.system(f'git clone https://github.com/Leedeng/SauvolaNet.git {path_repo_sauvolanet}') 46 | sys.path.append(f'{path_repo_sauvolanet}/SauvolaDocBin/') 47 | pd.set_option('display.float_format','{:.4f}'.format) 48 | from dataUtils import collect_binarization_by_dataset, DataGenerator 49 | from testUtils import prepare_inference, find_best_model 50 | from layerUtils import * 51 | from metrics import * 52 | 53 | @st.experimental_singleton 54 | def sauvolanet_load_model(model_root = f'{path_repo_sauvolanet}/pretrained_models/'): 55 | for this in os.listdir(model_root) : 56 | if this.endswith('.h5') : 57 | model_filepath = os.path.join(model_root, this) 58 | model = prepare_inference(model_filepath) 59 | print(model_filepath) 60 | return model 61 | 62 | def sauvolanet_read_decode_image(model, im): 63 | rgb = np.array(im) 64 | gray = cv2.cvtColor(rgb, cv2.COLOR_RGB2GRAY) 65 | x = gray.astype('float32')[None, ..., None]/255. 66 | pred = model.predict(x) 67 | return Image.fromarray(pred[0,...,0] > 0) 68 | 69 | ... 70 | 71 | with st.spinner(): 72 | sauvolanet_model = sauvolanet_load_model() 73 | modified_image = sauvolanet_read_decode_image(sauvolanet_model,input_image) 74 | st.success('Done!') 75 | ``` 76 | 77 | Finally we use [`streamlit-image-comparison`](https://github.com/fcakyon/streamlit-image-comparison) to display both images (modified, original) next to each other. 78 | 79 | ```python 80 | with st.expander("See modified image"): 81 | image_comparison( 82 | img1=input_image, img2=modified_image, 83 | label1='Original', label2='Modified', 84 | ) 85 | ``` 86 | 87 | Here's a demo: 88 | 89 | https://user-images.githubusercontent.com/78419164/166663497-79cdda76-93b3-43b8-b9c4-ed00f438cd25.mov 90 | 91 | ### Make API request to Textract with `Queries` 92 | 93 | Recently, Amazon released a new functionality in Textract called "Queries". 
You can think of it as [VQA](https://paperswithcode.com/task/visual-question-answering) where you can ask questions to your scanned documents and based on image and language context you will get the most likely response. You can see the [official documentation here](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/textract.html#Textract.Client.analyze_document) and a [sample Notebook here](https://github.com/aws-samples/amazon-textract-code-samples/blob/master/python/queries/paystub.ipynb). 94 | 95 | Here's some sample code: 96 | 97 | ```python 98 | # Call Textract AnalyzeDocument by passing a document from local disk 99 | response = textract.analyze_document( 100 | Document={'Bytes': imageBytes}, 101 | FeatureTypes=["QUERIES"], 102 | QueriesConfig={ 103 | "Queries": [{ 104 | "Text": "What is the year to date gross pay", 105 | "Alias": "PAYSTUB_YTD_GROSS" 106 | }, 107 | { 108 | "Text": "What is the current gross pay?", 109 | "Alias": "PAYSTUB_CURRENT_GROSS" 110 | }] 111 | }) 112 | ``` 113 | 114 | And this is how it looks on the Streamlit application that you will deploy with this repo: 115 | 116 | https://user-images.githubusercontent.com/78419164/166663542-8c8c82c6-41b5-4d07-9c71-3dfcd3573f2d.mov 117 | 118 | ### Use response with Amazon Comprehend, HuggingFace, SpaCy, etc. 119 | 120 | Example code to parse the output from Amazon Textract and use it with [Hugging Face](https://huggingface.co/) models under the task `summarization`. 121 | 122 | ```python 123 | def parse_response(response): 124 | from trp import Document 125 | doc = Document(response) 126 | text = '' 127 | for page in doc.pages: 128 | for line in page.lines: 129 | for word in line.words: 130 | text = text + word.text + ' ' 131 | return text.strip() 132 | 133 | @st.experimental_singleton 134 | def load_model_pipeline(task, model_name): 135 | return pipeline(task, model=model_name) 136 | 137 | ... 138 | 139 | with st.spinner('Downloading model weights and loading...'): 140 | pipe = load_model_pipeline(task="summarization", model_name=options) 141 | summary = pipe( 142 | parse_response(st.session_state.response), 143 | max_length=130, min_length=30, do_sample=False) 144 | 145 | with st.expander('View response'): 146 | st.write(summary) 147 | ``` 148 | 149 | ### (Additional) Extract Information Using "Geometric Context" and Amazon Textract 150 | 151 | Quote from [this repository](https://github.com/aws-samples/amazon-textract-textractor/tree/master/tpipelinegeofinder) by Martin Schade. 152 | 153 | > To find information in a document based on geometry with this library the main advantage over defining x,y coordinates where the expected value should be is the concept of an area. An area is ultimately defined by a box with `x_min`, `y_min`, `x_max`, `y_max` coordinates but can be defined by finding words/phrases in the document and then use to create the area. From there, functions to parse the information in the area help to extract the information. E. g. by defining the area based on the question like 'Did you feel fever or feverish lately?' we can associate the answers to it and create a new key/value pair specific to this question. 154 | 155 | You can find a notebook sample with a step by step tutorial under `notebook_samples/extract-info-geometric-context.ipynb`. 
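A minimal sketch of that idea, built from the same `textractgeofinder` calls used in `end_to_end_app.py`, looks like this. It assumes `response` holds the JSON returned by a previous `AnalyzeDocument` call with `FORMS` enabled; the anchor phrases and the `PATIENT` prefix are placeholders to replace with text from your own document.

```python
import trp.trp2 as t2
from textractgeofinder.ocrdb import AreaSelection
from textractgeofinder.tgeofinder import TGeoFinder

doc_width, doc_height = 1000, 1000
t_document = t2.TDocumentSchema().load(response)
geofinder_doc = TGeoFinder(response, doc_height=doc_height, doc_width=doc_width)

# phrases that bound the area of interest (placeholders, adjust to your form)
start = geofinder_doc.find_phrase_on_page("patient information")[0]
end = geofinder_doc.find_phrase_on_page("emergency contact")[0]

# every key/value pair that Textract found between the two phrases
form_fields = geofinder_doc.get_form_fields_in_area(
    area_selection=AreaSelection(
        top_left=t2.TPoint(y=start.ymax, x=0),
        lower_right=t2.TPoint(y=end.ymin, x=doc_width),
        page_number=1))

# tag each pair with a "virtual" prefixed key so it keeps its context downstream
for kv in form_fields:
    t_document.add_virtual_key_for_existing_key(
        key_name=f"PATIENT_{kv.key.text}",
        existing_key=t_document.get_block_by_id(kv.key.id),
        page_block=t_document.pages[0])
```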
156 | 157 | ## Next steps: How to do large-scale document processing with Amazon Textract 158 | 159 | - [Using SNS and SQS to handle concurrency at large scale](https://github.com/aws-samples/amazon-textract-serverless-large-scale-document-processing) 160 | - [Using CDK and Step Functions](https://github.com/aws-samples/amazon-textract-transformer-pipeline) 161 | - [Reference architecture: "Document Understanding Solution"](https://aws.amazon.com/solutions/implementations/document-understanding-solution/) 162 | - [Serverless Inference by Amazon SageMaker](https://docs.aws.amazon.com/sagemaker/latest/dg/serverless-endpoints.html) 163 | - [Serverless Inference with Hugging Face's Transformers, DistilBERT and Amazon SageMaker](https://www.philschmid.de/sagemaker-serverless-huggingface-distilbert) 164 | 165 | > The Document Understanding Solution (DUS) delivers an easy-to-use web application that ingests and analyzes files, extracts text from documents, identifies structural data (tables, key value pairs), extracts critical information (entities), and creates smart search indexes from the data. Additionally, files can be uploaded directly to and analyzed files can be accessed from an Amazon Simple Storage Service (Amazon S3) bucket in your AWS account. 166 | 167 | > This solution uses AWS artificial intelligence (AI) services that address business problems that apply to various industry verticals: 168 | > - Search and discovery: Search for information across multiple scanned documents, PDFs, and images 169 | > - Compliance: Redact information from documents 170 | > - Workflow automation: Easily plugs into your existing upstream and downstream applications 171 | 172 | ![Document Understanding Solution Architecture](document-understanding-solution.png) 173 | 174 | Additionally, serverless endpoints are a great way to create microservices that can be easily used within your document processing pipelines. The quote below is from [this blog post](https://www.philschmid.de/sagemaker-serverless-huggingface-distilbert) by Philipp Schmid: 175 | 176 | > Amazon SageMaker Serverless Inference is a new capability in SageMaker that enables you to deploy and scale ML models in a Serverless fashion. Serverless endpoints automatically launch compute resources and scale them in and out depending on traffic similar to AWS Lambda. Serverless Inference is ideal for workloads which have idle periods between traffic spurts and can tolerate cold starts. With a pay-per-use model, Serverless Inference is a cost-effective option if you have an infrequent or unpredictable traffic pattern. 
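As a rough sketch of how such an endpoint is created with the SageMaker Python SDK (the model ID, framework versions, and memory size below are illustrative assumptions, not values taken from this repo):

```python
from sagemaker.huggingface import HuggingFaceModel
from sagemaker.serverless import ServerlessInferenceConfig

# illustrative summarization model and container versions; adjust to your use case
hf_model = HuggingFaceModel(
    env={"HF_MODEL_ID": "sshleifer/distilbart-cnn-12-6", "HF_TASK": "summarization"},
    role="<your-sagemaker-execution-role-arn>",
    transformers_version="4.17",
    pytorch_version="1.10",
    py_version="py38",
)

predictor = hf_model.deploy(
    serverless_inference_config=ServerlessInferenceConfig(
        memory_size_in_mb=4096,  # serverless endpoints allow 1024-6144 MB
        max_concurrency=10,
    )
)

# e.g. summarize the raw text parsed out of a Textract response
print(predictor.predict({"inputs": "<text extracted with Amazon Textract>"}))
```

Because a serverless endpoint only bills while requests are being served, this pattern fits the bursty, per-document traffic that most document processing pipelines generate.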
177 | 178 | ## Additional resources 179 | 180 | - [Amazon Textract code samples](https://github.com/aws-samples/amazon-textract-code-samples) 181 | - [How to extract information by using document geometry & Amazon Textract](https://github.com/machinelearnear/extract-info-by-doc-geometry-aws-textract) 182 | - [Textractor GeoFinder Sample Notebook](https://github.com/aws-samples/amazon-textract-textractor/blob/master/tpipelinegeofinder/geofinder-sample-notebook.ipynb) 183 | - [Post Processing with Amazon Textract: Multi-Page Table Handling](https://github.com/aws-samples/amazon-textract-multipage-tables-processing) 184 | - [Paragraph identification](https://github.com/aws-samples/textract-paragraph-identification) 185 | - [Specify and extract information from documents using the new Queries feature in Amazon Textract](https://aws.amazon.com/blogs/machine-learning/specify-and-extract-information-from-documents-using-the-new-queries-feature-in-amazon-textract/) 186 | - [Sample notebook showing use of new "Queries" feature](https://github.com/aws-samples/amazon-textract-code-samples/blob/master/python/queries/paystub.ipynb) 187 | - [Accelerating Topic modeling with RAPIDS and BERT models](https://medium.com/rapids-ai/accelerating-topic-modeling-with-rapids-and-bert-models-be9909eeed2) 188 | - [BERTopic](https://github.com/MaartenGr/BERTopic) 189 | - [Annotated Text Component for Streamlit](https://github.com/tvst/st-annotated-text) 190 | 191 | ## References 192 | 193 | ```bibtex 194 | @INPROCEEDINGS{9506664, 195 | author={Li, Deng and Wu, Yue and Zhou, Yicong}, 196 | booktitle={The 16th International Conference on Document Analysis and Recognition (ICDAR)}, 197 | title={SauvolaNet: Learning Adaptive Sauvola Network for Degraded Document Binarization}, 198 | year={2021}, 199 | volume={}, 200 | number={}, 201 | pages={538–553}, 202 | doi={https://doi.org/10.1007/978-3-030-86337-1_36}} 203 | 204 | @article{zhang2022practical, 205 | title={Practical Blind Denoising via Swin-Conv-UNet and Data Synthesis}, 206 | author={Zhang, Kai and Li, Yawei and Liang, Jingyun and Cao, Jiezhang and Zhang, Yulun and Tang, Hao and Timofte, Radu and Van Gool, Luc}, 207 | journal={arXiv preprint}, 208 | year={2022} 209 | } 210 | ``` 211 | 212 | ## Disclaimer 213 | - The content provided in this repository is for demonstration purposes and not meant for production. You should use your own discretion when using the content. 214 | - The ideas and opinions outlined in these examples are my own and do not represent the opinions of AWS. 
215 | -------------------------------------------------------------------------------- /end_to_end_app.py: -------------------------------------------------------------------------------- 1 | # Amazon Textract Workbench 2 | # Author: https://github.com/machinelearnear 3 | 4 | # import dependencies 5 | # ----------------------------------------------------------- 6 | import pandas as pd 7 | import streamlit as st 8 | import boto3 9 | import io 10 | import json 11 | import re 12 | import os 13 | os.makedirs("dependencies",exist_ok=True) 14 | import sys 15 | import cv2 16 | import numpy as np 17 | import tensorflow as tf 18 | 19 | from transformers import pipeline 20 | from matplotlib import pyplot 21 | from pathlib import Path 22 | from PIL import Image 23 | from typing import List, Optional 24 | from tabulate import tabulate 25 | from pdf2image import convert_from_path, convert_from_bytes 26 | from streamlit_image_comparison import image_comparison 27 | 28 | from textractgeofinder.ocrdb import AreaSelection 29 | from textractgeofinder.tgeofinder import KeyValue, TGeoFinder, AreaSelection, SelectionElement 30 | from textractcaller.t_call import Textract_Features, Textract_Types, call_textract 31 | from textractprettyprinter.t_pretty_print import Pretty_Print_Table_Format, Textract_Pretty_Print, get_string, get_forms_string 32 | from textractoverlayer.t_overlay import DocumentDimensions, get_bounding_boxes 33 | 34 | import trp.trp2 as t2 35 | 36 | # PRE-PROCESSING (1) 37 | # ----------------------------------------------------------- 38 | # Source: https://github.com/Leedeng/SauvolaNet 39 | # ----------------------------------------------------------- 40 | from os.path import exists as path_exists 41 | path_repo_sauvolanet = 'dependencies/SauvolaNet' 42 | if not path_exists(path_repo_sauvolanet): 43 | os.system(f'git clone https://github.com/Leedeng/SauvolaNet.git {path_repo_sauvolanet}') 44 | sys.path.append(f'{path_repo_sauvolanet}/SauvolaDocBin/') 45 | pd.set_option('display.float_format','{:.4f}'.format) 46 | from dataUtils import collect_binarization_by_dataset, DataGenerator 47 | from testUtils import prepare_inference, find_best_model 48 | from layerUtils import * 49 | from metrics import * 50 | 51 | @st.experimental_singleton 52 | def sauvolanet_load_model(model_root = f'{path_repo_sauvolanet}/pretrained_models/'): 53 | for this in os.listdir(model_root) : 54 | if this.endswith('.h5') : 55 | model_filepath = os.path.join(model_root, this) 56 | model = prepare_inference(model_filepath) 57 | print(model_filepath) 58 | return model 59 | 60 | def sauvolanet_read_decode_image(model,im): 61 | rgb = np.array(im) 62 | gray = cv2.cvtColor(rgb, cv2.COLOR_RGB2GRAY) 63 | x = gray.astype('float32')[None, ..., None]/255. 
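    # run SauvolaNet on the normalized grayscale input, then binarize by thresholding
    # the raw prediction at 0 and return the resulting mask as a PIL image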
64 | pred = model.predict(x) 65 | return Image.fromarray(pred[0,...,0] > 0) 66 | 67 | # PRE-PROCESSING (2) 68 | # ----------------------------------------------------------- 69 | # Source: Kai Zhang (e-mail: cskaizhang@gmail.com; github: https://github.com/cszn) 70 | # by Kai Zhang (2021/05-2021/11) 71 | # ----------------------------------------------------------- 72 | 73 | path_repo_SCUNet = 'dependencies/SCUNet' 74 | if not path_exists(path_repo_SCUNet): 75 | os.system(f'git clone https://github.com/cszn/SCUNet.git {path_repo_SCUNet}') 76 | os.system(f'wget https://github.com/cszn/KAIR/releases/download/v1.0/scunet_color_real_psnr.pth -P {path_repo_SCUNet}/model_zoo') 77 | from datetime import datetime 78 | from collections import OrderedDict 79 | import torch 80 | sys.path.append(f'{path_repo_SCUNet}') 81 | from utils import utils_model 82 | from utils import utils_image as util 83 | 84 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') 85 | n_channels = 3 86 | 87 | @st.experimental_singleton 88 | def scunet_load_model(model_path=f'{path_repo_SCUNet}/model_zoo/scunet_color_real_psnr.pth'): 89 | from models.network_scunet import SCUNet as net 90 | model = net(in_nc=n_channels,config=[4,4,4,4,4,4,4],dim=64) 91 | model.load_state_dict(torch.load(model_path), strict=True) 92 | model.eval() 93 | for k, v in model.named_parameters(): 94 | v.requires_grad = False 95 | 96 | return model.to(device) 97 | 98 | def scunet_inference(model,img): 99 | # ------------------------------------ 100 | # (1) img_L 101 | # ------------------------------------ 102 | img_L = np.asarray(img) 103 | if img_L.ndim == 2: 104 | img_L = cv2.cvtColor(img_L, cv2.COLOR_GRAY2RGB) # GGG 105 | else: 106 | img_L = cv2.cvtColor(img_L, cv2.COLOR_BGR2RGB) # RGB 107 | 108 | img_L = util.uint2single(img_L) 109 | img_L = util.single2tensor4(img_L) 110 | img_L = img_L.to(device) 111 | 112 | # ------------------------------------ 113 | # (2) img_E 114 | # ------------------------------------ 115 | img_E = model(img_L) 116 | img_E = util.tensor2uint(img_E) 117 | 118 | return Image.fromarray(img_E) 119 | 120 | # helper funcs 121 | # ----------------------------------------------------------- 122 | def set_hierarchy_kv(list_kv, t_document: t2.TDocument, page_block: t2.TBlock, prefix="BORROWER"): 123 | """ 124 | function to add "virtual" keys which we use to indicate context 125 | """ 126 | for x in list_kv: 127 | t_document.add_virtual_key_for_existing_key(key_name=f"{prefix}_{x.key.text}", 128 | existing_key=t_document.get_block_by_id(x.key.id), 129 | page_block=page_block) 130 | 131 | def add_sel_elements(t_document: t2.TDocument, selection_values, key_base_name: str, 132 | page_block: t2.TBlock) -> t2.TDocument: 133 | """ 134 | Function that makes it easier to add selection elements to the Amazon Textract Response JSON schema 135 | """ 136 | for sel_element in selection_values: 137 | sel_key_string = "_".join([s_key.original_text.upper() for s_key in sel_element.key if s_key.original_text]) 138 | if sel_key_string: 139 | if sel_element.selection.original_text: 140 | t_document.add_virtual_key_for_existing_key(page_block=page_block, 141 | key_name=f"{key_base_name}->{sel_key_string}", 142 | existing_key=t_document.get_block_by_id( 143 | sel_element.key[0].id)) 144 | return t_document 145 | 146 | def tag_kv_pairs_to_text(textract_json, list_of_items): 147 | t_document, geofinder_doc = geofinder(textract_json) 148 | for item in list_of_items: 149 | start = geofinder_doc.find_phrase_on_page(item['first_str'])[0] 
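        # an item with both 'first_str' and 'last_str' bounds an area expected to hold key/value pairs;
        # an item with only 'first_str' marks a band around that phrase holding selection elements (checkboxes)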
150 | if len(item) == 3: 151 | end = geofinder_doc.find_phrase_on_page(item['last_str'], min_textdistance=0.99)[0] 152 | top_left = t2.TPoint(y=start.ymax, x=0) 153 | lower_right = t2.TPoint(y=end.ymin, x=doc_width) 154 | form_fields = geofinder_doc.get_form_fields_in_area( 155 | area_selection=AreaSelection(top_left=top_left, lower_right=lower_right, page_number=1)) 156 | set_hierarchy_kv( 157 | list_kv=form_fields, t_document=t_document, prefix=item['prefix'], page_block=t_document.pages[0]) 158 | else: 159 | top_left = t2.TPoint(y=start.ymin - 50, x=0) 160 | lower_right = t2.TPoint(y=start.ymax + 50, x=doc_width) 161 | sel_values: list[SelectionElement] = geofinder_doc.get_selection_values_in_area(area_selection=AreaSelection( 162 | top_left=top_left, lower_right=lower_right, page_number=1), 163 | exclude_ids=[]) 164 | t_document = add_sel_elements(t_document=t_document, 165 | selection_values=sel_values, 166 | key_base_name=item['prefix'], 167 | page_block=t_document.pages[0]) 168 | 169 | return t_document 170 | 171 | def geofinder(textract_json, doc_height=1000, doc_width=1000): 172 | j = textract_json.copy() 173 | t_document = t2.TDocumentSchema().load(j) 174 | geofinder_doc = TGeoFinder(j, doc_height=doc_height, doc_width=doc_width) 175 | 176 | return t_document, geofinder_doc 177 | 178 | def image_to_byte_array(image:Image): 179 | imgByteArr = io.BytesIO() 180 | image.save(imgByteArr, format=image.format) 181 | imgByteArr = imgByteArr.getvalue() 182 | 183 | return imgByteArr 184 | 185 | def t_json_to_t_df(t_json): 186 | # convert t_json > string > csv 187 | t_str = get_string( 188 | textract_json=t_json, 189 | table_format=Pretty_Print_Table_Format.csv, 190 | output_type=[Textract_Pretty_Print.FORMS], 191 | ) 192 | 193 | return pd.read_csv(io.StringIO(t_str), sep=",") 194 | 195 | @st.cache 196 | def convert_pandas(df): 197 | # IMPORTANT: Cache the conversion to prevent computation on every rerun 198 | return df.to_csv().encode('utf-8') 199 | 200 | @st.experimental_singleton 201 | def start_textract_client(credentials): 202 | return boto3.client( 203 | 'textract', 204 | aws_access_key_id=credentials['Access key ID'].values[0], 205 | aws_secret_access_key=credentials['Secret access key'].values[0], 206 | region_name='us-east-2', 207 | ) 208 | 209 | @st.experimental_singleton 210 | def start_comprehend_client(credentials): 211 | return boto3.client( 212 | 'comprehend', 213 | aws_access_key_id=credentials['Access key ID'].values[0], 214 | aws_secret_access_key=credentials['Secret access key'].values[0], 215 | region_name='us-east-2', 216 | ) 217 | 218 | def return_fnames(folder, extensions={'.png','.jpg','.jpeg'}): 219 | f = (p for p in Path(folder).glob("**/*") if p.suffix in extensions) 220 | return [x for x in f if 'ipynb_checkpoints' not in str(x)] 221 | 222 | @st.cache 223 | def return_anno_file(folder, image_fname): 224 | files = list(sorted([x for x in Path(folder).rglob('*.json')])) 225 | selected = [x for x in files if 226 | Path(image_fname).stem in str(x) and 'ipynb_checkpoints' not in str(x)] 227 | return selected[0] 228 | 229 | def get_filename_from_cd(cd): 230 | """ 231 | Get filename from content-disposition 232 | """ 233 | if not cd: 234 | return None 235 | fname = re.findall('filename=(.+)', cd) 236 | if len(fname) == 0: 237 | return None 238 | return fname[0] 239 | 240 | def add_item_to_input_queries_list(text, alias='', verbose=False): 241 | text = re.sub('[^\w\s]','', text) 242 | if len(alias)==0: 243 | alias = text.replace(' ','_').upper() 244 | 245 | if not 
any(text in x['Text'] for x in st.session_state.input_queries): 246 | st.session_state.input_queries.append( 247 | { 248 | "Text": text, 249 | "Alias": alias 250 | } 251 | ) 252 | if verbose: st.success('Added') 253 | else: 254 | if verbose: st.warning('Already exists') 255 | 256 | def remove_item(): 257 | del st.session_state.input_queries[st.session_state.index] 258 | 259 | def clear_all_items(): 260 | st.session_state.input_queries = [] 261 | st.session_state.index = 0 262 | 263 | def parse_response(response): 264 | from trp import Document 265 | doc = Document(response) 266 | text = '' 267 | for page in doc.pages: 268 | for line in page.lines: 269 | for word in line.words: 270 | text = text + word.text + ' ' 271 | return text.strip() 272 | 273 | @st.experimental_singleton 274 | def load_model_pipeline(task, model_name): 275 | return pipeline(task, model=model_name) 276 | 277 | # streamlit app 278 | # ----------------------------------------------------------- 279 | st.set_page_config( 280 | page_title='Textract Workbench', 281 | page_icon=":open_book:", 282 | layout="centered", 283 | initial_sidebar_state="auto", 284 | menu_items=None) 285 | 286 | def main(): 287 | # intro and sidebar 288 | ####################### 289 | st.title('Amazon Textract Pipeline v0.1') 290 | st.markdown(''' 291 | This repo is aimed at giving you a place to experiment with the tooling and 292 | will show you a step by step tutorial on how to take advantage of the geometric 293 | context detected in an image to make the tagging of key and value pairs easier 294 | and more accurate with [Amazon Textract](https://aws.amazon.com/textract/). We are 295 | running this application on top of [SageMaker Studio Lab](https://studiolab.sagemaker.aws/) 296 | combining multiple libraries such as [Hugging Face](https://huggingface.co/), 297 | [SpaCy](https://spacy.io/), and [Textractor](https://github.com/aws-samples/amazon-textract-textractor). 298 | We will also make use of the recently launched 299 | ["Queries"](https://aws.amazon.com/blogs/machine-learning/specify-and-extract-information-from-documents-using-the-new-queries-feature-in-amazon-textract/) 300 | functionality in Textract. 301 | ''') 302 | 303 | with st.sidebar: 304 | # about 305 | st.subheader('About this demo') 306 | st.markdown(''' 307 | Author: Nicolás Metallo (metallo@amazon.com) 308 | ''') 309 | st.markdown(f'This web app is running on `{device}`') 310 | 311 | # connect AWS credentials 312 | with st.expander('Connect your AWS credentials'): 313 | st.markdown('Required to use Amazon Textract. 
No data is stored locally, only streamed to memory.') 314 | credentials = pd.DataFrame() 315 | uploaded_file = st.file_uploader("Upload your csv file", type=['csv'], key='uploaded_file_credentials') 316 | 317 | if uploaded_file: 318 | credentials = pd.read_csv(io.StringIO(uploaded_file.read().decode('utf-8'))) 319 | 320 | if not credentials.empty: 321 | textract_client = start_textract_client(credentials) 322 | comprehend_client = start_comprehend_client(credentials) 323 | st.success('AWS credentials are loaded.') 324 | else: 325 | st.warning('AWS credentials are not loaded.') 326 | 327 | # (1) read input image 328 | ####################### 329 | st.header('(0) Configure environment') 330 | with st.form('config'): 331 | preprocessing = st.selectbox() 332 | textract_api = None 333 | 334 | 335 | 336 | st.header('(1) Load input document(s)') 337 | uploaded_file= st.file_uploader( 338 | "Choose document(s) to upload", key='uploaded_file', accept_multiple_files=True) 339 | if uploaded_file: 340 | if Path(uploaded_file.name).suffix != '.pdf': 341 | input_image = Image.open(io.BytesIO(uploaded_file.decode())) 342 | else: 343 | input_image = convert_from_bytes(uploaded_file.read(),fmt='png')[0] # only first page 344 | st.success('Image was successfully uploaded') 345 | 346 | if input_image: 347 | max_im_size = (1000,1000) 348 | input_image.thumbnail(max_im_size, Image.Resampling.LANCZOS) 349 | with st.expander("See input image"): 350 | st.image(input_image, use_column_width=True) 351 | st.info(f'The input image has been resized to fit within `{max_im_size}`') 352 | else: 353 | st.warning('There is no image loaded.') 354 | 355 | # image pre-processing 356 | ####################### 357 | st.subheader('(Optional) Image pre-processing') 358 | options_preproc = st.selectbox('Please choose any of the following options', 359 | ( 360 | 'No pre-processing', 361 | 'SauvolaNet: Learning Adaptive Sauvola Network for Degraded Document Binarization (ICDAR2021)', 362 | 'Practical Blind Denoising via Swin-Conv-UNet and Data Synthesis', 363 | 'Restormer: Efficient Transformer for High-Resolution Image Restoration (CVPR2022)', 364 | ) 365 | ) 366 | 367 | modified_image = None 368 | if input_image: 369 | if options_preproc == 'SauvolaNet: Learning Adaptive Sauvola Network for Degraded Document Binarization (ICDAR2021)': 370 | try: 371 | with st.spinner(): 372 | sauvolanet_model = sauvolanet_load_model() 373 | modified_image = sauvolanet_read_decode_image(sauvolanet_model,input_image) 374 | st.success('Done!') 375 | except Exception as e: 376 | st.error(e) 377 | elif options_preproc == 'Practical Blind Denoising via Swin-Conv-UNet and Data Synthesis': 378 | try: 379 | with st.spinner(): 380 | scunet_model = scunet_load_model() 381 | modified_image = scunet_inference(scunet_model,input_image) 382 | st.success('Done!') 383 | except Exception as e: 384 | st.error(e) 385 | elif options_preproc == 'Restormer: Efficient Transformer for High-Resolution Image Restoration (CVPR2022)': 386 | st.warning('Not implemented yet.') 387 | 388 | if modified_image: 389 | with st.expander("See modified image"): 390 | image_comparison( 391 | img1=input_image, img2=modified_image, 392 | label1='Original', label2='Modified', 393 | ) 394 | else: 395 | st.warning('There is no image loaded.') 396 | 397 | # (2) retrieve ocr preds 398 | ####################### 399 | st.header('(2) Amazon Textract') 400 | if not 'response' in st.session_state: 401 | st.session_state.response = None 402 | feature_types=[] 403 | cAA, cBA = st.columns(2) 404 | with 
cAA: 405 | options = st.selectbox( 406 | 'The following actions are supported:', 407 | ('DetectDocumentText','AnalyzeDocument','AnalyzeExpense','AnalyzeID'), 408 | help='Read more: https://docs.aws.amazon.com/textract/latest/dg/API_Operations.html') 409 | st.write(f'You selected: `{options}`') 410 | with cBA: 411 | if options == 'AnalyzeDocument': 412 | feature_types = st.multiselect( 413 | 'Select feature types for "AnalyzeDocument"', 414 | ['TABLES','FORMS','QUERIES'], 415 | help='Read more: https://docs.aws.amazon.com/textract/latest/dg/API_AnalyzeDocument.html') 416 | st.write(f'You selected: `{feature_types}`') 417 | 418 | if 'QUERIES' in feature_types: 419 | if 'input_queries' not in st.session_state: 420 | st.session_state.input_queries = [] 421 | if 'index' not in st.session_state: 422 | st.session_state.index = 0 423 | 424 | with st.expander('Would you like to upload your existing list of queries?'): 425 | uploaded_file = st.file_uploader("Choose file to upload", key='uploaded_file_queries') 426 | if uploaded_file: 427 | queries = io.StringIO(uploaded_file.getvalue().decode("utf-8")).read() 428 | queries = [x.strip().lower() for x in queries.split(',')] 429 | for x in queries: add_item_to_input_queries_list(x) 430 | st.success('List of queries was successfully uploaded') 431 | 432 | with st.expander('Input your new queries here'): 433 | cAB, cBB = st.columns([3,1]) 434 | with cAB: 435 | st.text_input('Input your new query', 436 | key='add_query_text', 437 | help='Input queries that Textract will use to extract the data that is most important to you.') 438 | with cBB: 439 | st.text_input('Alias (Optional)', 440 | key='add_query_alias') 441 | 442 | if st.button('+ Add query') and len(st.session_state.add_query_text)>0: 443 | input_query_text = st.session_state.add_query_text.strip() 444 | input_query_alias = st.session_state.add_query_alias 445 | add_item_to_input_queries_list(input_query_text, input_query_alias, verbose=True) 446 | 447 | if len(st.session_state.input_queries)==0: 448 | st.warning('No queries selected') 449 | else: 450 | with st.expander('Edit existing queries'): 451 | cAC, cBC = st.columns([3,1]) 452 | cAC.write(st.session_state.input_queries) 453 | cBC.number_input( 454 | 'Select entry number', 455 | min_value=0, 456 | max_value=len(st.session_state.input_queries)-1, 457 | key='index', 458 | ) 459 | if cBC.button('Remove item',on_click=remove_item): 460 | st.success('Deleted!') 461 | if cBC.button('Clear all',on_click=clear_all_items): 462 | if len(st.session_state.input_queries)==0: 463 | st.success('Cleared!') 464 | else: 465 | cBC.warning('Delete the uploaded file to clear all') 466 | 467 | st.subheader('Run and review response') 468 | if input_image: 469 | if not credentials.empty: 470 | aa, bb = st.columns([1,5]) 471 | placeholder = aa.empty() 472 | if aa.button('✍ Submit'): 473 | st.session_state.response = None 474 | if options == 'AnalyzeDocument' and feature_types: 475 | if 'QUERIES' in feature_types: 476 | if len(st.session_state.input_queries)==0: 477 | st.error('Please add queries before you submit your request with QUERIES') 478 | else: 479 | response = textract_client.analyze_document( 480 | Document = { 481 | 'Bytes': image_to_byte_array(input_image), 482 | }, 483 | FeatureTypes = feature_types, 484 | QueriesConfig={ 485 | 'Queries': st.session_state.input_queries[:15] # max queries per page: 15 486 | } 487 | ) 488 | else: 489 | response = textract_client.analyze_document( 490 | Document = { 491 | 'Bytes': image_to_byte_array(input_image), 492 | }, 
493 | FeatureTypes = feature_types, 494 | ) 495 | elif options == 'AnalyzeExpense': 496 | response = textract_client.analyze_expense( 497 | Document={ 498 | 'Bytes': image_to_byte_array(input_image), 499 | } 500 | ) 501 | elif options == 'AnalyzeID': 502 | response = textract_client.analyze_id( 503 | DocumentPages=[ 504 | { 505 | 'Bytes': image_to_byte_array(input_image), 506 | }, 507 | ] 508 | ) 509 | else: 510 | response = textract_client.detect_document_text( 511 | Document={ 512 | 'Bytes': image_to_byte_array(input_image), 513 | } 514 | ) 515 | 516 | if response: 517 | placeholder.success('Finished!') 518 | with bb.expander('View response'): 519 | st.markdown('**RAW TEXT**') 520 | output_text = parse_response(response) 521 | st.write(output_text) 522 | st.markdown('**JSON**') 523 | st.write(response) 524 | 525 | if feature_types: 526 | with bb.expander('View response from AnalyzeDocument'): 527 | if 'QUERIES' in feature_types: 528 | st.markdown('**QUERIES**') 529 | d = t2.TDocumentSchema().load(response) 530 | page = d.pages[0] 531 | query_answers = d.get_query_answers(page=page) 532 | queries_df = pd.DataFrame( 533 | query_answers, 534 | columns=['Text','Alias','Value']) 535 | st.dataframe(queries_df) 536 | if 'FORMS' in feature_types: 537 | st.markdown('**FORMS**') 538 | forms = get_string( 539 | textract_json=response, 540 | table_format=Pretty_Print_Table_Format.csv, 541 | output_type=[Textract_Pretty_Print.FORMS], 542 | ) 543 | forms_df = pd.read_csv(io.StringIO(forms),sep=",") 544 | st.dataframe(forms_df) 545 | if 'TABLES' in feature_types: 546 | st.markdown('**TABLES**') 547 | tables = get_string( 548 | textract_json=response, 549 | table_format=Pretty_Print_Table_Format.csv, 550 | output_type=[Textract_Pretty_Print.TABLES], 551 | ) 552 | tables_df = pd.read_csv(io.StringIO(tables),sep=",") 553 | st.dataframe(tables_df) 554 | 555 | if options == 'AnalyzeExpense': 556 | with st.expander('View response from AnalyzeExpense'): 557 | pass 558 | 559 | if options == 'AnalyzeID': 560 | with st.expander('View response from AnalyzeID'): 561 | pass 562 | 563 | st.session_state.response = response 564 | elif not st.session_state.response: 565 | st.warning('No response generated') 566 | else: 567 | st.warning('AWS credentials are not loaded.') 568 | else: 569 | st.warning('There is no image loaded.') 570 | 571 | # expand with Amazon Comprehend and Hugging Face 572 | ####################### 573 | st.header('(3) Amazon Comprehend') 574 | st.write('Read more here: https://docs.aws.amazon.com/comprehend/latest/dg/what-is.html') 575 | if input_image: 576 | if st.session_state.response: 577 | options = st.selectbox( 578 | 'Please select any of the following models implemented on Hugging Face', 579 | ['Not selected','Entity recognition','Detect PII','Topic Modeling'], 580 | help='https://docs.aws.amazon.com/comprehend/latest/dg/get-started-api.html', 581 | ) 582 | if options == 'Entity recognition': 583 | ner = comprehend_client.detect_entities( 584 | Text=parse_response(st.session_state.response), LanguageCode='en') 585 | with st.expander('View response'): 586 | st.write(ner) 587 | if options == 'Detect PII': 588 | pii = comprehend_client.detect_pii_entities( 589 | Text=parse_response(st.session_state.response), LanguageCode='en') 590 | with st.expander('View response'): 591 | st.write(pii) 592 | if options == 'Topic Modeling': 593 | st.warning('Not implemented yet.') 594 | else: 595 | st.warning('No response generated') 596 | else: 597 | st.warning('There is no image loaded.') 598 | 599 | 
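    # (4) + (5): Hugging Face Transformers and SpaCy
    # summarization, zero-shot classification and NER run locally on the
    # text parsed from the Textract response
    #######################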
st.header('(4) Hugging Face Transformers') 600 | st.write('Read more here: https://huggingface.co/') 601 | st.subheader("Summarization") 602 | if input_image: 603 | if st.session_state.response: 604 | options = st.selectbox( 605 | 'Please select any of the following models implemented on Hugging Face', 606 | ['Not selected','google/pegasus-xsum','facebook/bart-large-cnn', 'Use another'], 607 | help='https://huggingface.co/models?pipeline_tag=summarization&sort=downloads', 608 | ) 609 | 610 | if options == 'Use another': 611 | options = st.text_input('Enter model name here, e.g. "sshleifer/distilbart-cnn-12-6"') 612 | 613 | if not options == 'Not selected' and len(options)>0: 614 | with st.spinner('Downloading model weights and loading...'): 615 | pipe = load_model_pipeline(task="summarization", model_name=options) 616 | summary = pipe(parse_response(st.session_state.response), 617 | max_length=130, min_length=30, do_sample=False) 618 | with st.expander('View response'): 619 | st.write(summary) 620 | 621 | st.write(f'You selected: `{options}`') 622 | else: 623 | st.warning('No response generated') 624 | 625 | st.subheader("Zero-shot classification") 626 | if input_image: 627 | if st.session_state.response: 628 | options = st.selectbox( 629 | 'Please select any of the following models implemented on Hugging Face', 630 | [ 631 | 'Not selected', 632 | 'typeform/distilbert-base-uncased-mnli', 633 | 'cross-encoder/nli-distilroberta-base', 634 | 'MoritzLaurer/mDeBERTa-v3-base-mnli-xnli', 635 | 'Use another', 636 | ], 637 | help='https://huggingface.co/models?pipeline_tag=zero-shot-classification', 638 | ) 639 | 640 | if options == 'Use another': 641 | options = st.text_input('Enter model name here, e.g. "cross-encoder/nli-distilroberta-base"') 642 | 643 | if not options == 'Not selected' and len(options)>0: 644 | with st.spinner('Downloading model weights and loading...'): 645 | pipe = load_model_pipeline(task="zero-shot-classification", model_name=options) 646 | candidate_labels = st.text_input( 647 | 'Possible class names (comma-separated)', 648 | value='utility bill, benefit application, medical note', 649 | ) 650 | if candidate_labels: candidate_labels = [x.strip() for x in candidate_labels.split(',')] 651 | zero_shot_class = pipe(parse_response(st.session_state.response), candidate_labels) 652 | with st.expander('View response'): 653 | st.write(zero_shot_class) 654 | 655 | st.write(f'You selected: `{options}`') 656 | else: 657 | st.warning('No response generated') 658 | else: 659 | st.warning('There is no image loaded.') 660 | st.header('(5) SpaCy') 661 | st.write('Read more here: https://spacy.io/') 662 | if input_image: 663 | if st.session_state.response: 664 | options = st.selectbox( 665 | 'Select visualisers:', 666 | ["Not selected","NER with 'en_core_web_sm'"], 667 | help='https://github.com/explosion/spacy-streamlit', 668 | ) 669 | 670 | if options != 'Not selected': 671 | with st.spinner('Loading SpaCy model...'): 672 | import spacy 673 | import spacy_streamlit 674 | 675 | try: nlp = spacy.load("en_core_web_sm") 676 | except: os.system('python -m spacy download en_core_web_sm') 677 | nlp = spacy.load("en_core_web_sm") 678 | doc = nlp(parse_response(st.session_state.response)) 679 | spacy_streamlit.visualize_ner( 680 | doc, labels=nlp.get_pipe("ner").labels) 681 | else: 682 | st.warning('No response generated') 683 | 684 | # footer 685 | st.header('References') 686 | st.code( 687 | ''' 688 | @INPROCEEDINGS{9506664, 689 | author={Li, Deng and Wu, Yue and Zhou, Yicong}, 690 | 
booktitle={The 16th International Conference on Document Analysis and Recognition (ICDAR)}, 691 | title={SauvolaNet: Learning Adaptive Sauvola Network for Degraded Document Binarization}, 692 | year={2021}, 693 | volume={}, 694 | number={}, 695 | pages={538–553}, 696 | doi={https://doi.org/10.1007/978-3-030-86337-1_36}} 697 | 698 | @article{zhang2022practical, 699 | title={Practical Blind Denoising via Swin-Conv-UNet and Data Synthesis}, 700 | author={Zhang, Kai and Li, Yawei and Liang, Jingyun and Cao, Jiezhang and Zhang, Yulun and Tang, Hao and Timofte, Radu and Van Gool, Luc}, 701 | journal={arXiv preprint}, 702 | year={2022} 703 | } 704 | ''' 705 | , language='bibtex') 706 | 707 | st.header('Disclaimer') 708 | st.markdown(''' 709 | - The content provided in this repository is for demonstration purposes and not meant for production. You should use your own discretion when using the content. 710 | - The ideas and opinions outlined in these examples are my own and do not represent the opinions of AWS. 711 | ''') 712 | 713 | # run application 714 | # ----------------------------------------------------------- 715 | if __name__ == '__main__': 716 | main() -------------------------------------------------------------------------------- /app.py: -------------------------------------------------------------------------------- 1 | # Amazon Textract Workbench 2 | # Author: https://github.com/machinelearnear 3 | 4 | # import dependencies 5 | # ----------------------------------------------------------- 6 | import pandas as pd 7 | import streamlit as st 8 | import boto3 9 | import io 10 | import json 11 | import re, requests # requests is used by the 'Download image from URL' option 12 | import os 13 | os.makedirs("dependencies",exist_ok=True) 14 | import sys 15 | import cv2 16 | import numpy as np 17 | import tensorflow as tf 18 | 19 | from transformers import pipeline 20 | from matplotlib import pyplot 21 | from pathlib import Path 22 | from PIL import Image 23 | from typing import List, Optional 24 | from tabulate import tabulate 25 | from pdf2image import convert_from_path, convert_from_bytes 26 | from streamlit_image_comparison import image_comparison 27 | 28 | from textractgeofinder.ocrdb import AreaSelection 29 | from textractgeofinder.tgeofinder import KeyValue, TGeoFinder, AreaSelection, SelectionElement 30 | from textractcaller.t_call import Textract_Features, Textract_Types, call_textract 31 | from textractprettyprinter.t_pretty_print import Pretty_Print_Table_Format, Textract_Pretty_Print, get_string, get_forms_string 32 | from textractoverlayer.t_overlay import DocumentDimensions, get_bounding_boxes 33 | 34 | import trp.trp2 as t2 35 | 36 | # PRE-PROCESSING (1) 37 | # ----------------------------------------------------------- 38 | # Source: https://github.com/Leedeng/SauvolaNet 39 | # ----------------------------------------------------------- 40 | from os.path import exists as path_exists 41 | path_repo_sauvolanet = 'dependencies/SauvolaNet' 42 | if not path_exists(path_repo_sauvolanet): 43 | os.system(f'git clone https://github.com/Leedeng/SauvolaNet.git {path_repo_sauvolanet}') 44 | sys.path.append(f'{path_repo_sauvolanet}/SauvolaDocBin/') 45 | pd.set_option('display.float_format','{:.4f}'.format) 46 | from dataUtils import collect_binarization_by_dataset, DataGenerator 47 | from testUtils import prepare_inference, find_best_model 48 | from layerUtils import * 49 | from metrics import * 50 | 51 | @st.experimental_singleton 52 | def sauvolanet_load_model(model_root = f'{path_repo_sauvolanet}/pretrained_models/'): 53 | for this in 
os.listdir(model_root) : 54 | if this.endswith('.h5') : 55 | model_filepath = os.path.join(model_root, this) 56 | model = prepare_inference(model_filepath) 57 | print(model_filepath) 58 | return model 59 | 60 | def sauvolanet_read_decode_image(model,im): 61 | rgb = np.array(im) 62 | gray = cv2.cvtColor(rgb, cv2.COLOR_RGB2GRAY) 63 | x = gray.astype('float32')[None, ..., None]/255. 64 | pred = model.predict(x) 65 | return Image.fromarray(pred[0,...,0] > 0) 66 | 67 | # PRE-PROCESSING (2) 68 | # ----------------------------------------------------------- 69 | # Source: Kai Zhang (e-mail: cskaizhang@gmail.com; github: https://github.com/cszn) 70 | # by Kai Zhang (2021/05-2021/11) 71 | # ----------------------------------------------------------- 72 | 73 | path_repo_SCUNet = 'dependencies/SCUNet' 74 | if not path_exists(path_repo_SCUNet): 75 | os.system(f'git clone https://github.com/cszn/SCUNet.git {path_repo_SCUNet}') 76 | os.system(f'wget https://github.com/cszn/KAIR/releases/download/v1.0/scunet_color_real_psnr.pth -P {path_repo_SCUNet}/model_zoo') 77 | from datetime import datetime 78 | from collections import OrderedDict 79 | import torch 80 | sys.path.append(f'{path_repo_SCUNet}') 81 | from utils import utils_model 82 | from utils import utils_image as util 83 | 84 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') 85 | n_channels = 3 86 | 87 | @st.experimental_singleton 88 | def scunet_load_model(model_path=f'{path_repo_SCUNet}/model_zoo/scunet_color_real_psnr.pth'): 89 | from models.network_scunet import SCUNet as net 90 | model = net(in_nc=n_channels,config=[4,4,4,4,4,4,4],dim=64) 91 | model.load_state_dict(torch.load(model_path), strict=True) 92 | model.eval() 93 | for k, v in model.named_parameters(): 94 | v.requires_grad = False 95 | 96 | return model.to(device) 97 | 98 | @st.experimental_singleton 99 | def scunet_inference(model,img): 100 | # ------------------------------------ 101 | # (1) img_L 102 | # ------------------------------------ 103 | img_L = np.asarray(img) 104 | if img_L.ndim == 2: 105 | img_L = cv2.cvtColor(img_L, cv2.COLOR_GRAY2RGB) # GGG 106 | else: 107 | img_L = cv2.cvtColor(img_L, cv2.COLOR_BGR2RGB) # RGB 108 | 109 | img_L = util.uint2single(img_L) 110 | img_L = util.single2tensor4(img_L) 111 | img_L = img_L.to(device) 112 | 113 | # ------------------------------------ 114 | # (2) img_E 115 | # ------------------------------------ 116 | img_E = model(img_L) 117 | img_E = util.tensor2uint(img_E) 118 | 119 | return Image.fromarray(img_E) 120 | 121 | # helper funcs 122 | # ----------------------------------------------------------- 123 | def set_hierarchy_kv(list_kv, t_document: t2.TDocument, page_block: t2.TBlock, prefix="BORROWER"): 124 | """ 125 | function to add "virtual" keys which we use to indicate context 126 | """ 127 | for x in list_kv: 128 | t_document.add_virtual_key_for_existing_key(key_name=f"{prefix}_{x.key.text}", 129 | existing_key=t_document.get_block_by_id(x.key.id), 130 | page_block=page_block) 131 | 132 | def add_sel_elements(t_document: t2.TDocument, selection_values, key_base_name: str, 133 | page_block: t2.TBlock) -> t2.TDocument: 134 | """ 135 | Function that makes it easier to add selection elements to the Amazon Textract Response JSON schema 136 | """ 137 | for sel_element in selection_values: 138 | sel_key_string = "_".join([s_key.original_text.upper() for s_key in sel_element.key if s_key.original_text]) 139 | if sel_key_string: 140 | if sel_element.selection.original_text: 141 | 
t_document.add_virtual_key_for_existing_key(page_block=page_block, 142 | key_name=f"{key_base_name}->{sel_key_string}", 143 | existing_key=t_document.get_block_by_id( 144 | sel_element.key[0].id)) 145 | return t_document 146 | 147 | def tag_kv_pairs_to_text(textract_json, list_of_items): 148 | t_document, geofinder_doc = geofinder(textract_json) 149 | for item in list_of_items: 150 | start = geofinder_doc.find_phrase_on_page(item['first_str'])[0] 151 | if len(item) == 3: 152 | end = geofinder_doc.find_phrase_on_page(item['last_str'], min_textdistance=0.99)[0] 153 | top_left = t2.TPoint(y=start.ymax, x=0) 154 | lower_right = t2.TPoint(y=end.ymin, x=doc_width) 155 | form_fields = geofinder_doc.get_form_fields_in_area( 156 | area_selection=AreaSelection(top_left=top_left, lower_right=lower_right, page_number=1)) 157 | set_hierarchy_kv( 158 | list_kv=form_fields, t_document=t_document, prefix=item['prefix'], page_block=t_document.pages[0]) 159 | else: 160 | top_left = t2.TPoint(y=start.ymin - 50, x=0) 161 | lower_right = t2.TPoint(y=start.ymax + 50, x=doc_width) 162 | sel_values: list[SelectionElement] = geofinder_doc.get_selection_values_in_area(area_selection=AreaSelection( 163 | top_left=top_left, lower_right=lower_right, page_number=1), 164 | exclude_ids=[]) 165 | t_document = add_sel_elements(t_document=t_document, 166 | selection_values=sel_values, 167 | key_base_name=item['prefix'], 168 | page_block=t_document.pages[0]) 169 | 170 | return t_document 171 | 172 | def geofinder(textract_json, doc_height=1000, doc_width=1000): 173 | j = textract_json.copy() 174 | t_document = t2.TDocumentSchema().load(j) 175 | geofinder_doc = TGeoFinder(j, doc_height=doc_height, doc_width=doc_width) 176 | 177 | return t_document, geofinder_doc 178 | 179 | def image_to_byte_array(image:Image): 180 | imgByteArr = io.BytesIO() 181 | image.save(imgByteArr, format=image.format) 182 | imgByteArr = imgByteArr.getvalue() 183 | 184 | return imgByteArr 185 | 186 | def t_json_to_t_df(t_json): 187 | # convert t_json > string > csv 188 | t_str = get_string( 189 | textract_json=t_json, 190 | table_format=Pretty_Print_Table_Format.csv, 191 | output_type=[Textract_Pretty_Print.FORMS], 192 | ) 193 | 194 | return pd.read_csv(io.StringIO(t_str), sep=",") 195 | 196 | @st.cache 197 | def convert_pandas(df): 198 | # IMPORTANT: Cache the conversion to prevent computation on every rerun 199 | return df.to_csv().encode('utf-8') 200 | 201 | @st.experimental_singleton 202 | def start_textract_client(credentials): 203 | return boto3.client( 204 | 'textract', 205 | aws_access_key_id=credentials['Access key ID'].values[0], 206 | aws_secret_access_key=credentials['Secret access key'].values[0], 207 | region_name='us-east-2', 208 | ) 209 | 210 | @st.experimental_singleton 211 | def start_comprehend_client(credentials): 212 | return boto3.client( 213 | 'comprehend', 214 | aws_access_key_id=credentials['Access key ID'].values[0], 215 | aws_secret_access_key=credentials['Secret access key'].values[0], 216 | region_name='us-east-2', 217 | ) 218 | 219 | def return_fnames(folder, extensions={'.png','.jpg','.jpeg'}): 220 | f = (p for p in Path(folder).glob("**/*") if p.suffix in extensions) 221 | return [x for x in f if 'ipynb_checkpoints' not in str(x)] 222 | 223 | @st.cache 224 | def return_anno_file(folder, image_fname): 225 | files = list(sorted([x for x in Path(folder).rglob('*.json')])) 226 | selected = [x for x in files if 227 | Path(image_fname).stem in str(x) and 'ipynb_checkpoints' not in str(x)] 228 | return selected[0] 229 | 230 | 
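# Illustrative, non-executed sketch of how the geofinder helpers above can be driven.
# The phrases and prefixes below are placeholder assumptions, not values used by the app.
def _example_tag_kv_pairs(textract_json):
    """Sketch only: shows the item shape expected by tag_kv_pairs_to_text() above.
    Items with 'first_str', 'last_str' and 'prefix' tag the key/value pairs found in the
    area between the two phrases; items with only 'first_str' and 'prefix' tag the
    selection elements (checkboxes) around the phrase. Note that tag_kv_pairs_to_text()
    also reads a module-level `doc_width` (e.g. doc_width = 1000), which callers must define."""
    sample_items = [
        {'first_str': 'patient information', 'last_str': 'emergency contact', 'prefix': 'PATIENT'},
        {'first_str': 'did you feel fever', 'prefix': 'SYMPTOMS'},
    ]
    return tag_kv_pairs_to_text(textract_json, sample_items)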
def get_filename_from_cd(cd): 231 | """ 232 | Get filename from content-disposition 233 | """ 234 | if not cd: 235 | return None 236 | fname = re.findall('filename=(.+)', cd) 237 | if len(fname) == 0: 238 | return None 239 | return fname[0] 240 | 241 | def add_item_to_input_queries_list(text, alias='', verbose=False): 242 | text = re.sub('[^\w\s]','', text) 243 | if len(alias)==0: 244 | alias = text.replace(' ','_').upper() 245 | 246 | if not any(text in x['Text'] for x in st.session_state.input_queries): 247 | st.session_state.input_queries.append( 248 | { 249 | "Text": text, 250 | "Alias": alias 251 | } 252 | ) 253 | if verbose: st.success('Added') 254 | else: 255 | if verbose: st.warning('Already exists') 256 | 257 | def remove_item(): 258 | del st.session_state.input_queries[st.session_state.index] 259 | 260 | def clear_all_items(): 261 | st.session_state.input_queries = [] 262 | st.session_state.index = 0 263 | 264 | def parse_response(response): 265 | from trp import Document 266 | doc = Document(response) 267 | text = '' 268 | for page in doc.pages: 269 | for line in page.lines: 270 | for word in line.words: 271 | text = text + word.text + ' ' 272 | return text.strip() 273 | 274 | @st.experimental_singleton 275 | def load_model_pipeline(task, model_name): 276 | return pipeline(task, model=model_name) 277 | 278 | # streamlit app 279 | # ----------------------------------------------------------- 280 | st.set_page_config( 281 | page_title='Textract Workbench', 282 | page_icon=":open_book:", 283 | layout="centered", 284 | initial_sidebar_state="auto", 285 | menu_items=None) 286 | 287 | def main(): 288 | # intro and sidebar 289 | ####################### 290 | st.title('Amazon Textract Workbench v0.1') 291 | st.markdown(''' 292 | This repo is aimed at giving you a place to experiment with the tooling and 293 | will show you a step by step tutorial on how to take advantage of the geometric 294 | context detected in an image to make the tagging of key and value pairs easier 295 | and more accurate with [Amazon Textract](https://aws.amazon.com/textract/). We are 296 | running this application on top of [SageMaker Studio Lab](https://studiolab.sagemaker.aws/) 297 | combining multiple libraries such as [Hugging Face](https://huggingface.co/), 298 | [SpaCy](https://spacy.io/), and [Textractor](https://github.com/aws-samples/amazon-textract-textractor). 299 | We will also make use of the recently launched 300 | ["Queries"](https://aws.amazon.com/blogs/machine-learning/specify-and-extract-information-from-documents-using-the-new-queries-feature-in-amazon-textract/) 301 | functionality in Textract. 302 | ''') 303 | 304 | with st.sidebar: 305 | # about 306 | st.subheader('About this demo') 307 | st.markdown(''' 308 | Author: Nicolás Metallo (metallo@amazon.com) 309 | ''') 310 | st.markdown(f'This web app is running on `{device}`') 311 | 312 | # connect AWS credentials 313 | with st.expander('Connect your AWS credentials'): 314 | st.markdown('Required to use Amazon Textract. 
No data is stored locally, only streamed to memory.') 315 | credentials = pd.DataFrame() 316 | uploaded_file = st.file_uploader("Upload your csv file", type=['csv'], key='uploaded_file_credentials') 317 | 318 | if uploaded_file: 319 | credentials = pd.read_csv(io.StringIO(uploaded_file.read().decode('utf-8'))) 320 | 321 | if not credentials.empty: 322 | textract_client = start_textract_client(credentials) 323 | comprehend_client = start_comprehend_client(credentials) 324 | st.success('AWS credentials are loaded.') 325 | else: 326 | st.warning('AWS credentials are not loaded.') 327 | 328 | # (1) read input image 329 | ####################### 330 | st.header('(1) Read input image') 331 | options = st.selectbox('Please choose any of the following options', 332 | ( 333 | 'Choose sample image from library', 334 | 'Download image from URL', 335 | 'Upload your own image / file', 336 | ) 337 | ) 338 | 339 | input_image = None 340 | if options == 'Choose sample image from library': 341 | image_files = return_fnames('test_images') 342 | selected_file = st.selectbox( 343 | 'Select an image file or PDF from the list', image_files 344 | ) 345 | image_fname = selected_file 346 | st.write(f'You have selected `{image_fname}`') 347 | 348 | if Path(image_fname).suffix != '.pdf': 349 | input_image = Image.open(selected_file) 350 | else: 351 | input_image = convert_from_path(selected_file,fmt='png')[0] # only first page 352 | 353 | elif options == 'Download image from URL': 354 | image_url = st.text_input('Image URL') 355 | try: 356 | r = requests.get(image_url) 357 | image_fname = get_filename_from_cd(r.headers.get('content-disposition')) 358 | input_image = Image.open(io.BytesIO(r.content)) 359 | except Exception: 360 | st.error('There was an error downloading the image. 
Please check the URL again.') 361 | elif options == 'Upload your own image / file': 362 | uploaded_file = st.file_uploader("Choose file to upload", key='uploaded_file_input_image') 363 | if uploaded_file: 364 | if Path(uploaded_file.name).suffix != '.pdf': 365 | input_image = Image.open(io.BytesIO(uploaded_file.getvalue())) 366 | else: 367 | input_image = convert_from_bytes(uploaded_file.read(),fmt='png')[0] # only first page 368 | st.success('Image was successfully uploaded') 369 | 370 | if input_image: 371 | max_im_size = (1000,1000) 372 | input_image.thumbnail(max_im_size, Image.Resampling.LANCZOS) 373 | with st.expander("See input image"): 374 | st.image(input_image, use_column_width=True) 375 | st.info(f'The input image has been resized to fit within `{max_im_size}`') 376 | else: 377 | st.warning('There is no image loaded.') 378 | 379 | # image pre-processing 380 | ####################### 381 | st.subheader('(Optional) Image pre-processing') 382 | options_preproc = st.selectbox('Please choose any of the following options', 383 | ( 384 | 'No pre-processing', 385 | 'SauvolaNet: Learning Adaptive Sauvola Network for Degraded Document Binarization (ICDAR2021)', 386 | 'Practical Blind Denoising via Swin-Conv-UNet and Data Synthesis', 387 | 'Restormer: Efficient Transformer for High-Resolution Image Restoration (CVPR2022)', 388 | ) 389 | ) 390 | 391 | modified_image = None 392 | if input_image: 393 | if options_preproc == 'SauvolaNet: Learning Adaptive Sauvola Network for Degraded Document Binarization (ICDAR2021)': 394 | try: 395 | with st.spinner(): 396 | sauvolanet_model = sauvolanet_load_model() 397 | modified_image = sauvolanet_read_decode_image(sauvolanet_model,input_image) 398 | st.success('Done!') 399 | except Exception as e: 400 | st.error(e) 401 | elif options_preproc == 'Practical Blind Denoising via Swin-Conv-UNet and Data Synthesis': 402 | try: 403 | with st.spinner(): 404 | scunet_model = scunet_load_model() 405 | modified_image = scunet_inference(scunet_model,input_image) 406 | st.success('Done!') 407 | except Exception as e: 408 | st.error(e) 409 | elif options_preproc == 'Restormer: Efficient Transformer for High-Resolution Image Restoration (CVPR2022)': 410 | st.warning('Not implemented yet.') 411 | 412 | if modified_image: 413 | with st.expander("See modified image"): 414 | image_comparison( 415 | img1=input_image, img2=modified_image, 416 | label1='Original', label2='Modified', 417 | ) 418 | else: 419 | st.warning('There is no image loaded.') 420 | 421 | # (2) retrieve ocr preds 422 | ####################### 423 | st.header('(2) Amazon Textract') 424 | if not 'response' in st.session_state: 425 | st.session_state.response = None 426 | feature_types=[] 427 | cAA, cBA = st.columns(2) 428 | with cAA: 429 | options = st.selectbox( 430 | 'The following actions are supported:', 431 | ('DetectDocumentText','AnalyzeDocument','AnalyzeExpense','AnalyzeID'), 432 | help='Read more: https://docs.aws.amazon.com/textract/latest/dg/API_Operations.html') 433 | st.write(f'You selected: `{options}`') 434 | with cBA: 435 | if options == 'AnalyzeDocument': 436 | feature_types = st.multiselect( 437 | 'Select feature types for "AnalyzeDocument"', 438 | ['TABLES','FORMS','QUERIES'], 439 | help='Read more: https://docs.aws.amazon.com/textract/latest/dg/API_AnalyzeDocument.html') 440 | st.write(f'You selected: `{feature_types}`') 441 | 442 | if 'QUERIES' in feature_types: 443 | if 'input_queries' not in st.session_state: 444 | st.session_state.input_queries = [] 445 | if 'index' not in 
st.session_state: 446 | st.session_state.index = 0 447 | 448 | with st.expander('Would you like to upload your existing list of queries?'): 449 | uploaded_file = st.file_uploader("Choose file to upload", key='uploaded_file_queries') 450 | if uploaded_file: 451 | queries = io.StringIO(uploaded_file.getvalue().decode("utf-8")).read() 452 | queries = [x.strip().lower() for x in queries.split(',')] 453 | for x in queries: add_item_to_input_queries_list(x) 454 | st.success('List of queries was successfully uploaded') 455 | 456 | with st.expander('Input your new queries here'): 457 | cAB, cBB = st.columns([3,1]) 458 | with cAB: 459 | st.text_input('Input your new query', 460 | key='add_query_text', 461 | help='Input queries that Textract will use to extract the data that is most important to you.') 462 | with cBB: 463 | st.text_input('Alias (Optional)', 464 | key='add_query_alias') 465 | 466 | if st.button('+ Add query') and len(st.session_state.add_query_text)>0: 467 | input_query_text = st.session_state.add_query_text.strip() 468 | input_query_alias = st.session_state.add_query_alias 469 | add_item_to_input_queries_list(input_query_text, input_query_alias, verbose=True) 470 | 471 | if len(st.session_state.input_queries)==0: 472 | st.warning('No queries selected') 473 | else: 474 | with st.expander('Edit existing queries'): 475 | cAC, cBC = st.columns([3,1]) 476 | cAC.write(st.session_state.input_queries) 477 | cBC.number_input( 478 | 'Select entry number', 479 | min_value=0, 480 | max_value=len(st.session_state.input_queries)-1, 481 | key='index', 482 | ) 483 | if cBC.button('Remove item',on_click=remove_item): 484 | st.success('Deleted!') 485 | if cBC.button('Clear all',on_click=clear_all_items): 486 | if len(st.session_state.input_queries)==0: 487 | st.success('Cleared!') 488 | else: 489 | cBC.warning('Delete the uploaded file to clear all') 490 | 491 | st.subheader('Run and review response') 492 | if input_image: 493 | if not credentials.empty: 494 | aa, bb = st.columns([1,5]) 495 | placeholder = aa.empty() 496 | if aa.button('✍ Submit'): 497 | st.session_state.response = None 498 | if options == 'AnalyzeDocument' and feature_types: 499 | if 'QUERIES' in feature_types: 500 | if len(st.session_state.input_queries)==0: 501 | st.error('Please add queries before you submit your request with QUERIES') 502 | else: 503 | response = textract_client.analyze_document( 504 | Document = { 505 | 'Bytes': image_to_byte_array(input_image), 506 | }, 507 | FeatureTypes = feature_types, 508 | QueriesConfig={ 509 | 'Queries': st.session_state.input_queries[:15] # max queries per page: 15 510 | } 511 | ) 512 | else: 513 | response = textract_client.analyze_document( 514 | Document = { 515 | 'Bytes': image_to_byte_array(input_image), 516 | }, 517 | FeatureTypes = feature_types, 518 | ) 519 | elif options == 'AnalyzeExpense': 520 | response = textract_client.analyze_expense( 521 | Document={ 522 | 'Bytes': image_to_byte_array(input_image), 523 | } 524 | ) 525 | elif options == 'AnalyzeID': 526 | response = textract_client.analyze_id( 527 | DocumentPages=[ 528 | { 529 | 'Bytes': image_to_byte_array(input_image), 530 | }, 531 | ] 532 | ) 533 | else: 534 | response = textract_client.detect_document_text( 535 | Document={ 536 | 'Bytes': image_to_byte_array(input_image), 537 | } 538 | ) 539 | 540 | if response: 541 | placeholder.success('Finished!') 542 | with bb.expander('View response'): 543 | st.markdown('**RAW TEXT**') 544 | output_text = parse_response(response) 545 | st.write(output_text) 546 | 
st.markdown('**JSON**') 547 | st.write(response) 548 | 549 | if feature_types: 550 | with bb.expander('View response from AnalyzeDocument'): 551 | if 'QUERIES' in feature_types: 552 | st.markdown('**QUERIES**') 553 | d = t2.TDocumentSchema().load(response) 554 | page = d.pages[0] 555 | query_answers = d.get_query_answers(page=page) 556 | queries_df = pd.DataFrame( 557 | query_answers, 558 | columns=['Text','Alias','Value']) 559 | st.dataframe(queries_df) 560 | if 'FORMS' in feature_types: 561 | st.markdown('**FORMS**') 562 | forms = get_string( 563 | textract_json=response, 564 | table_format=Pretty_Print_Table_Format.csv, 565 | output_type=[Textract_Pretty_Print.FORMS], 566 | ) 567 | forms_df = pd.read_csv(io.StringIO(forms),sep=",") 568 | st.dataframe(forms_df) 569 | if 'TABLES' in feature_types: 570 | st.markdown('**TABLES**') 571 | tables = get_string( 572 | textract_json=response, 573 | table_format=Pretty_Print_Table_Format.csv, 574 | output_type=[Textract_Pretty_Print.TABLES], 575 | ) 576 | tables_df = pd.read_csv(io.StringIO(tables),sep=",") 577 | st.dataframe(tables_df) 578 | 579 | if options == 'AnalyzeExpense': 580 | with st.expander('View response from AnalyzeExpense'): 581 | pass 582 | 583 | if options == 'AnalyzeID': 584 | with st.expander('View response from AnalyzeID'): 585 | pass 586 | 587 | st.session_state.response = response 588 | elif not st.session_state.response: 589 | st.warning('No response generated') 590 | else: 591 | st.warning('AWS credentials are not loaded.') 592 | else: 593 | st.warning('There is no image loaded.') 594 | 595 | # expand with Amazon Comprehend and Hugging Face 596 | ####################### 597 | st.header('(3) Amazon Comprehend') 598 | st.write('Read more here: https://docs.aws.amazon.com/comprehend/latest/dg/what-is.html') 599 | if input_image: 600 | if st.session_state.response: 601 | options = st.selectbox( 602 | 'Please select any of the following models implemented on Hugging Face', 603 | ['Not selected','Entity recognition','Detect PII','Topic Modeling'], 604 | help='https://docs.aws.amazon.com/comprehend/latest/dg/get-started-api.html', 605 | ) 606 | if options == 'Entity recognition': 607 | ner = comprehend_client.detect_entities( 608 | Text=parse_response(st.session_state.response), LanguageCode='en') 609 | with st.expander('View response'): 610 | st.write(ner) 611 | if options == 'Detect PII': 612 | pii = comprehend_client.detect_pii_entities( 613 | Text=parse_response(st.session_state.response), LanguageCode='en') 614 | with st.expander('View response'): 615 | st.write(pii) 616 | if options == 'Topic Modeling': 617 | st.warning('Not implemented yet.') 618 | else: 619 | st.warning('No response generated') 620 | else: 621 | st.warning('There is no image loaded.') 622 | 623 | st.header('(4) Hugging Face Transformers') 624 | st.write('Read more here: https://huggingface.co/') 625 | st.subheader("Summarization") 626 | if input_image: 627 | if st.session_state.response: 628 | options = st.selectbox( 629 | 'Please select any of the following models implemented on Hugging Face', 630 | ['Not selected','google/pegasus-xsum','facebook/bart-large-cnn', 'Use another'], 631 | help='https://huggingface.co/models?pipeline_tag=summarization&sort=downloads', 632 | ) 633 | 634 | if options == 'Use another': 635 | options = st.text_input('Enter model name here, e.g. 
"sshleifer/distilbart-cnn-12-6"') 636 | 637 | if not options == 'Not selected' and len(options)>0: 638 | with st.spinner('Downloading model weights and loading...'): 639 | pipe = load_model_pipeline(task="summarization", model_name=options) 640 | summary = pipe(parse_response(st.session_state.response), 641 | max_length=130, min_length=30, do_sample=False) 642 | with st.expander('View response'): 643 | st.write(summary) 644 | 645 | st.write(f'You selected: `{options}`') 646 | else: 647 | st.warning('No response generated') 648 | 649 | st.subheader("Zero-shot classification") 650 | if input_image: 651 | if st.session_state.response: 652 | options = st.selectbox( 653 | 'Please select any of the following models implemented on Hugging Face', 654 | [ 655 | 'Not selected', 656 | 'typeform/distilbert-base-uncased-mnli', 657 | 'cross-encoder/nli-distilroberta-base', 658 | 'MoritzLaurer/mDeBERTa-v3-base-mnli-xnli', 659 | 'Use another', 660 | ], 661 | help='https://huggingface.co/models?pipeline_tag=zero-shot-classification', 662 | ) 663 | 664 | if options == 'Use another': 665 | options = st.text_input('Enter model name here, e.g. "cross-encoder/nli-distilroberta-base"') 666 | 667 | if not options == 'Not selected' and len(options)>0: 668 | with st.spinner('Downloading model weights and loading...'): 669 | pipe = load_model_pipeline(task="zero-shot-classification", model_name=options) 670 | candidate_labels = st.text_input( 671 | 'Possible class names (comma-separated)', 672 | value='utility bill, benefit claim, medical form', 673 | ) 674 | if candidate_labels: candidate_labels = [x.strip() for x in candidate_labels.split(',')] 675 | zero_shot_class = pipe(parse_response(st.session_state.response), candidate_labels) 676 | with st.expander('View response'): 677 | st.write(zero_shot_class) 678 | 679 | st.write(f'You selected: `{options}`') 680 | else: 681 | st.warning('No response generated') 682 | else: 683 | st.warning('There is no image loaded.') 684 | st.header('(5) SpaCy') 685 | st.write('Read more here: https://spacy.io/') 686 | if input_image: 687 | if st.session_state.response: 688 | options = st.selectbox( 689 | 'Select visualisers:', 690 | ["Not selected","NER with 'en_core_web_sm'"], 691 | help='https://github.com/explosion/spacy-streamlit', 692 | ) 693 | 694 | if options != 'Not selected': 695 | with st.spinner('Loading SpaCy model...'): 696 | import spacy 697 | import spacy_streamlit 698 | 699 | try: nlp = spacy.load("en_core_web_sm") 700 | except: os.system('python -m spacy download en_core_web_sm') 701 | nlp = spacy.load("en_core_web_sm") 702 | doc = nlp(parse_response(st.session_state.response)) 703 | spacy_streamlit.visualize_ner( 704 | doc, labels=nlp.get_pipe("ner").labels) 705 | else: 706 | st.warning('No response generated') 707 | 708 | # footer 709 | st.header('References') 710 | st.code( 711 | ''' 712 | @INPROCEEDINGS{9506664, 713 | author={Li, Deng and Wu, Yue and Zhou, Yicong}, 714 | booktitle={The 16th International Conference on Document Analysis and Recognition (ICDAR)}, 715 | title={SauvolaNet: Learning Adaptive Sauvola Network for Degraded Document Binarization}, 716 | year={2021}, 717 | volume={}, 718 | number={}, 719 | pages={538–553}, 720 | doi={https://doi.org/10.1007/978-3-030-86337-1_36}} 721 | 722 | @article{zhang2022practical, 723 | title={Practical Blind Denoising via Swin-Conv-UNet and Data Synthesis}, 724 | author={Zhang, Kai and Li, Yawei and Liang, Jingyun and Cao, Jiezhang and Zhang, Yulun and Tang, Hao and Timofte, Radu and Van Gool, Luc}, 725 | 
journal={arXiv preprint}, 726 | year={2022} 727 | } 728 | ''' 729 | , language='bibtex') 730 | 731 | st.header('Disclaimer') 732 | st.markdown(''' 733 | - The content provided in this repository is for demonstration purposes and not meant for production. You should use your own discretion when using the content. 734 | - The ideas and opinions outlined in these examples are my own and do not represent the opinions of AWS. 735 | ''') 736 | 737 | # run application 738 | # ----------------------------------------------------------- 739 | if __name__ == '__main__': 740 | main() --------------------------------------------------------------------------------
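For quick reference, the Textract request at the heart of both app.py and end_to_end_app.py can also be exercised outside Streamlit with plain boto3. The sketch below is a minimal example under stated assumptions: it uses the bundled sample image, the same us-east-2 region the apps default to, and an illustrative query text and alias that are not part of the repository.

import boto3

# Minimal sketch of the AnalyzeDocument + QUERIES call issued by the apps above.
# File path, region, query text, and alias are illustrative assumptions.
textract = boto3.client('textract', region_name='us-east-2')
with open('test_images/patient_intake_form_sample.jpg', 'rb') as f:
    document_bytes = f.read()

response = textract.analyze_document(
    Document={'Bytes': document_bytes},
    FeatureTypes=['FORMS', 'QUERIES'],
    QueriesConfig={'Queries': [{'Text': 'What is the patient name?', 'Alias': 'PATIENT_NAME'}]},
)

# Query answers come back as QUERY_RESULT blocks in the standard Textract response.
for block in response['Blocks']:
    if block['BlockType'] == 'QUERY_RESULT':
        print(block.get('Text'))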