├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md └── workflows │ └── codeql-analysis.yml ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── auto_tensorflow └── tfa.py ├── header.png ├── logo.png ├── pyproject.toml ├── setup.py └── tutorials ├── TFAuto_|_Classification.ipynb └── TFAuto_|_Regression.ipynb /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 16 | **Expected behavior** 17 | A clear and concise description of what you expected to happen. 18 | 19 | **Versions:** 20 | - Auto-Tensorflow: 21 | - Tensorflow: 22 | - Tensorflow-Extended: 23 | 24 | **Additional context** 25 | Add any other context about the problem here. 26 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /.github/workflows/codeql-analysis.yml: -------------------------------------------------------------------------------- 1 | # For most projects, this workflow file will not need changing; you simply need 2 | # to commit it to your repository. 3 | # 4 | # You may wish to alter this file to override the set of languages analyzed, 5 | # or to provide custom queries or build logic. 6 | # 7 | # ******** NOTE ******** 8 | # We have attempted to detect the languages in your repository. Please check 9 | # the `language` matrix defined below to confirm you have the correct set of 10 | # supported CodeQL languages. 11 | # 12 | name: "AutoQLChecks" 13 | 14 | on: 15 | push: 16 | branches: [ main ] 17 | pull_request: 18 | # The branches below must be a subset of the branches above 19 | branches: [ main ] 20 | schedule: 21 | - cron: '17 22 * * 5' 22 | 23 | jobs: 24 | analyze: 25 | name: Analyze 26 | runs-on: ubuntu-latest 27 | permissions: 28 | actions: read 29 | contents: read 30 | security-events: write 31 | 32 | strategy: 33 | fail-fast: false 34 | matrix: 35 | language: [ 'python' ] 36 | # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ] 37 | # Learn more: 38 | # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed 39 | 40 | steps: 41 | - name: Checkout repository 42 | uses: actions/checkout@v2 43 | 44 | # Initializes the CodeQL tools for scanning. 
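# (The strategy matrix above fans this job out once per listed language, so
# the ${{ matrix.language }} reference below receives exactly one language per job run.)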
45 | - name: Initialize CodeQL 46 | uses: github/codeql-action/init@v1 47 | with: 48 | languages: ${{ matrix.language }} 49 | # If you wish to specify custom queries, you can do so here or in a config file. 50 | # By default, queries listed here will override any specified in a config file. 51 | # Prefix the list here with "+" to use these queries and those in the config file. 52 | # queries: ./path/to/local/query, your-org/your-repo/queries@main 53 | 54 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 55 | # If this step fails, then you should remove it and run the build manually (see below) 56 | - name: Autobuild 57 | uses: github/codeql-action/autobuild@v1 58 | 59 | # ℹ️ Command-line programs to run using the OS shell. 60 | # 📚 https://git.io/JvXDl 61 | 62 | # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines 63 | # and modify them (or add more) to build your code if your project 64 | # uses a compiled language 65 | 66 | #- run: | 67 | # make bootstrap 68 | # make release 69 | 70 | - name: Perform CodeQL Analysis 71 | uses: github/codeql-action/analyze@v1 72 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | We as members, contributors, and leaders pledge to make participation in our 6 | community a harassment-free experience for everyone, regardless of age, body 7 | size, visible or invisible disability, ethnicity, sex characteristics, gender 8 | identity and expression, level of experience, education, socio-economic status, 9 | nationality, personal appearance, race, religion, or sexual identity 10 | and orientation. 11 | 12 | We pledge to act and interact in ways that contribute to an open, welcoming, 13 | diverse, inclusive, and healthy community. 14 | 15 | ## Our Standards 16 | 17 | Examples of behavior that contributes to a positive environment for our 18 | community include: 19 | 20 | * Demonstrating empathy and kindness toward other people 21 | * Being respectful of differing opinions, viewpoints, and experiences 22 | * Giving and gracefully accepting constructive feedback 23 | * Accepting responsibility and apologizing to those affected by our mistakes, 24 | and learning from the experience 25 | * Focusing on what is best not just for us as individuals, but for the 26 | overall community 27 | 28 | Examples of unacceptable behavior include: 29 | 30 | * The use of sexualized language or imagery, and sexual attention or 31 | advances of any kind 32 | * Trolling, insulting or derogatory comments, and personal or political attacks 33 | * Public or private harassment 34 | * Publishing others' private information, such as a physical or email 35 | address, without their explicit permission 36 | * Other conduct which could reasonably be considered inappropriate in a 37 | professional setting 38 | 39 | ## Enforcement Responsibilities 40 | 41 | Community leaders are responsible for clarifying and enforcing our standards of 42 | acceptable behavior and will take appropriate and fair corrective action in 43 | response to any behavior that they deem inappropriate, threatening, offensive, 44 | or harmful. 
45 | 46 | Community leaders have the right and responsibility to remove, edit, or reject 47 | comments, commits, code, wiki edits, issues, and other contributions that are 48 | not aligned to this Code of Conduct, and will communicate reasons for moderation 49 | decisions when appropriate. 50 | 51 | ## Scope 52 | 53 | This Code of Conduct applies within all community spaces, and also applies when 54 | an individual is officially representing the community in public spaces. 55 | Examples of representing our community include using an official e-mail address, 56 | posting via an official social media account, or acting as an appointed 57 | representative at an online or offline event. 58 | 59 | ## Enforcement 60 | 61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 62 | reported to the community leaders responsible for enforcement. 63 | All complaints will be reviewed and investigated promptly and fairly. 64 | 65 | All community leaders are obligated to respect the privacy and security of the 66 | reporter of any incident. 67 | 68 | ## Enforcement Guidelines 69 | 70 | Community leaders will follow these Community Impact Guidelines in determining 71 | the consequences for any action they deem in violation of this Code of Conduct: 72 | 73 | ### 1. Correction 74 | 75 | **Community Impact**: Use of inappropriate language or other behavior deemed 76 | unprofessional or unwelcome in the community. 77 | 78 | **Consequence**: A private, written warning from community leaders, providing 79 | clarity around the nature of the violation and an explanation of why the 80 | behavior was inappropriate. A public apology may be requested. 81 | 82 | ### 2. Warning 83 | 84 | **Community Impact**: A violation through a single incident or series 85 | of actions. 86 | 87 | **Consequence**: A warning with consequences for continued behavior. No 88 | interaction with the people involved, including unsolicited interaction with 89 | those enforcing the Code of Conduct, for a specified period of time. This 90 | includes avoiding interactions in community spaces as well as external channels 91 | like social media. Violating these terms may lead to a temporary or 92 | permanent ban. 93 | 94 | ### 3. Temporary Ban 95 | 96 | **Community Impact**: A serious violation of community standards, including 97 | sustained inappropriate behavior. 98 | 99 | **Consequence**: A temporary ban from any sort of interaction or public 100 | communication with the community for a specified period of time. No public or 101 | private interaction with the people involved, including unsolicited interaction 102 | with those enforcing the Code of Conduct, is allowed during this period. 103 | Violating these terms may lead to a permanent ban. 104 | 105 | ### 4. Permanent Ban 106 | 107 | **Community Impact**: Demonstrating a pattern of violation of community 108 | standards, including sustained inappropriate behavior, harassment of an 109 | individual, or aggression toward or disparagement of classes of individuals. 110 | 111 | **Consequence**: A permanent ban from any sort of public interaction within 112 | the community. 113 | 114 | ## Attribution 115 | 116 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], 117 | version 2.0, available at 118 | https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. 119 | 120 | Community Impact Guidelines were inspired by [Mozilla's code of conduct 121 | enforcement ladder](https://github.com/mozilla/diversity). 
122 | 123 | [homepage]: https://www.contributor-covenant.org 124 | 125 | For answers to common questions about this code of conduct, see the FAQ at 126 | https://www.contributor-covenant.org/faq. Translations are available at 127 | https://www.contributor-covenant.org/translations. 128 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # How to Contribute 2 | 3 | We'd love to accept your patches and contributions to this project. There are 4 | just a few small guidelines you need to follow. 5 | 6 | ## Contributor License Agreement 7 | 8 | Contributions to this project must be accompanied by a Contributor License 9 | Agreement. You (or your employer) retain the copyright to your contribution; 10 | this simply gives us permission to use and redistribute your contributions as 11 | part of the project. 12 | 13 | ## Code Reviews 14 | 15 | All submissions, including submissions by project members, require review. We 16 | use GitHub pull requests for this purpose. Consult 17 | [GitHub Help](https://help.github.com/articles/about-pull-requests/) for more 18 | information on using pull requests. 19 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. 
For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 
203 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | 
2 | 
3 | [![Downloads](https://static.pepy.tech/personalized-badge/auto-tensorflow?period=total&units=none&left_color=grey&right_color=brightgreen&left_text=Downloads)](https://pepy.tech/project/auto-tensorflow)
4 | [![Generic badge](https://img.shields.io/pypi/v/auto-tensorflow.svg?logo=pypi&logoColor=white&color=orange)](https://pypi.org/project/auto-tensorflow/)
5 | ![Generic badge](https://img.shields.io/badge/python-v3.6%20%7C%203.7%20%7C%203.8-blue)
6 | ![example workflow](https://github.com/rafiqhasan/auto-tensorflow/actions/workflows/codeql-analysis.yml/badge.svg)
7 | ![Open issues](https://img.shields.io/github/issues-raw/rafiqhasan/auto-tensorflow?color=red)
8 | 
9 | ### **Auto Tensorflow - Mission:**
10 | **Build Low Code Automated Tensorflow, What-IF explainable models in just 3 lines of code.**
11 | 
12 | To make Deep Learning on Tensorflow absolutely easy for the masses with a low-code framework, and to increase trust in ML models through What-IF model explainability.
13 | 
14 | ### **Under the hood:**
15 | Built on top of powerful **Tensorflow** ecosystem tools like **TFX**, **TF APIs** and the **What-IF Tool**, the library automatically does all the heavy lifting internally: EDA, schema discovery, feature engineering, hyper-parameter tuning, model search and more. This empowers developers to focus on quickly building end-user applications, without any knowledge of Tensorflow, ML or debugging. It is built to handle large volumes of data / BigData using only scalable TF components, and models trained with auto-tensorflow can be deployed directly on any cloud such as GCP / AWS / Azure.
16 | 
17 | 
18 | 
19 | ### **Official Launch**: https://youtu.be/sil-RbuckG0
20 | 
21 | ### **Features:**
22 | 1. Build Classification / Regression models on CSV data
23 | 2. Automated Schema Inference
24 | 3. Automated Feature Engineering
25 |     - Discretization
26 |     - Scaling
27 |     - Normalization
28 |     - Text Embedding
29 |     - Category encoding
30 | 4. Automated Model build for mixed data types (Continuous, Categorical and Free Text)
31 | 5. Automated Hyper-parameter tuning
32 | 6. Automated GPU Distributed training
33 | 7. Automated UI-based What-IF analysis (Fairness, Feature Partial dependencies, What-IF)
34 | 8. Control over model complexity
35 | 9. No dependency on Pandas / SKLearn
36 | 10. Can handle datasets of any size - including multiple CSV files
37 | 
38 | ### **Tutorials**:
39 | 1. [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/rafiqhasan/auto-tensorflow/blob/main/tutorials/TFAuto_%7C_Classification.ipynb) - Auto Classification on CSV data
40 | 2. [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/rafiqhasan/auto-tensorflow/blob/main/tutorials/TFAuto_%7C_Regression.ipynb) - Auto Regression on CSV data
41 | 
42 | ### **Setup:**
43 | 1. Install library
44 |     - PIP (Recommended): ```pip install auto-tensorflow```
45 |     - Nightly: ```pip install git+https://github.com/rafiqhasan/auto-tensorflow.git```
46 | 2. Works best on UNIX/Linux/Debian/Google Colab/MacOS
47 | 
48 | ### **Usage:**
49 | 1. Initialize TFAuto Engine
50 | ```
51 | from auto_tensorflow.tfa import TFAuto
52 | tfa = TFAuto(train_data_path='/content/train_data/', test_data_path='/content/test_data/', path_root='/content/tfauto')
53 | ```
54 | 
55 | 2. Step 1 - Automated EDA and Schema discovery
56 | ```
57 | tfa.step_data_explore(viz=True) ##viz=False for no visualization
58 | ```
59 | 
60 | 3. Step 2 - Automated ML model build and train
61 | ```
62 | tfa.step_model_build(label_column = 'price', model_type='REGRESSION', model_complexity=1)
63 | ```
64 | 
65 | 4. Step 3 - Automated What-IF Tool launch
66 | ```
67 | tfa.step_model_whatif()
68 | ```
69 | 
70 | ### **API Arguments:**
71 | - Method **TFAuto**
72 |   - ```train_data_path```: Path where training data is stored
73 |   - ```test_data_path```: Path where Test / Eval data is stored
74 |   - ```path_root```: Directory for running TFAuto (the directory must NOT already exist)
75 | 
76 | - Method **step_data_explore**
77 |   - ```viz```: Whether data visualization is required - True or False (Default: False)
78 | 
79 | - Method **step_model_build**
80 |   - `label_column`: The feature to be used as Label
81 |   - `model_type`: Either 'REGRESSION' (Default) or 'CLASSIFICATION'
82 |   - `model_complexity`:
83 |     - `0`: Model with default hyper-parameters
84 |     - `1` (Default): Model with automated hyper-parameter tuning
85 |     - `2`: Complexity 1 + Advanced fine-tuning of Text layers
86 | 
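For reference, the three steps above chain together into one short script. A minimal sketch, assuming a classification CSV dataset with a hypothetical `income` label column (swap in your own paths and label):
```
from auto_tensorflow.tfa import TFAuto

## Point the engine at folders of CSV files; path_root must not already exist
tfa = TFAuto(train_data_path='/content/train_data/', test_data_path='/content/test_data/', path_root='/content/tfauto')

## Step 1 - Automated EDA and schema discovery (viz=False skips the visual reports)
tfa.step_data_explore(viz=False)

## Step 2 - Classification build; model_complexity=0 skips hyper-parameter tuning for a quick baseline
tfa.step_model_build(label_column='income', model_type='CLASSIFICATION', model_complexity=0)

## Step 3 - Launch the What-IF Tool on the trained model
tfa.step_model_whatif()
```
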
87 | ### **Current limitations:**
88 | There are a few limitations in the initial release, but we are working day and night to resolve these and **ship the missing capabilities as future features**.
89 | 1. Doesn't support Image / Audio data
90 | 
91 | ### **Future roadmap:**
92 | 1. Add support for Timeseries / Audio / Image data
93 | 2. Add feature to download full pipeline model Python code for advanced tweaking
94 | 
95 | ### **Release History:**
96 | **1.3.4** - 09/12/2022 - [Release Notes](https://github.com/rafiqhasan/auto-tensorflow/releases/tag/1.3.4)
97 | 
98 | **1.3.3** - 09/12/2022 - [Release Notes](https://github.com/rafiqhasan/auto-tensorflow/releases/tag/1.3.3)
99 | 
100 | **1.3.2** - 27/11/2021 - [Release Notes](https://github.com/rafiqhasan/auto-tensorflow/releases/tag/1.3.2)
101 | 
102 | **1.3.1** - 18/11/2021 - [Release Notes](https://github.com/rafiqhasan/auto-tensorflow/releases/tag/1.3.1)
103 | 
104 | **1.2.0** - 24/07/2021 - [Release Notes](https://github.com/rafiqhasan/auto-tensorflow/releases/tag/1.2.0)
105 | 
106 | **1.1.1** - 14/07/2021 - [Release Notes](https://github.com/rafiqhasan/auto-tensorflow/releases/tag/1.1.1)
107 | 
108 | **1.0.1** - 07/07/2021 - [Release Notes](https://github.com/rafiqhasan/auto-tensorflow/releases/tag/1.0.1)
109 | 
--------------------------------------------------------------------------------
/auto_tensorflow/tfa.py:
--------------------------------------------------------------------------------
1 | ##Owner: Hasan Rafiq
2 | ##URL: https://www.linkedin.com/in/sam04/
3 | import os
4 | import tempfile
5 | import re
6 | import json
7 | import urllib
8 | 
9 | import absl
10 | import tensorflow as tf
11 | import tensorflow.keras as keras
12 | import tensorflow_text
13 | import tfx
14 | import tensorflow_hub as hub
15 | import keras_tuner as kt
16 | # import tensorflow_model_analysis as tfma
17 | import witwidget
18 | import tensorflow_data_validation as tfdv
19 | tf.get_logger().propagate = False
20 | 
21 | from tfx.components import CsvExampleGen
22 | from typing import Dict, List, Text
23 | from tfx.components import Evaluator, ExampleValidator, Pusher, SchemaGen, Trainer, StatisticsGen, Transform
24 | from tfx.orchestration import metadata, pipeline 25 | from tfx.orchestration.experimental.interactive.interactive_context import InteractiveContext 26 | from tfx.proto import pusher_pb2, trainer_pb2, example_gen_pb2 27 | from tfx.v1 import proto 28 | from tfx.orchestration.local.local_dag_runner import LocalDagRunner 29 | from ml_metadata.proto import metadata_store_pb2 30 | from tfx.orchestration.portable.mlmd import execution_lib 31 | from tensorflow_metadata.proto.v0 import anomalies_pb2 32 | from tensorflow_data_validation.utils import io_util, display_util 33 | from google.protobuf.json_format import MessageToDict 34 | from witwidget.notebook.visualization import WitConfigBuilder 35 | from witwidget.notebook.visualization import WitWidget 36 | 37 | class TFAutoUtils(): 38 | def __init__(self, data_path, path_root='/tfx'): 39 | ##Define all constants 40 | self._tfx_root = os.path.join(os.getcwd(), path_root) 41 | self._pipeline_root = os.path.join(self._tfx_root, 'pipelines'); # Join ~/tfx/pipelines/ 42 | self._metadata_db_root = os.path.join(self._tfx_root, 'metadata.db'); # Join ~/tfx/metadata.db 43 | self._metadata = os.path.join(self._tfx_root, 'metadata'); # Join ~/tfx/metadata 44 | self._log_root = os.path.join(self._tfx_root, 'logs'); 45 | self._model_root = os.path.join(self._tfx_root, 'model'); 46 | self._data_path = data_path 47 | 48 | def check_directories(self): 49 | if os.path.exists(self._tfx_root): 50 | raise Exception("Root Directory: {} already exists. Please make sure directory doesn't exist!".format(self._tfx_root)) 51 | 52 | def create_directories(self): 53 | self.check_directories() 54 | 55 | directories = [self._tfx_root, self._pipeline_root, self._metadata, 56 | self._log_root, self._model_root] 57 | [ print("Creating {}".format(d)) for d in directories ] 58 | [ os.mkdir(d) for d in directories ] 59 | 60 | def load_anomalies_binary(self, input_path: Text) -> anomalies_pb2.Anomalies: 61 | """Loads the Anomalies proto stored in text format in the input path. 62 | Args: 63 | input_path: File path from which to load the Anomalies proto. 64 | Returns: 65 | An Anomalies protocol buffer. 66 | """ 67 | anomalies_proto = anomalies_pb2.Anomalies() 68 | 69 | anomalies_proto.ParseFromString(io_util.read_file_to_string( 70 | input_path, binary_mode=True)) 71 | 72 | return anomalies_proto 73 | 74 | def _get_latest_execution(self, metadata, pipeline_name, component_id): 75 | """Gets the execution objects for the latest run of the pipeline.""" 76 | node_context = metadata.store.get_context_by_type_and_name( 77 | 'node', f'{pipeline_name}.{component_id}') 78 | executions = metadata.store.get_executions_by_context(node_context.id) 79 | # Pick the latest one. 
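# (last_update_time_since_epoch is a system field MLMD stamps on every execution,
# so taking the max below resolves to the execution of the most recent pipeline run.)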
80 | return max(executions, key=lambda e: e.last_update_time_since_epoch) 81 | 82 | #Get all running artifact details from MLMD 83 | def _get_artifacts_for_component_id(self, metadata, execution): 84 | return execution_lib.get_artifacts_dict(metadata, execution.id, 85 | [metadata_store_pb2.Event.OUTPUT]) 86 | 87 | #Get all running artifact directories from MLMD for later uses 88 | def get_artifacts_directories(self, component_name='StatisticsGen'): 89 | metadata_connection_config = metadata.sqlite_metadata_connection_config(self._metadata_db_root) 90 | 91 | with metadata.Metadata(metadata_connection_config) as metadata_handler: 92 | execution = self._get_latest_execution(metadata_handler, 'data_pipeline', component_name) 93 | output_directory = self._get_artifacts_for_component_id(metadata_handler, execution) 94 | 95 | return output_directory 96 | 97 | class TFAutoData(): 98 | def __init__(self): 99 | ##Define all constants 100 | self.features_list = [] #Features used for training 101 | self._train_data_path = '' #Training data path 102 | self.schema = '' #Schema details of data 103 | self.stats_train = '' #Statistics of Train data 104 | self.stats_eval = '' #Statistics of Eval data 105 | self.anom_train = '' 106 | self.anom_eval = '' 107 | self.file_headers = [] #Headers of CSV train file 108 | self._len_train = 0 #Training data size 109 | self._run = False #Run flag 110 | 111 | def collect_feature_details(self, schema): 112 | features_list = [] 113 | features_dict = display_util.get_schema_dataframe(schema)[0].to_dict('index') 114 | features_stats = MessageToDict(self.stats_train) 115 | self._len_train = features_stats['datasets'][0]['numExamples'] 116 | 117 | for f_ in features_dict.keys(): 118 | features_dict[f_]['feature'] = re.sub( r"\'", "", f_) 119 | #Feature has a domain( categorical feature ) 120 | if features_dict[f_]['Domain'] != '-': 121 | features_dict[f_]['categorical_values'] = [ v for v in tfdv.get_domain(schema, features_dict[f_]['feature']).value ] 122 | features_dict[f_]['num_categorical_values'] = len(tfdv.get_domain(schema, features_dict[f_]['feature']).value) 123 | 124 | #Handle for free Text( If ratio of unique values with rows > 0.2 ) 125 | if int(features_dict[f_]['num_categorical_values']) / int(self._len_train) > 0.2: 126 | features_dict[f_]['Type'] = 'STRING' 127 | features_dict[f_]['categorical_values'] = "" 128 | features_dict[f_]['num_categorical_values'] = 0 129 | else: 130 | features_dict[f_]['Type'] = 'CATEGORICAL' 131 | 132 | #Min/Max for numerical features 133 | #Count of unique values 134 | for feat in features_stats['datasets'][0]['features']: 135 | curr_feat = feat['path']['step'][0] 136 | if curr_feat == features_dict[f_]['feature'] and features_dict[f_]['Type'] in ['INT','FLOAT']: 137 | features_dict[f_]['min'] = feat['numStats'].get('min', 0.0) 138 | features_dict[f_]['max'] = feat['numStats'].get('max', 0.0) 139 | features_dict[f_]['mean'] = feat['numStats'].get('mean', 0.0) 140 | features_dict[f_]['std_dev'] = feat['numStats'].get('stdDev', 1) 141 | elif curr_feat == features_dict[f_]['feature'] and features_dict[f_]['Type'] == 'CATEGORICAL': 142 | features_dict[f_]['categorical_values_count'] = {} 143 | features_dict[f_]['categorical_values_count_total'] = 0 144 | #For each categorical value store counts 145 | for categ in features_dict[f_]['categorical_values']: 146 | categ_found = 0 147 | for topvals in feat['stringStats']['topValues']: 148 | if categ == topvals['value']: 149 | categ_found = 1 150 | 
features_dict[f_]['categorical_values_count'][topvals.get('value', "NA")] = topvals.get('frequency', 0) 151 | features_dict[f_]['categorical_values_count_total'] += topvals.get('frequency', 0) 152 | break 153 | #If value count for this category not present 154 | if categ_found == 0: 155 | features_dict[f_]['categorical_values_count'][categ] = 1 156 | features_dict[f_]['categorical_values_count_total'] += 1 157 | 158 | features_list.append(features_dict[f_]) 159 | self.features_list = features_list 160 | return self.features_list 161 | 162 | def get_columns_from_file_header(self, path, num_cols): 163 | record_defaults=[] 164 | #Create dataset input functions 165 | if os.path.isdir(path): 166 | path = path + "*" 167 | elif os.path.isfile(path): 168 | path = path 169 | 170 | for _ in range(num_cols): 171 | record_defaults.append('') 172 | 173 | # Create list of files that match pattern 174 | file_list = tf.io.gfile.glob(path) 175 | 176 | # Create dataset from file list 177 | dataset = tf.data.experimental.CsvDataset(file_list, header=False, record_defaults=record_defaults, use_quote_delim=False) 178 | 179 | for example in dataset.take(1): 180 | return ([e.numpy().decode('utf-8') for e in example]) 181 | 182 | def run_initial(self, _train_data_path, _test_data_path, _tfx_root, _metadata_db_root, tfautils, viz=False): 183 | """Run all data steps in pipeline and generate visuals""" 184 | input = proto.Input(splits=[ 185 | example_gen_pb2.Input.Split(name='train', pattern=os.path.join(_train_data_path, "*")), 186 | example_gen_pb2.Input.Split(name='eval', pattern=os.path.join(_test_data_path, "*")) 187 | ]) 188 | self.example_gen = CsvExampleGen(input_base="/", input_config=input) 189 | 190 | self.statistics_gen = StatisticsGen(examples=self.example_gen.outputs['examples']) 191 | 192 | self.infer_schema = SchemaGen( 193 | statistics=self.statistics_gen.outputs['statistics'], infer_feature_shape=False) 194 | 195 | self.validate_stats = ExampleValidator( 196 | statistics=self.statistics_gen.outputs['statistics'], 197 | schema=self.infer_schema.outputs['schema']) 198 | 199 | #Create pipeline 200 | self.pipeline = pipeline.Pipeline( 201 | pipeline_name= 'data_pipeline', 202 | pipeline_root= _tfx_root, 203 | components=[ 204 | self.example_gen, self.statistics_gen, self.infer_schema, self.validate_stats 205 | ], 206 | metadata_connection_config = metadata.sqlite_metadata_connection_config(_metadata_db_root), 207 | enable_cache=True, 208 | beam_pipeline_args=['--direct_num_workers=%d' % 0, '--direct_running_mode=multi_threading'], 209 | ) 210 | 211 | #Run data pipeline 212 | print("Data: Pipeline execution started...") 213 | LocalDagRunner().run(self.pipeline) 214 | self._run = True 215 | 216 | #Get directories after run 217 | dir_stats = tfautils.get_artifacts_directories('StatisticsGen') 218 | dir_anom = tfautils.get_artifacts_directories('ExampleValidator') 219 | dir_schema = tfautils.get_artifacts_directories('SchemaGen') 220 | 221 | #Get statistics 222 | stats_url_train = str(dir_stats['statistics'][0].uri) + "/Split-train/FeatureStats.pb" 223 | self.stats_train = tfdv.load_stats_binary(stats_url_train) 224 | stats_url_eval = str(dir_stats['statistics'][0].uri) + "/Split-eval/FeatureStats.pb" 225 | self.stats_eval = tfdv.load_stats_binary(stats_url_eval) 226 | 227 | #Get data anomalies 228 | anom_url_train = str(dir_anom['anomalies'][0].uri) + "/Split-train/SchemaDiff.pb" 229 | self.anom_train = tfautils.load_anomalies_binary(anom_url_train) 230 | anom_url_eval = str(dir_anom['anomalies'][0].uri) 
+ "/Split-eval/SchemaDiff.pb" 231 | self.anom_eval = tfautils.load_anomalies_binary(anom_url_eval) 232 | 233 | #Get Schema and Features details, generate config JSON 234 | schema_url = str(dir_schema['schema'][0].uri) + "/schema.pbtxt" 235 | self.schema = tfdv.load_schema_text(schema_url) 236 | self.features_list = self.collect_feature_details(self.schema) 237 | 238 | #Get columns from training file 239 | self.file_headers = self.get_columns_from_file_header(_train_data_path, len(self.features_list)) 240 | 241 | # Visualize results using TFDV 242 | if viz==True: 243 | #Show Schema Gen 244 | print("\n### Generating schema visuals") 245 | tfdv.display_schema(self.schema) 246 | 247 | #Show Train Vs Eval Schema Stats 248 | print("\n### Generating Comparative Statistics Visuals...") 249 | tfdv.visualize_statistics(lhs_statistics=self.stats_eval, rhs_statistics=self.stats_train, 250 | lhs_name='EVAL_DATASET', rhs_name='TRAIN_DATASET') 251 | 252 | #Show Eval Anomalies 253 | print("\n### Generating Test Data Anomaly Visuals...") 254 | tfdv.display_anomalies(self.anom_eval) 255 | 256 | return self.pipeline 257 | 258 | class TextEncoder(tf.keras.Model): 259 | def __init__(self, strategy, trainable=False): 260 | self.strategy = strategy 261 | with self.strategy.scope(): 262 | super(TextEncoder, self).__init__() 263 | self.encoder = hub.KerasLayer("https://tfhub.dev/google/tf2-preview/nnlm-en-dim50/1", trainable=trainable) 264 | 265 | def __call__(self, inp): 266 | with self.strategy.scope(): 267 | #Encode text 268 | embedding = self.encoder(inp) 269 | 270 | return embedding 271 | 272 | class TFAutoModel(): 273 | def __init__(self, _tfx_root, train_data_path, test_data_path): 274 | ##Define all constants 275 | self._tfx_root = _tfx_root 276 | self._config_json = '' 277 | self._pipeline_root = os.path.join(self._tfx_root, 'pipelines'); # Join ~/tfx/pipelines/ 278 | self._log_root = os.path.join(self._tfx_root, 'logs'); 279 | self._model_root = os.path.join(self._tfx_root, 'model'); 280 | self._label = '' #Label 281 | self._label_vocab = [] 282 | self._features = [] #List of features to be used for modeling 283 | self._class_weights = {} #Class weights 284 | self._train_data_path = train_data_path #Training data 285 | self._test_data_path = test_data_path #Test data 286 | self._model_type = '' 287 | self._model_complexity = 1 288 | self._defaults = [] 289 | self._run = False #Run flag 290 | self.hpt_config = { 291 | 0:{ 292 | 'deep_neurons':{ 293 | 'min':64, 294 | 'max':64 295 | }, 296 | 'wide_neurons':{ 297 | 'min':64, 298 | 'max':64 299 | }, 300 | 'prefinal_dense': { 301 | 'min':32, 302 | 'max':32 303 | }, 304 | 'learning_rate':{ 305 | 'min':0.01, 306 | 'max':0.01 307 | }, 308 | 'bins':{ 309 | 'min':10, 310 | 'max':10 311 | }, 312 | 'l1_regularization':{ 313 | 'min':0.0001, 314 | 'max':0.0001 315 | }, 316 | }, 317 | 1:{ 318 | 'deep_neurons':{ 319 | 'min':8, 320 | 'max':1024 321 | }, 322 | 'wide_neurons':{ 323 | 'min':32, 324 | 'max':2048 325 | }, 326 | 'prefinal_dense': { 327 | 'min':8, 328 | 'max':512 329 | }, 330 | 'learning_rate':{ 331 | 'min':0.0005, 332 | 'max':0.1 333 | }, 334 | 'bins':{ 335 | 'min':4, 336 | 'max':40 337 | }, 338 | 'l1_regularization':{ 339 | 'min':0.00001, 340 | 'max':0.0001 341 | }, 342 | }, 343 | } 344 | self.hpt_config[2] = self.hpt_config[1] 345 | 346 | ##GPU Strategy 347 | self.strategy = tf.distribute.MirroredStrategy() 348 | 349 | def load_config_json(self): 350 | with open(os.path.join(self._tfx_root, 'config.json')) as f: 351 | self._config_json = json.load(f) 352 | 
353 | #Create list of features 354 | for feats in self._config_json['data_schema']: 355 | #Don't include ignored features 356 | if feats['feature'] in self._config_json['ignore_features'] or feats['feature'] == self._label: 357 | continue 358 | else: 359 | self._features.append(feats['feature']) 360 | 361 | def make_input_fn(self, filename, mode, vnum_epochs = None, batch_size = 512): 362 | CSV_COLUMNS = [ feats['feature'] for feats in self._config_json['data_schema'] ] 363 | LABEL_COLUMN = self._label 364 | 365 | # Set default values for required only CSV columns + LABEL 366 | # This has to be in sequence of CSV columns 0 to N 367 | DEFAULTS = [] 368 | for f_ in self._config_json['file_headers']: 369 | for feats in self._config_json['data_schema']: 370 | if feats['feature'] != f_: 371 | continue 372 | 373 | #Logic for default values 374 | if feats['Type'] in [ 'CATEGORICAL', 'STRING', 'BYTES' ]: 375 | DEFAULTS.append(['']) 376 | elif feats['Type'] == 'FLOAT': 377 | DEFAULTS.append([tf.cast(0, tf.float32)]) 378 | elif feats['Type'] == 'INT': 379 | DEFAULTS.append([tf.cast(0, tf.int64)]) 380 | 381 | # print("Default for {} is {}".format(f_, DEFAULTS[-1])) 382 | 383 | break 384 | 385 | self._defaults = DEFAULTS 386 | 387 | ############################### 388 | ##Feature engineering functions 389 | def feature_engg_features(features): 390 | #Apply data type conversions 391 | for feats in self._config_json['data_schema']: 392 | if feats['feature'] == self._label or not feats['feature'] in self._features: 393 | continue 394 | 395 | #Convert dtype of all tensors as per requested schema 396 | if feats['Type'] in [ 'CATEGORICAL', 'STRING', 'BYTES' ] and features[feats['feature']].dtype != tf.string: 397 | features[feats['feature']] = tf.strings.as_string(features[feats['feature']]) 398 | elif feats['Type'] == 'FLOAT' and features[feats['feature']].dtype != tf.float32: 399 | #Needs special handling for strings 400 | if features[feats['feature']].dtype != tf.string: 401 | features[feats['feature']] = tf.cast(features[feats['feature']], dtype=tf.float32) 402 | elif feats['Type'] == 'INT' and features[feats['feature']].dtype != tf.int64: 403 | #Needs special handling for strings 404 | if features[feats['feature']].dtype != tf.string: 405 | features[feats['feature']] = tf.cast(features[feats['feature']], dtype=tf.int64) 406 | 407 | return(features) 408 | 409 | #Convert string labels 0 to N 410 | def string_labels_to_num(v_feats, v_label): 411 | pass_label = v_label 412 | for ix, cat_vals in enumerate(v_feats['categorical_values']): 413 | search_string = str(cat_vals) + "$" 414 | pass_label = tf.strings.regex_replace(pass_label, search_string, tf.strings.as_string(ix)) 415 | 416 | pass_label = tf.strings.to_number(pass_label, out_type=tf.dtypes.int32) 417 | return pass_label 418 | 419 | #To be called from TF 420 | def feature_engg(features, label): 421 | #Add new features 422 | features = feature_engg_features(features) 423 | 424 | #Replace string label with 0 to N for classification case 425 | if self._model_type == 'CLASSIFICATION': 426 | for feats in self._config_json['data_schema']: 427 | if feats['feature'] == self._label: 428 | break 429 | 430 | #Replace categorical values with 0 to N 431 | if feats['Type'] == 'CATEGORICAL' and feats['feature'] == self._label: 432 | self._label_vocab = feats['categorical_values'] 433 | label = string_labels_to_num(feats, label) 434 | 435 | return(features, label) 436 | 437 | def _input_fn(v_test=False): 438 | # Create list of files that match pattern 439 | 
file_list = tf.io.gfile.glob(filename) 440 | 441 | if mode == tf.estimator.ModeKeys.TRAIN: 442 | num_epochs = vnum_epochs # indefinitely 443 | else: 444 | num_epochs = 1 # end-of-input after this 445 | 446 | # Create dataset from file list 447 | dataset = tf.data.experimental.make_csv_dataset(file_list, 448 | batch_size=batch_size, 449 | column_names=self._config_json['file_headers'], 450 | column_defaults=DEFAULTS, 451 | label_name=LABEL_COLUMN, 452 | num_epochs = num_epochs, 453 | num_parallel_reads=30) 454 | 455 | dataset = dataset.prefetch(buffer_size = batch_size) 456 | 457 | #Feature engineering 458 | dataset = dataset.map(feature_engg) 459 | 460 | if mode == tf.estimator.ModeKeys.TRAIN: 461 | num_epochs = vnum_epochs # indefinitely 462 | dataset = dataset.shuffle(buffer_size = batch_size) 463 | else: 464 | num_epochs = 1 # end-of-input after this 465 | 466 | dataset = dataset.repeat(num_epochs) 467 | 468 | #Begins - Uncomment for testing only -----------------------------------------------------< 469 | if v_test == True: 470 | print(next(dataset.__iter__())) 471 | 472 | #End - Uncomment for testing only -----------------------------------------------------< 473 | return dataset 474 | return _input_fn 475 | 476 | # def make_input_fn_gz(self, dir_uri, mode, vnum_epochs = None, batch_size = 512): 477 | # def decode_tfr(serialized_example): 478 | # schema = {} 479 | # features = {} 480 | # for feats in self._config_json['data_schema']: 481 | # #1. Create GZIP TFR parser schema 482 | # # if feats['Type'] == 'CATEGORICAL': 483 | # # schema[feats['feature']] = tf.io.FixedLenFeature([], tf.string, default_value="") 484 | # # elif feats['Type'] == 'FLOAT': 485 | # # schema[feats['feature']] = tf.io.FixedLenFeature([], tf.float32, default_value=0.0 ) 486 | # # elif feats['Type'] == 'INT': 487 | # # schema[feats['feature']] = tf.io.FixedLenFeature([], tf.int64, default_value=0) 488 | 489 | # if feats['Type'] == 'CATEGORICAL': 490 | # schema[feats['feature']] = tf.io.VarLenFeature(tf.string) 491 | # elif feats['Type'] == 'FLOAT': 492 | # schema[feats['feature']] = tf.io.VarLenFeature(tf.float32) 493 | # elif feats['Type'] == 'INT': 494 | # schema[feats['feature']] = tf.io.VarLenFeature(tf.int64) 495 | 496 | # # 1. define a parser 497 | # features = tf.io.parse_example( 498 | # serialized_example, 499 | # # Defaults are not specified since both keys are required. 
500 | # features=schema) 501 | 502 | # return features, features[self._label] 503 | 504 | # def _input_fn(v_test=False): 505 | # # Get the list of files in this directory (all compressed TFRecord files) 506 | # tfrecord_filenames = tf.io.gfile.glob(dir_uri) 507 | 508 | # # Create a `TFRecordDataset` to read these files 509 | # dataset = tf.data.TFRecordDataset(tfrecord_filenames, compression_type="GZIP") 510 | 511 | # if mode == tf.estimator.ModeKeys.TRAIN: 512 | # num_epochs = vnum_epochs # indefinitely 513 | # else: 514 | # num_epochs = 1 # end-of-input after this 515 | 516 | # dataset = dataset.batch(batch_size) 517 | # dataset = dataset.prefetch(buffer_size = batch_size) 518 | 519 | # #Convert TFRecord data to dict 520 | # dataset = dataset.map(decode_tfr) 521 | 522 | # #Feature engineering 523 | # # dataset = dataset.map(feature_engg) 524 | 525 | # if mode == tf.estimator.ModeKeys.TRAIN: 526 | # num_epochs = vnum_epochs # indefinitely 527 | # dataset = dataset.shuffle(buffer_size = batch_size) 528 | # else: 529 | # num_epochs = 1 # end-of-input after this 530 | 531 | # dataset = dataset.repeat(num_epochs) 532 | 533 | # #Begins - Uncomment for testing only -----------------------------------------------------< 534 | # if v_test == True: 535 | # print(next(dataset.__iter__())) 536 | 537 | # #End - Uncomment for testing only -----------------------------------------------------< 538 | # return dataset 539 | # return _input_fn 540 | 541 | def create_feature_cols(self): 542 | #Keras format features 543 | feats_dict = {} 544 | keras_dict_input = {} 545 | for feats in self._config_json['data_schema']: 546 | #Only include features 547 | if feats['feature'] not in self._features: 548 | continue 549 | 550 | #Create feature columns list 551 | if feats['Type'] in [ 'CATEGORICAL', 'STRING', 'BYTES' ]: 552 | feats_dict[feats['feature']] = tf.keras.Input(name=feats['feature'], shape=(1,), dtype=tf.string) 553 | elif feats['Type'] == 'FLOAT': 554 | feats_dict[feats['feature']] = tf.keras.Input(name=feats['feature'], shape=(1,), dtype=tf.float32) 555 | elif feats['Type'] == 'INT': 556 | feats_dict[feats['feature']] = tf.keras.Input(name=feats['feature'], shape=(1,), dtype=tf.int32) 557 | 558 | for k_ in feats_dict.keys(): 559 | keras_dict_input[k_] = feats_dict[k_] 560 | 561 | self._feature_cols = {'K' : keras_dict_input} 562 | return self._feature_cols 563 | 564 | def create_keras_model_classification(self, hp): 565 | with self.strategy.scope(): 566 | # params = self.params_default 567 | feature_cols = self._feature_cols 568 | 569 | #Number of classes 570 | for feats in self._config_json['data_schema']: 571 | #Only include label 572 | if feats['feature'] == self._label: 573 | if feats['Type'] != 'CATEGORICAL': 574 | num_classes = int(feats['max'] + 1) 575 | break 576 | else: 577 | num_classes = len(feats['categorical_values']) 578 | break 579 | 580 | #Calculate class weights 581 | for feats in self._config_json['data_schema']: 582 | #Only include label 583 | if feats['feature'] == self._label: 584 | if feats.get('categorical_values', 'NA') != 'NA': 585 | #In case of categorical labels 586 | for ix, class_ in enumerate(feats['categorical_values']): 587 | self._class_weights[ix] = (1 / feats['categorical_values_count'][class_]) * (feats['categorical_values_count_total'])/2.0 588 | else: 589 | #In case of numerical labels 590 | for class_ in range(num_classes): 591 | self._class_weights[class_] = 1 592 | 593 | METRICS = [ 594 | # tf.keras.metrics.AUC(multi_label=True, num_labels=num_classes), 595 | 
'sparse_categorical_accuracy' 596 | ] 597 | 598 | #Input layers 599 | input_feats = [] 600 | for inp in feature_cols['K'].keys(): 601 | input_feats.append(feature_cols['K'][inp]) 602 | 603 | ##Input processing 604 | ##https://keras.io/examples/structured_data/structured_data_classification_from_scratch/ 605 | ##https://github.com/tensorflow/community/blob/master/rfcs/20191212-keras-categorical-inputs.md 606 | 607 | ##Automated feature handling 608 | #Handle categorical attributes( One-hot encoding ) 609 | feat_cat = [] 610 | for feats in self._config_json['data_schema']: 611 | if feats['feature'] in self._features and feats['Type'] == 'CATEGORICAL': 612 | feat_cat.append('') 613 | cat_len = feats['num_categorical_values'] 614 | cat = tf.keras.layers.experimental.preprocessing.StringLookup(vocabulary=feats['categorical_values'], mask_token=None, oov_token = '~UNK~')(feature_cols['K'][feats['feature']]) 615 | feat_cat[-1] = tf.keras.layers.experimental.preprocessing.CategoryEncoding(num_tokens = cat_len + 1)(cat) 616 | 617 | #Handle numerical attributes 618 | feat_numeric = [] 619 | for feats in self._config_json['data_schema']: 620 | if feats['feature'] in self._features and feats['Type'] not in [ 'CATEGORICAL', 'STRING', 'BYTES' ]: 621 | feat_numeric.append('') 622 | 623 | #Apply normalization 624 | if feats['std_dev'] != 0: 625 | feat_numeric[-1] = ( tf.cast(feature_cols['K'][feats['feature']], tf.float32) - feats['mean'] ) / feats['std_dev'] 626 | else: 627 | feat_numeric[-1] = tf.cast(feature_cols['K'][feats['feature']], tf.float32) 628 | 629 | #More feature engineering( Squaring ) 630 | feat_numeric.append('') 631 | feat_numeric[-1] = tf.math.pow(feat_numeric[-2], 2) 632 | 633 | #Apply min-max scaling 634 | if feats['max'] - feats['min'] != 0: 635 | feat_numeric.append('') 636 | feat_numeric[-1] = ( tf.cast(feature_cols['K'][feats['feature']], tf.float32) - feats['min'] ) / ( feats['max'] - feats['min'] ) 637 | 638 | ##SPECIAL HANDLING CONVERT NUMERIC TO CATEG 639 | #Bucketization( 2 to 40 ) 640 | feat_cat.append('') 641 | no_of_bins = hp.Int('bins_' + feats['feature'], min_value=self.hpt_config[self._model_complexity]['bins']['min'], 642 | max_value=self.hpt_config[self._model_complexity]['bins']['max']) 643 | bins = tf.linspace(feats['min'], feats['max'], no_of_bins) 644 | layer_discretization = tf.keras.layers.Discretization(bin_boundaries=bins)(feature_cols['K'][feats['feature']]) 645 | feat_cat[-1] = tf.keras.layers.experimental.preprocessing.CategoryEncoding(num_tokens = no_of_bins + 2)(layer_discretization) 646 | 647 | #Handle Text attributes 648 | feat_text = [] 649 | if self._model_complexity < 2: 650 | #Without fine-tuning 651 | text_emb = TextEncoder(self.strategy) 652 | else: 653 | #With fine-tuning 654 | text_emb = TextEncoder(self.strategy, trainable=True) 655 | 656 | for feats in self._config_json['data_schema']: 657 | if feats['Type'] in ['STRING', 'BYTES']: 658 | feat_text.append('') 659 | 660 | #Apply Text Encoding from TFHub 661 | feat_text[-1] = text_emb(tf.reshape(tf.cast(feature_cols['K'][feats['feature']], tf.string), [-1])) 662 | 663 | ###Create MODEL 664 | ####Concatenate all features( Numerical input ) 665 | numeric_features_count = 0 666 | if len(feat_numeric) > 0: 667 | numeric_features_count += 1 668 | x_input_numeric = tf.keras.layers.concatenate(feat_numeric) 669 | 670 | #DEEP - This Dense layer connects to input layer - Numeric Data 671 | deep_neurons = hp.Int('deep_neurons', min_value=self.hpt_config[self._model_complexity]['deep_neurons']['min'], 672 | 
max_value=self.hpt_config[self._model_complexity]['deep_neurons']['max'])
673 | x_numeric = tf.keras.layers.Dense(deep_neurons, kernel_initializer='lecun_normal',
674 | activation='selu')(x_input_numeric)
675 | x_numeric = tf.keras.layers.BatchNormalization()(x_numeric)
676 | 
677 | ####Concatenate all Text features( TFHub text embeddings )
678 | text_features_count = 0
679 | if len(feat_text) > 0:
680 | text_features_count += 1
681 | x_input_text = tf.keras.layers.concatenate(feat_text)
682 | 
683 | ####Concatenate all Categorical features( Categorical converted )
684 | categ_features_count = 0
685 | if len(feat_cat) > 0:
686 | categ_features_count += 1
687 | x_input_categ = tf.keras.layers.concatenate(feat_cat)
688 | 
689 | #WIDE - This Dense layer connects to input layer - Categorical Data
690 | wide_neurons = hp.Int('wide_neurons', min_value=self.hpt_config[self._model_complexity]['wide_neurons']['min'],
691 | max_value=self.hpt_config[self._model_complexity]['wide_neurons']['max'])
692 | x_categ = tf.keras.layers.Dense(wide_neurons, kernel_initializer='lecun_normal',
693 | activation='selu')(x_input_categ)
694 | 
695 | ####Concatenate both Wide and Deep layers
696 | if numeric_features_count > 0 and categ_features_count > 0 and text_features_count > 0:
697 | x = tf.keras.layers.concatenate([x_numeric, x_categ, x_input_text])
698 | elif numeric_features_count == 0 and categ_features_count > 0 and text_features_count > 0:
699 | x = tf.keras.layers.concatenate([x_categ, x_input_text])
700 | elif numeric_features_count > 0 and categ_features_count == 0 and text_features_count > 0:
701 | x = tf.keras.layers.concatenate([x_numeric, x_input_text])
702 | elif numeric_features_count > 0 and categ_features_count > 0 and text_features_count == 0:
703 | x = tf.keras.layers.concatenate([x_numeric, x_categ])
704 | elif numeric_features_count > 0 and categ_features_count == 0 and text_features_count == 0:
705 | x = x_numeric
706 | elif numeric_features_count == 0 and categ_features_count > 0 and text_features_count == 0:
707 | x = x_categ
708 | elif numeric_features_count == 0 and categ_features_count == 0 and text_features_count > 0:
709 | x = x_input_text
710 | 
711 | prefinal_dense = hp.Int('prefinal_dense', min_value=self.hpt_config[self._model_complexity]['prefinal_dense']['min'],
712 | max_value=self.hpt_config[self._model_complexity]['prefinal_dense']['max'])
713 | l1_reg = hp.Float('l1_regularization', min_value=self.hpt_config[self._model_complexity]['l1_regularization']['min'],
714 | max_value=self.hpt_config[self._model_complexity]['l1_regularization']['max'])
715 | x = tf.keras.layers.Dense(prefinal_dense, kernel_initializer='lecun_normal',
716 | activation='selu',
717 | activity_regularizer=tf.keras.regularizers.l1(l1_reg))(x) #L1 to match the 'l1_regularization' hyper-parameter
718 | x = tf.keras.layers.BatchNormalization()(x)
719 | 
720 | #Final Layer
721 | # out = tf.keras.layers.Dense(1, activation='sigmoid', name='out')(x)
722 | out = tf.keras.layers.Dense(num_classes, activation='softmax', name='out')(x)
723 | model = tf.keras.Model(input_feats, out)
724 | 
725 | #Set optimizer
726 | hp_learning_rate = hp.Float('learning_rate', min_value=self.hpt_config[self._model_complexity]['learning_rate']['min'],
727 | max_value=self.hpt_config[self._model_complexity]['learning_rate']['max'])
728 | opt = tf.keras.optimizers.Adam(learning_rate = hp_learning_rate) #'lr' is a deprecated alias
729 | 
730 | #Compile model
731 | model.compile(loss='sparse_categorical_crossentropy', optimizer=opt, metrics = METRICS)
732 | 
733 | return model
734 | 
735 | def
735 | def create_keras_model_regression(self, hp):
736 | with self.strategy.scope():
737 | METRICS = [
738 | keras.metrics.RootMeanSquaredError(name='rmse'),
739 | keras.metrics.MeanAbsolutePercentageError(name='mape')
740 | ]
741 | 
742 | # params = self.params_default
743 | feature_cols = self._feature_cols
744 | 
745 | #Input layers
746 | input_feats = []
747 | for inp in feature_cols['K'].keys():
748 | input_feats.append(feature_cols['K'][inp])
749 | 
750 | ##Input processing
751 | ##https://keras.io/examples/structured_data/structured_data_classification_from_scratch/
752 | ##https://github.com/tensorflow/community/blob/master/rfcs/20191212-keras-categorical-inputs.md
753 | 
754 | ##Automated feature handling
755 | #Handle categorical attributes( One-hot encoding )
756 | feat_cat = []
757 | for feats in self._config_json['data_schema']:
758 | if feats['feature'] in self._features and feats['Type'] == 'CATEGORICAL':
759 | feat_cat.append('')
760 | cat_len = feats['num_categorical_values']
761 | cat = tf.keras.layers.experimental.preprocessing.StringLookup(vocabulary=feats['categorical_values'], mask_token=None, oov_token = '~UNK~')(feature_cols['K'][feats['feature']])
762 | feat_cat[-1] = tf.keras.layers.experimental.preprocessing.CategoryEncoding(num_tokens = cat_len + 1)(cat)
763 | 
764 | #Handle numerical attributes
765 | feat_numeric = []
766 | for feats in self._config_json['data_schema']:
767 | if feats['feature'] in self._features and feats['Type'] not in [ 'CATEGORICAL', 'STRING', 'BYTES' ]:
768 | feat_numeric.append('')
769 | 
770 | #Apply normalization
771 | if feats['std_dev'] != 0:
772 | feat_numeric[-1] = ( tf.cast(feature_cols['K'][feats['feature']], tf.float32) - feats['mean'] ) / feats['std_dev']
773 | else:
774 | feat_numeric[-1] = tf.cast(feature_cols['K'][feats['feature']], tf.float32)
775 | 
776 | #More feature engineering( Squaring )
777 | feat_numeric.append('')
778 | feat_numeric[-1] = tf.math.pow(feat_numeric[-2], 2)
779 | 
780 | #Apply min-max scaling
781 | if feats['max'] - feats['min'] != 0:
782 | feat_numeric.append('')
783 | feat_numeric[-1] = ( tf.cast(feature_cols['K'][feats['feature']], tf.float32) - feats['min'] ) / ( feats['max'] - feats['min'] )
784 | 
785 | ##SPECIAL HANDLING CONVERT NUMERIC TO CATEG
786 | #Bucketization( 2 to 40 )
787 | feat_cat.append('')
788 | no_of_bins = hp.Int('bins_' + feats['feature'], min_value=self.hpt_config[self._model_complexity]['bins']['min'],
789 | max_value=self.hpt_config[self._model_complexity]['bins']['max'])
790 | bins = tf.linspace(feats['min'], feats['max'], no_of_bins)
791 | layer_discretization = tf.keras.layers.Discretization(bin_boundaries=bins)(feature_cols['K'][feats['feature']])
792 | feat_cat[-1] = tf.keras.layers.experimental.preprocessing.CategoryEncoding(num_tokens = no_of_bins + 2)(layer_discretization)
793 | 
794 | #Handle Text attributes
795 | feat_text = []
796 | if self._model_complexity < 2:
797 | #Without fine-tuning
798 | text_emb = TextEncoder(self.strategy)
799 | else:
800 | #With fine-tuning
801 | text_emb = TextEncoder(self.strategy, trainable=True)
802 | 
803 | for feats in self._config_json['data_schema']:
804 | if feats['Type'] in ['STRING', 'BYTES']:
805 | feat_text.append('')
806 | 
807 | #Apply Text Encoding from TFHub
808 | feat_text[-1] = text_emb(tf.reshape(tf.cast(feature_cols['K'][feats['feature']], tf.string), [-1]))
809 | 
810 | ###Create MODEL
811 | ####Concatenate all features( Numerical input )
812 | numeric_features_count = 0
813 | if len(feat_numeric) > 0:
814 | numeric_features_count += 1
815 | x_input_numeric = tf.keras.layers.concatenate(feat_numeric)
816 | 
817 | #DEEP - This Dense layer connects to input layer - Numeric Data
818 | deep_neurons = hp.Int('deep_neurons', min_value=self.hpt_config[self._model_complexity]['deep_neurons']['min'],
819 | max_value=self.hpt_config[self._model_complexity]['deep_neurons']['max'])
820 | x_numeric = tf.keras.layers.Dense(deep_neurons, kernel_initializer='lecun_normal',
821 | activation='selu')(x_input_numeric)
822 | x_numeric = tf.keras.layers.BatchNormalization()(x_numeric)
823 | 
824 | ####Concatenate all Text features( TFHub embeddings )
825 | text_features_count = 0
826 | if len(feat_text) > 0:
827 | text_features_count += 1
828 | x_input_text = tf.keras.layers.concatenate(feat_text)
829 | 
830 | ####Concatenate all Categorical features( Categorical converted )
831 | categ_features_count = 0
832 | if len(feat_cat) > 0:
833 | categ_features_count += 1
834 | x_input_categ = tf.keras.layers.concatenate(feat_cat)
835 | 
836 | #WIDE - This Dense layer connects to input layer - Categorical Data
837 | wide_neurons = hp.Int('wide_neurons', min_value=self.hpt_config[self._model_complexity]['wide_neurons']['min'],
838 | max_value=self.hpt_config[self._model_complexity]['wide_neurons']['max'])
839 | x_categ = tf.keras.layers.Dense(wide_neurons, kernel_initializer='lecun_normal',
840 | activation='selu')(x_input_categ)
841 | 
842 | ####Concatenate Deep( Numeric ), Wide( Categorical ) and Text branches - whichever are present
843 | branches = []
844 | if numeric_features_count > 0:
845 | branches.append(x_numeric)
846 | if categ_features_count > 0:
847 | branches.append(x_categ)
848 | if text_features_count > 0:
849 | branches.append(x_input_text)
850 | 
851 | #A single branch needs no concatenation
852 | if len(branches) > 1:
853 | x = tf.keras.layers.concatenate(branches)
854 | else:
855 | x = branches[0]
856 | 
857 | 
858 | prefinal_dense = hp.Int('prefinal_dense', min_value=self.hpt_config[self._model_complexity]['prefinal_dense']['min'],
859 | max_value=self.hpt_config[self._model_complexity]['prefinal_dense']['max'])
860 | l1_reg = hp.Float('l1_regularization', min_value=self.hpt_config[self._model_complexity]['l1_regularization']['min'],
861 | max_value=self.hpt_config[self._model_complexity]['l1_regularization']['max'])
862 | x = tf.keras.layers.Dense(prefinal_dense, kernel_initializer='lecun_normal',
863 | activation='selu',
864 | activity_regularizer=tf.keras.regularizers.l2(l1_reg))(x) #Applied as L2 - HP key kept as 'l1_regularization' for config compatibility
865 | x = tf.keras.layers.BatchNormalization()(x)
866 | 
867 | #Final Layer
868 | out = tf.keras.layers.Dense(1, activation='linear', name='out')(x)
869 | model = tf.keras.Model(input_feats, out)
870 | 
871 | #Set optimizer
872 | hp_learning_rate = hp.Float('learning_rate', min_value=self.hpt_config[self._model_complexity]['learning_rate']['min'],
873 | max_value=self.hpt_config[self._model_complexity]['learning_rate']['max'])
874 | opt = tf.keras.optimizers.Adam(learning_rate = hp_learning_rate)
875 | 
876 | #Compile model
877 | model.compile(loss='mean_squared_error', optimizer=opt, metrics = METRICS)
878 | 
879 | return model
880 | 
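The numeric-to-categorical bucketization used by both model builders can be exercised in isolation. A sketch under toy assumptions( a feature observed between 0 and 100, a fixed bin count standing in for the tuned hp value ); tf.linspace with no_of_bins points yields no_of_bins boundaries, so Discretization can emit bucket ids 0 to no_of_bins, all covered by num_tokens = no_of_bins + 2:

import tensorflow as tf

no_of_bins = 4                                         #Stand-in for the tuned hp value
bins = list(tf.linspace(0.0, 100.0, no_of_bins).numpy())
discretize = tf.keras.layers.Discretization(bin_boundaries=bins)
encode = tf.keras.layers.experimental.preprocessing.CategoryEncoding(
    num_tokens=no_of_bins + 2)                         #Covers every bucket id Discretization can emit

buckets = discretize(tf.constant([[-5.0], [12.0], [999.0]]))  #Below range -> 0, above -> highest bucket
print(encode(buckets))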
881 | def keras_train_and_evaluate(self, model, epochs=100, mode='Train'):
882 | #Add callbacks
883 | reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.2,
884 | patience=5, min_lr=0.00001, verbose = 1)
885 | 
886 | tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir="./logs")
887 | 
888 | #Create dataset input functions( a directory becomes a glob pattern, a file is used as-is )
889 | if os.path.isdir(self._train_data_path):
890 | train_file_path = self._train_data_path + "*"
891 | elif os.path.isfile(self._train_data_path):
892 | train_file_path = self._train_data_path
893 | 
894 | if os.path.isdir(self._test_data_path):
895 | test_file_path = self._test_data_path + "*"
896 | elif os.path.isfile(self._test_data_path):
897 | test_file_path = self._test_data_path
898 | 
899 | train_batch = 128
900 | train_dataset = self.make_input_fn(filename = train_file_path,
901 | mode = tf.estimator.ModeKeys.TRAIN,
902 | batch_size = train_batch)()
903 | # eval_file = '/content/tfauto/CsvExampleGen/examples/1/Split-train/*'
904 | # train_dataset = self.make_input_fn_gz(dir_uri = eval_file,
905 | # mode = tf.estimator.ModeKeys.TRAIN,
906 | # batch_size = 10)()
907 | 
908 | train_steps_per_epoch = int(self._config_json['len_train']) // train_batch
909 | 
910 | validation_dataset = self.make_input_fn(filename = test_file_path,
911 | mode = tf.estimator.ModeKeys.EVAL,
912 | batch_size = 512)()
913 | # validation_dataset = self.make_input_fn_gz(dir_uri = eval_file,
914 | # mode = tf.estimator.ModeKeys.TRAIN,
915 | # batch_size = 10)()
916 | 
917 | #Train and Evaluate
918 | #Best model chosen from tuning is just refitted here on full data
919 | if mode == 'Train':
920 | if self._model_type == 'REGRESSION':
921 | print("Training a regression model...")
922 | elif self._model_type == 'CLASSIFICATION':
923 | print("Training a classification model...")
924 | 
925 | #Start training loop
926 | self._model.fit(train_dataset,
927 | validation_data = validation_dataset,
928 | epochs=epochs,
929 | # validation_steps = 3, ###Keep this None to run evaluation on full EVAL data every epoch
930 | steps_per_epoch = train_steps_per_epoch, ###Must be passed explicitly[ number of batches per epoch ]
931 | callbacks=[reduce_lr, #modelsave_callback, #tensorboard_callback,
932 | keras.callbacks.EarlyStopping(patience=20, restore_best_weights=True, verbose=True)],
933 | class_weight = self._class_weights
934 | )
935 | else:
936 | #Model is created during Tuning cycle
937 | if self._model_type == 'REGRESSION':
938 | print("Hyper-Tuning a regression model...")
939 | mod_func = self.create_keras_model_regression
940 | objective = 'val_loss'
941 | elif self._model_type == 'CLASSIFICATION':
942 | print("Hyper-Tuning a classification model...")
943 | mod_func = self.create_keras_model_classification
944 | objective = 'val_sparse_categorical_accuracy'
945 | 
946 | ###Create Tuner
947 | ###########################################
948 | tuner = kt.Hyperband(
949 | mod_func,
950 | objective=objective,
951 | overwrite=True,
952 | max_epochs=20)
953 | stop_early = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)
954 | 
955 | tuner.search(train_dataset, validation_data=validation_dataset, epochs=epochs, steps_per_epoch = train_steps_per_epoch, callbacks=[stop_early],
956 | class_weight = self._class_weights)
957 | 
958 | #The hyperparameter search is complete - fetch the best
959 | #hyperparameters found and rebuild the model with them
960 | #for the final Train cycle
961 | best_hps = tuner.get_best_hyperparameters(num_trials=1)[0]
962 | print("Best LR: ", best_hps.get('learning_rate'))
963 | self._model = tuner.hypermodel.build(best_hps)
964 | 
965 | return self._model
966 | 
967 | 
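For reference, the Tune branch above reduced to its smallest form; a self-contained sketch with a throwaway model and random data, assuming the pinned keras-tuner is importable as keras_tuner:

import tensorflow as tf
import keras_tuner as kt

def build_model(hp):
    #Toy search space: one tunable layer width and the learning rate
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(hp.Int('units', min_value=8, max_value=64, step=8), activation='relu'),
        tf.keras.layers.Dense(1)])
    model.compile(optimizer=tf.keras.optimizers.Adam(
                      learning_rate=hp.Float('learning_rate', min_value=1e-4, max_value=1e-2)),
                  loss='mse')
    return model

x, y = tf.random.normal((256, 4)), tf.random.normal((256, 1))
tuner = kt.Hyperband(build_model, objective='val_loss', overwrite=True, max_epochs=5)
tuner.search(x, y, validation_split=0.2)

best_hps = tuner.get_best_hyperparameters(num_trials=1)[0]
model = tuner.hypermodel.build(best_hps)   #Refit this model on full data, as done above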
968 | def save_model(self):
969 | version = "1"
970 | tf.saved_model.save(
971 | self._model, #Model
972 | self._model_root + "/" + version #Location
973 | )
974 | 
975 | def search_hpt(self):
976 | #Create feature columns dynamically and model too
977 | self._feature_cols = self.create_feature_cols()
978 | 
979 | #Start HPT
980 | self._model = self.keras_train_and_evaluate(None, epochs=50, mode='Tune')
981 | 
982 | #Print summary
983 | print(self._model.summary())
984 | 
985 | def start_train(self):
986 | #Create feature columns dynamically and model too
987 | self._feature_cols = self.create_feature_cols()
988 | 
989 | #Start training loop on HPT best found model
990 | try:
991 | tf.keras.utils.plot_model(self._model, rankdir="LR")
992 | except:
993 | pass #Plotting is best-effort only - ignore failures
994 | 
995 | self._model = self.keras_train_and_evaluate(self._model, epochs=9999, mode='Train')
996 | 
997 | #Save model
998 | self.save_model()
999 | 
1000 | def generate_examples_for_wit(self):
1001 | max_samples = 100
1002 | examples = []
1003 | out = {}
1004 | path = self._test_data_path
1005 | 
1006 | #Create dataset input functions( a directory becomes a glob pattern )
1007 | if os.path.isdir(path):
1008 | path = path + "*"
1009 | 
1010 | # Create list of files that match pattern
1011 | file_list = tf.io.gfile.glob(path)
1012 | 
1013 | # Create dataset from file list
1014 | dataset = tf.data.experimental.make_csv_dataset(file_list, header=True, batch_size=max_samples
1015 | ,num_epochs=1, column_defaults=self._defaults)
1016 | 
1017 | #Get first batch
1018 | for features in dataset.take(1):
1019 | for name, value in features.items():
1020 | out[name] = value.numpy()
1021 | 
1022 | #Generate examples
1023 | for row in range(max_samples):
1024 | try:
1025 | example = tf.train.Example()
1026 | #For each column in file
1027 | for f_ in self._config_json['file_headers']:
1028 | for feats in self._config_json['data_schema']:
1029 | if feats['feature'] != f_:
1030 | continue
1031 | 
1032 | #Prepare example data
1033 | if feats['Type'] in [ 'CATEGORICAL', 'STRING', 'BYTES' ]:
1034 | example.features.feature[f_].bytes_list.value.append(out[f_][row])
1035 | elif feats['Type'] == 'FLOAT':
1036 | example.features.feature[f_].float_list.value.append(out[f_][row])
1037 | elif feats['Type'] == 'INT':
1038 | example.features.feature[f_].int64_list.value.append(int(out[f_][row]))
1039 | examples.append(example)
1040 | except:
1041 | pass #Skip rows that cannot be converted into an Example
1042 | return examples
1043 | 
1044 | 
1045 | 
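save_model() above writes a versioned SavedModel; the prediction function that follows consumes it through the pattern sketched here( the path and feature names are placeholders, and 'out' assumes the output layer name used by the model builders above ):

import tensorflow as tf

saved_mod = tf.saved_model.load('/tfx/model/1')        #Placeholder for self._model_root + "/" + version
serving_fn = saved_mod.signatures['serving_default']   #Concrete function keyed by signature name

#Every model input becomes a named keyword tensor of shape (batch, 1)
pred = serving_fn(some_numeric_feature=tf.constant([[1.0]]),
                  some_categ_feature=tf.constant([['category_a']]))
print(pred['out'].numpy())                             #'out' is the output layer name set above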
1046 | #Create prediction function to link in WIT
1047 | def wit_prediction_fn_dyn(self, examples, check_mode=False):
1048 | version = "1"
1049 | out = []
1050 | examples_out = []
1051 | 
1052 | #LOCAL: Predict using Keras prediction function
1053 | saved_mod = tf.saved_model.load(self._model_root + "/" + version) #Location
1054 | 
1055 | #Get prediction function from serving
1056 | mod_fn = saved_mod.signatures['serving_default']
1057 | 
1058 | for ex in examples:
1059 | #Extract features from each example
1060 | keyword_args = {}
1061 | test_data = ex.features
1062 | 
1063 | for f_ in self._config_json['file_headers']:
1064 | if f_ == self._label or f_ in self._config_json['ignore_features']:
1065 | continue
1066 | for feats in self._config_json['data_schema']:
1067 | if feats['feature'] != f_:
1068 | continue
1069 | 
1070 | #Prepare example data
1071 | if feats['Type'] in [ 'CATEGORICAL', 'STRING', 'BYTES' ]:
1072 | keyword_args[f_] = tf.convert_to_tensor([test_data.feature[f_].bytes_list.value])
1073 | elif feats['Type'] == 'FLOAT':
1074 | keyword_args[f_] = tf.convert_to_tensor([test_data.feature[f_].float_list.value])
1075 | elif feats['Type'] == 'INT':
1076 | keyword_args[f_] = tf.convert_to_tensor([test_data.feature[f_].int64_list.value])
1077 | 
1078 | #Run prediction function on saved model
1079 | try:
1080 | pred = mod_fn(**keyword_args)
1081 | 
1082 | p_ = pred['out'].numpy()
1083 | out.append(p_[0])
1084 | examples_out.append(ex)
1085 | except:
1086 | pass #Skip examples the saved model cannot score
1087 | 
1088 | if check_mode:
1089 | #If we also want the list of valid examples
1090 | return out, examples_out
1091 | else:
1092 | return out
1093 | 
1094 | 
1095 | 
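Both generate_examples_for_wit() above and wit_prediction_fn_dyn() lean on the tf.train.Example proto; the type mapping they apply looks like this in miniature( feature names are illustrative ):

import tensorflow as tf

example = tf.train.Example()
#Strings / bytes -> bytes_list, FLOAT -> float_list, INT -> int64_list
example.features.feature['plant'].bytes_list.value.append(b'ML01')
example.features.feature['amount'].float_list.value.append(-80.0)
example.features.feature['scenario'].int64_list.value.append(3)

#Round trip: values can be read straight back off the proto
print(example.features.feature['amount'].float_list.value[0])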
1096 | def call_wit(self):
1097 | #Generate examples for WIT and also check for valid examples
1098 | examples_wit = self.generate_examples_for_wit()
1099 | _, examples_wit = self.wit_prediction_fn_dyn(examples_wit, check_mode=True)
1100 | 
1101 | if self._model_type == 'REGRESSION':
1102 | wit_type = 'regression'
1103 | config_builder = (WitConfigBuilder(examples_wit, self._config_json['file_headers'])
1104 | .set_custom_predict_fn(self.wit_prediction_fn_dyn)
1105 | .set_model_type(wit_type))
1106 | elif self._model_type == 'CLASSIFICATION':
1107 | wit_type = 'classification'
1108 | config_builder = (WitConfigBuilder(examples_wit, self._config_json['file_headers'])
1109 | .set_custom_predict_fn(self.wit_prediction_fn_dyn)
1110 | .set_model_type(wit_type)
1111 | .set_label_vocab(self._label_vocab))
1112 | 
1113 | WitWidget(config_builder)
1114 | 
1115 | def prechecks(self):
1116 | '''Set of tests to run before training'''
1117 | success_flag = True
1118 | #Test 1 -> Label Data Type check
1119 | for feats in self._config_json['data_schema']:
1120 | if feats['feature'] != self._label:
1121 | continue
1122 | 
1123 | #Only allow numerical values for REGRESSION models
1124 | if feats['Type'] in [ 'CATEGORICAL', 'STRING', 'BYTES' ] and self._model_type == "REGRESSION":
1125 | print("Error: REGRESSION - labels should be numerical only")
1126 | success_flag = False
1127 | return success_flag
1128 | 
1129 | #Test 2 -> Label values check
1130 | for feats in self._config_json['data_schema']:
1131 | if feats['feature'] != self._label:
1132 | continue
1133 | 
1134 | #For classification, numerical( INT / FLOAT ) labels should start from 0
1135 | if self._model_type == "CLASSIFICATION" and feats['Type'] in ['INT', 'FLOAT']:
1136 | if int(feats['min']) != 0:
1137 | print("Error: CLASSIFICATION - Numerical labels should start from 0")
1138 | success_flag = False
1139 | return success_flag
1140 | 
1141 | return success_flag
1142 | 
1143 | def run_initial(self, label_column, model_type='REGRESSION', model_complexity=1):
1144 | """Run all modeling steps in pipeline and generate results"""
1145 | self.__init__(self._tfx_root, self._train_data_path, self._test_data_path) #Reset engine state for a fresh run
1146 | self._label = label_column
1147 | self._model_type = model_type
1148 | self._model_complexity = model_complexity
1149 | self.load_config_json()
1150 | self._run = True #Run flag
1151 | 
1152 | #Prechecks
1153 | if not self.prechecks():
1154 | raise Exception("Error: Precheck failed for Training start")
1155 | 
1156 | #Run HPT
1157 | self.search_hpt()
1158 | 
1159 | #Run Training and Evaluation
1160 | self.start_train()
1161 | 
1162 | class TFAuto():
1163 | def __init__(self, train_data_path, test_data_path, path_root='/tfx'):
1164 | '''
1165 | Initialize TFAuto engine
1166 | train_data_path: Path where Training data is stored
1167 | test_data_path: Path where Test / Eval data is stored
1168 | path_root: Directory for running TFAuto( Directory should NOT exist )
1169 | '''
1170 | ##Define all constants
1171 | self._tfx_root = os.path.join(os.getcwd(), path_root)
1172 | self._pipeline_root = os.path.join(self._tfx_root, 'pipelines') # ~/tfx/pipelines/
1173 | self._metadata_db_root = os.path.join(self._tfx_root, 'metadata.db') # ~/tfx/metadata.db
1174 | self._metadata = os.path.join(self._tfx_root, 'metadata') # ~/tfx/metadata
1175 | self._log_root = os.path.join(self._tfx_root, 'logs')
1176 | self._model_root = os.path.join(self._tfx_root, 'model')
1177 | self._train_data_path = train_data_path
1178 | self._test_data_path = test_data_path
1179 | 
1180 | self._input_fn_module_file = 'inputfn_trainer.py'
1181 | self._constants_module_file = 'constants_trainer.py'
1182 | self._model_trainer_module_file = 'model_trainer.py'
1183 | 
1184 | #Instantiate other services
1185 | self.tfautils = TFAutoUtils(data_path=train_data_path, path_root=path_root)
1186 | self.tfadata = TFAutoData()
1187 | self.tfamodel = TFAutoModel(self._tfx_root, train_data_path, test_data_path)
1188 | 
1189 | #Create all required directories
1190 | self.tfautils.create_directories()
1191 | 
1192 | #Set interactive context
1193 | # self.context = InteractiveContext(pipeline_root=self._tfx_root)
1194 | 
1195 | #Output
1196 | print("TF initialized...")
1197 | print("All paths setup at {}".format(self._tfx_root))
1198 | 
1199 | def generate_config_json(self):
1200 | #Generate JSON for data modeling etc
1201 | config_dict = {}
1202 | config_json = os.path.join(self._tfx_root, 'config.json')
1203 | config_dict['root_path'] = self._tfx_root
1204 | config_dict['data_schema'] = self.tfadata.features_list
1205 | config_dict['len_train'] = self.tfadata._len_train
1206 | config_dict['ignore_features'] = ['ADD_FEATURES_TO_IGNORE_FROM_MODEL']
1207 | config_dict['file_headers'] = list(self.tfadata.file_headers)
1208 | 
1209 | #Write JSON file
1210 | with open(config_json, 'w') as fp:
1211 | json.dump(config_dict, fp, indent = 4)
1212 | 
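generate_config_json() above persists the inferred schema for the later modeling steps; an illustrative( not actual ) shape of the resulting config.json, with made-up values:

#Illustrative shape only - every value below is made up
config_dict = {
    'root_path': '/content/tfauto',
    'data_schema': [
        {'feature': 'price', 'Type': 'FLOAT', 'mean': 540000.0, 'std_dev': 367000.0,
         'min': 75000.0, 'max': 7700000.0},
        {'feature': 'city', 'Type': 'CATEGORICAL', 'num_categorical_values': 3,
         'categorical_values': ['seattle', 'bellevue', 'renton']}
    ],
    'len_train': 21613,
    'ignore_features': ['ADD_FEATURES_TO_IGNORE_FROM_MODEL'],
    'file_headers': ['price', 'city']
}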
1213 | def step_data_explore(self, viz=False):
1214 | '''
1215 | Method to automatically estimate schema of Data
1216 | viz: (False) Whether to also render data visualizations
1217 | '''
1218 | self.pipeline = self.tfadata.run_initial(self._train_data_path, self._test_data_path, self._tfx_root, self._metadata_db_root, self.tfautils, viz)
1219 | self.generate_config_json()
1220 | 
1221 | def step_model_build(self, label_column, model_type='REGRESSION', model_complexity=1):
1222 | '''
1223 | Method to automatically create models from data
1224 | Parameters
1225 | label_column: The feature to be used as Label
1226 | model_type: Either of 'REGRESSION', 'CLASSIFICATION'
1227 | model_complexity: 0 to 2 (0: Model without HPT, 1: Model with HPT, 2: Complexity 1 + Trainable Text Layer)
1228 | '''
1229 | #Run Modeling steps
1230 | if self.tfadata._run:
1231 | print("Success: Started AutoML Training")
1232 | self.tfamodel.run_initial(label_column, model_type, model_complexity)
1233 | print("Success: Model Training complete. Exported to: {}".format(self._model_root + "/"))
1234 | else:
1235 | print("Error: Please run Step 1 - step_data_explore")
1236 | 
1237 | def step_model_whatif(self):
1238 | '''
1239 | Run What-IF tool for trained model
1240 | '''
1241 | #Run What-IF steps( requires a model trained in Step 2 )
1242 | if getattr(self.tfamodel, '_run', False):
1243 | self.tfamodel.call_wit()
1244 | else:
1245 | print("Error: Please run Step 2 - step_model_build")
1246 | 
1247 | 
--------------------------------------------------------------------------------
/header.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rafiqhasan/auto-tensorflow/ec80d21d2c88f308f567a11f4b3d820c9af1aa42/header.png
--------------------------------------------------------------------------------
/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rafiqhasan/auto-tensorflow/ec80d21d2c88f308f567a11f4b3d820c9af1aa42/logo.png
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = [
3 | "setuptools>=42",
4 | "wheel"
5 | ]
6 | build-backend = "setuptools.build_meta"
7 | 
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | ############################################################################################
2 | #Licensed under the Apache License, Version 2.0 (the "License");
3 | #you may not use this file except in compliance with the License.
4 | #You may obtain a copy of the License at
5 | #
6 | # https://www.apache.org/licenses/LICENSE-2.0
7 | #
8 | #Unless required by applicable law or agreed to in writing, software
9 | #distributed under the License is distributed on an "AS IS" BASIS,
10 | #WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 | #See the License for the specific language governing permissions and
12 | #limitations under the License.
13 | ############################################################################################
14 | import setuptools
15 | 
16 | with open("README.md", "r", encoding="utf-8") as fh:
17 | long_description = fh.read()
18 | 
19 | setuptools.setup(
20 | name="auto_tensorflow",
21 | version="1.3.4",
22 | author="Hasan Rafiq",
23 | description="""Build Low Code Automated Tensorflow, What-IF explainable models in just 3 lines of code, built with a single aim: 
To make Deep Learning on Tensorflow absolutely easy for the masses with its low code framework and also increase trust on ML models through What-IF model explainability.""", 24 | long_description=long_description, 25 | long_description_content_type="text/markdown", 26 | license='Apache License 2.0', 27 | url="https://github.com/rafiqhasan/auto-tensorflow", 28 | packages = [ 29 | "auto_tensorflow" 30 | ], 31 | include_package_data=True, 32 | install_requires=[ 33 | "keras-tuner==1.0.4", 34 | "tensorflow_text==2.6.0", 35 | "tfx==1.4.0", 36 | "witwidget==1.8.0", 37 | "tensorflow==2.6.2", 38 | "tensorflow_hub==0.12.0", 39 | "tensorflow-metadata==1.4.0", 40 | "ipython==7.29.0", 41 | "tensorflow-estimator==2.6.0", 42 | "joblib==0.14.1", 43 | "tensorboard-plugin-wit==1.8.0", 44 | "tensorboard-data-server==0.6.1", 45 | "google-api-core==1.31.4", 46 | "google-cloud-aiplatform==1.10.0", 47 | "google-cloud==0.34.0", 48 | "apache-beam==2.34.0", 49 | "protobuf==3.19.5", 50 | "jupyterlab-widgets==3.0.3", 51 | "PyYAML==5.4.1", 52 | "pytz==2022.6", 53 | "tensorflow-model-analysis==0.35.0", 54 | "tensorflow-data-validation==1.4.0", 55 | "tensorboard==2.6.0", 56 | "six==1.15.0", 57 | "requests==2.28.1", 58 | "widgetsnbextension==3.6.1" 59 | ], 60 | classifiers=[ 61 | "Programming Language :: Python :: 3", 62 | "Operating System :: OS Independent", 63 | ], 64 | ) 65 | -------------------------------------------------------------------------------- /tutorials/TFAuto_|_Classification.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "nbformat": 4, 3 | "nbformat_minor": 0, 4 | "metadata": { 5 | "colab": { 6 | "name": "TFAuto | Demo and Library testing.ipynb", 7 | "provenance": [], 8 | "collapsed_sections": [], 9 | "authorship_tag": "ABX9TyMvDPKTWtiaNqhEWo/BgCXR", 10 | "include_colab_link": true 11 | }, 12 | "kernelspec": { 13 | "name": "python3", 14 | "display_name": "Python 3" 15 | }, 16 | "language_info": { 17 | "name": "python" 18 | } 19 | }, 20 | "cells": [ 21 | { 22 | "cell_type": "markdown", 23 | "metadata": { 24 | "id": "view-in-github", 25 | "colab_type": "text" 26 | }, 27 | "source": [ 28 | "\"Open" 29 | ] 30 | }, 31 | { 32 | "cell_type": "code", 33 | "metadata": { 34 | "id": "qfbOrnbYi1CF" 35 | }, 36 | "source": [ 37 | "# !pip install git+https://github.com/rafiqhasan/auto-tensorflow.git\n", 38 | "!pip install auto-tensorflow" 39 | ], 40 | "execution_count": null, 41 | "outputs": [] 42 | }, 43 | { 44 | "cell_type": "code", 45 | "metadata": { 46 | "id": "JPfi5hIckYxY" 47 | }, 48 | "source": [ 49 | "from auto_tensorflow.tfa import TFAuto" 50 | ], 51 | "execution_count": null, 52 | "outputs": [] 53 | }, 54 | { 55 | "cell_type": "markdown", 56 | "metadata": { 57 | "id": "UwPONJGzkSmE" 58 | }, 59 | "source": [ 60 | "### **Download data**" 61 | ] 62 | }, 63 | { 64 | "cell_type": "code", 65 | "metadata": { 66 | "id": "rbTX-qXJi7dd" 67 | }, 68 | "source": [ 69 | "!rm -rf data.*\n", 70 | "!rm -rf /content/*.png\n", 71 | "!rm -rf *trainer.py\n", 72 | "!rm -r /content/train_data\n", 73 | "!rm -r /content/test_data\n", 74 | "!rm -rf untitled_project\n", 75 | "!mkdir /content/train_data\n", 76 | "!mkdir /content/test_data\n", 77 | "!sudo rm -r /content/tfauto" 78 | ], 79 | "execution_count": null, 80 | "outputs": [] 81 | }, 82 | { 83 | "cell_type": "code", 84 | "metadata": { 85 | "id": "PY1LdrO-kWa3" 86 | }, 87 | "source": [ 88 | "# GRIR\n", 89 | "%%bash\n", 90 | "cd /content/train_data\n", 91 | "wget 
https://raw.githubusercontent.com/rafiqhasan/AI_DL_ML_Repo/master/Datasets/GRIR/train.csv\n", 92 | "\n", 93 | "cd ../test_data\n", 94 | "wget https://raw.githubusercontent.com/rafiqhasan/AI_DL_ML_Repo/master/Datasets/GRIR/eval.csv" 95 | ], 96 | "execution_count": null, 97 | "outputs": [] 98 | }, 99 | { 100 | "cell_type": "code", 101 | "metadata": { 102 | "id": "az8gJw7NlnCZ" 103 | }, 104 | "source": [ 105 | "##Initialize TFAuto with root and Data path\n", 106 | "tfa = TFAuto(train_data_path='/content/train_data/', test_data_path='/content/test_data/', path_root='/content/tfauto')" 107 | ], 108 | "execution_count": null, 109 | "outputs": [] 110 | }, 111 | { 112 | "cell_type": "code", 113 | "metadata": { 114 | "id": "_ddQF8rylpKS" 115 | }, 116 | "source": [ 117 | "##Step 1\n", 118 | "##Run Data setup -> Infer Schema, find anomalies, create profile and show viz\n", 119 | "tfa.step_data_explore(viz=False)" 120 | ], 121 | "execution_count": null, 122 | "outputs": [] 123 | }, 124 | { 125 | "cell_type": "code", 126 | "metadata": { 127 | "id": "NSj07bKPlryx" 128 | }, 129 | "source": [ 130 | "##Step 2\n", 131 | "##Run Model Training ->\n", 132 | "tfa.step_model_build(label_column = 'STATUS', model_type='CLASSIFICATION') ##--> Default model_complexity\n", 133 | "# tfa.step_model_build(label_column = 'STATUS', model_type='CLASSIFICATION', model_complexity = 0) ##--> Model_complexity = 0 ( Simple model - No HPT )" 134 | ], 135 | "execution_count": null, 136 | "outputs": [] 137 | }, 138 | { 139 | "cell_type": "code", 140 | "metadata": { 141 | "id": "NpOsECxjl3MH" 142 | }, 143 | "source": [ 144 | "##Step 3\n", 145 | "##Show model What-If Tool\n", 146 | "tfa.step_model_whatif()" 147 | ], 148 | "execution_count": null, 149 | "outputs": [] 150 | }, 151 | { 152 | "cell_type": "code", 153 | "metadata": { 154 | "id": "MJk0dJ1PmBeq" 155 | }, 156 | "source": [ 157 | "#Check signature\n", 158 | "!saved_model_cli show --dir \"/content/tfauto/model/1\" --all" 159 | ], 160 | "execution_count": null, 161 | "outputs": [] 162 | }, 163 | { 164 | "cell_type": "markdown", 165 | "metadata": { 166 | "id": "CDczgmy4knkU" 167 | }, 168 | "source": [ 169 | "## **Tensorflow Model Serving**" 170 | ] 171 | }, 172 | { 173 | "cell_type": "code", 174 | "metadata": { 175 | "id": "At1xykbBqiEZ" 176 | }, 177 | "source": [ 178 | "!apt-get remove tensorflow-model-server\n", 179 | "!echo \"deb http://storage.googleapis.com/tensorflow-serving-apt stable tensorflow-model-server tensorflow-model-server-universal\" | tee /etc/apt/sources.list.d/tensorflow-serving.list && \\\n", 180 | "curl https://storage.googleapis.com/tensorflow-serving-apt/tensorflow-serving.release.pub.gpg | apt-key add -\n", 181 | "!apt update\n", 182 | "\n", 183 | "!apt-get install tensorflow-model-server" 184 | ], 185 | "execution_count": null, 186 | "outputs": [] 187 | }, 188 | { 189 | "cell_type": "code", 190 | "metadata": { 191 | "id": "QgWjBqkvks3D" 192 | }, 193 | "source": [ 194 | "###Start Tensorflow server\n", 195 | "# %%bash --bg \n", 196 | "# export TF_CPP_MIN_VLOG_LEVEL=0\n", 197 | "\n", 198 | "%%bash --bg \n", 199 | "nohup tensorflow_model_server \\\n", 200 | " --rest_api_port=8501 \\\n", 201 | " --model_name=model \\\n", 202 | " --model_base_path=\"/content/tfauto/model\" >server.log 2>&1" 203 | ], 204 | "execution_count": null, 205 | "outputs": [] 206 | }, 207 | { 208 | "cell_type": "code", 209 | "metadata": { 210 | "id": "SU3J6T-zk1UR" 211 | }, 212 | "source": [ 213 | "!tail server.log" 214 | ], 215 | "execution_count": null, 216 | "outputs": [] 217 | }, 
218 | { 219 | "cell_type": "code", 220 | "metadata": { 221 | "id": "BMG1-nA7k3fn" 222 | }, 223 | "source": [ 224 | "import json\n", 225 | "import requests\n", 226 | "\n", 227 | "#Create payload\n", 228 | "data_py = {\"inputs\":{'WERKS': [[\"ML01\"]],\n", 229 | " 'DIFGRIRD': [[-80]],\n", 230 | " 'SCENARIO': [[3]],\n", 231 | " 'TOTIRQTY': [[80]],\n", 232 | " 'VSTATU': [[1]],\n", 233 | " 'EKGRP': [[\"A\"]],\n", 234 | " 'TOTGRQTY': [[0]],\n", 235 | " 'VPATD': [[30]],\n", 236 | " 'EKORG': [[1]],\n", 237 | " 'NODLGR': [[0]],\n", 238 | " 'DIFGRIRV': [[-38100]],\n", 239 | " 'NODLIR': [[90]],\n", 240 | " 'KTOKK': [[1]]}}\n", 241 | "\n", 242 | "data = json.dumps(data_py)\n", 243 | "print(\"payload: \", data)\n", 244 | "\n", 245 | "#Run request on TMS\n", 246 | "headers = {\"content-type\": \"application/json\"}\n", 247 | "json_response = requests.post('http://localhost:8501/v1/models/model:predict', data=data, headers=headers)\n", 248 | "json_response.text" 249 | ], 250 | "execution_count": null, 251 | "outputs": [] 252 | }, 253 | { 254 | "cell_type": "code", 255 | "metadata": { 256 | "id": "W441stydlIh0" 257 | }, 258 | "source": [ 259 | "" 260 | ], 261 | "execution_count": null, 262 | "outputs": [] 263 | } 264 | ] 265 | } -------------------------------------------------------------------------------- /tutorials/TFAuto_|_Regression.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "nbformat": 4, 3 | "nbformat_minor": 0, 4 | "metadata": { 5 | "colab": { 6 | "name": "TFAuto | Demo and Library testing.ipynb", 7 | "provenance": [], 8 | "collapsed_sections": [], 9 | "authorship_tag": "ABX9TyMuQ65ItlfK8se1uePCq6eE", 10 | "include_colab_link": true 11 | }, 12 | "kernelspec": { 13 | "name": "python3", 14 | "display_name": "Python 3" 15 | }, 16 | "language_info": { 17 | "name": "python" 18 | } 19 | }, 20 | "cells": [ 21 | { 22 | "cell_type": "markdown", 23 | "metadata": { 24 | "id": "view-in-github", 25 | "colab_type": "text" 26 | }, 27 | "source": [ 28 | "\"Open" 29 | ] 30 | }, 31 | { 32 | "cell_type": "code", 33 | "metadata": { 34 | "id": "qfbOrnbYi1CF" 35 | }, 36 | "source": [ 37 | "# !pip install git+https://github.com/rafiqhasan/auto-tensorflow.git\n", 38 | "!pip install auto-tensorflow" 39 | ], 40 | "execution_count": null, 41 | "outputs": [] 42 | }, 43 | { 44 | "cell_type": "code", 45 | "metadata": { 46 | "id": "JPfi5hIckYxY" 47 | }, 48 | "source": [ 49 | "from auto_tensorflow.tfa import TFAuto" 50 | ], 51 | "execution_count": null, 52 | "outputs": [] 53 | }, 54 | { 55 | "cell_type": "markdown", 56 | "metadata": { 57 | "id": "UwPONJGzkSmE" 58 | }, 59 | "source": [ 60 | "### **Download data**" 61 | ] 62 | }, 63 | { 64 | "cell_type": "code", 65 | "metadata": { 66 | "id": "rbTX-qXJi7dd" 67 | }, 68 | "source": [ 69 | "!rm -rf data.*\n", 70 | "!rm -rf /content/*.png\n", 71 | "!rm -rf *trainer.py\n", 72 | "!rm -r /content/train_data\n", 73 | "!rm -r /content/test_data\n", 74 | "!rm -rf untitled_project\n", 75 | "!mkdir /content/train_data\n", 76 | "!mkdir /content/test_data\n", 77 | "!sudo rm -r /content/tfauto" 78 | ], 79 | "execution_count": null, 80 | "outputs": [] 81 | }, 82 | { 83 | "cell_type": "code", 84 | "metadata": { 85 | "id": "PY1LdrO-kWa3" 86 | }, 87 | "source": [ 88 | "# # House price\n", 89 | "%%bash\n", 90 | "cd /content/train_data\n", 91 | "wget https://raw.githubusercontent.com/rafiqhasan/AI_DL_ML_Repo/master/Datasets/house_price/data.csv\n", 92 | "\n", 93 | "cd ../test_data\n", 94 | "wget 
https://raw.githubusercontent.com/rafiqhasan/AI_DL_ML_Repo/master/Datasets/house_price/data.csv ##Taken same data for demonstration purposes only" 95 | ], 96 | "execution_count": null, 97 | "outputs": [] 98 | }, 99 | { 100 | "cell_type": "code", 101 | "metadata": { 102 | "id": "az8gJw7NlnCZ" 103 | }, 104 | "source": [ 105 | "##Initialize TFAuto with root and Data path\n", 106 | "tfa = TFAuto(train_data_path='/content/train_data/', test_data_path='/content/test_data/', path_root='/content/tfauto')" 107 | ], 108 | "execution_count": null, 109 | "outputs": [] 110 | }, 111 | { 112 | "cell_type": "code", 113 | "metadata": { 114 | "id": "_ddQF8rylpKS" 115 | }, 116 | "source": [ 117 | "##Step 1\n", 118 | "##Run Data setup -> Infer Schema, find anomalies, create profile and show viz\n", 119 | "tfa.step_data_explore(viz=False)" 120 | ], 121 | "execution_count": null, 122 | "outputs": [] 123 | }, 124 | { 125 | "cell_type": "code", 126 | "metadata": { 127 | "id": "NSj07bKPlryx" 128 | }, 129 | "source": [ 130 | "##Step 2\n", 131 | "##Run Model Training ->\n", 132 | "tfa.step_model_build(label_column = 'price', model_type='REGRESSION') ##--> Default model_complexity\n", 133 | "# tfa.step_model_build(label_column = 'price', model_type='REGRESSION', model_complexity=0) ##--> Model_complexity = 0 ( Simple model - No HPT )" 134 | ], 135 | "execution_count": null, 136 | "outputs": [] 137 | }, 138 | { 139 | "cell_type": "code", 140 | "metadata": { 141 | "id": "NpOsECxjl3MH" 142 | }, 143 | "source": [ 144 | "##Step 3\n", 145 | "##Show model What-If Tool\n", 146 | "tfa.step_model_whatif()" 147 | ], 148 | "execution_count": null, 149 | "outputs": [] 150 | }, 151 | { 152 | "cell_type": "code", 153 | "metadata": { 154 | "id": "MJk0dJ1PmBeq" 155 | }, 156 | "source": [ 157 | "#Check signature\n", 158 | "!saved_model_cli show --dir \"/content/tfauto/model/1\" --all" 159 | ], 160 | "execution_count": null, 161 | "outputs": [] 162 | }, 163 | { 164 | "cell_type": "markdown", 165 | "metadata": { 166 | "id": "CDczgmy4knkU" 167 | }, 168 | "source": [ 169 | "## **Tensorflow Model Serving**" 170 | ] 171 | }, 172 | { 173 | "cell_type": "code", 174 | "metadata": { 175 | "id": "At1xykbBqiEZ" 176 | }, 177 | "source": [ 178 | "!apt-get remove tensorflow-model-server\n", 179 | "!echo \"deb http://storage.googleapis.com/tensorflow-serving-apt stable tensorflow-model-server tensorflow-model-server-universal\" | tee /etc/apt/sources.list.d/tensorflow-serving.list && \\\n", 180 | "curl https://storage.googleapis.com/tensorflow-serving-apt/tensorflow-serving.release.pub.gpg | apt-key add -\n", 181 | "!apt update\n", 182 | "\n", 183 | "!apt-get install tensorflow-model-server" 184 | ], 185 | "execution_count": null, 186 | "outputs": [] 187 | }, 188 | { 189 | "cell_type": "code", 190 | "metadata": { 191 | "id": "QgWjBqkvks3D" 192 | }, 193 | "source": [ 194 | "###Start Tensorflow server\n", 195 | "# %%bash --bg \n", 196 | "# export TF_CPP_MIN_VLOG_LEVEL=0\n", 197 | "\n", 198 | "%%bash --bg \n", 199 | "nohup tensorflow_model_server \\\n", 200 | " --rest_api_port=8502 \\\n", 201 | " --model_name=model \\\n", 202 | " --model_base_path=\"/content/tfauto/model\" >server.log 2>&1" 203 | ], 204 | "execution_count": null, 205 | "outputs": [] 206 | }, 207 | { 208 | "cell_type": "code", 209 | "metadata": { 210 | "id": "SU3J6T-zk1UR" 211 | }, 212 | "source": [ 213 | "!tail server.log" 214 | ], 215 | "execution_count": null, 216 | "outputs": [] 217 | }, 218 | { 219 | "cell_type": "code", 220 | "metadata": { 221 | "id": "BMG1-nA7k3fn" 222 | }, 223 | 
"source": [ 224 | "import json\n", 225 | "import requests\n", 226 | "\n", 227 | "#Create payload\n", 228 | "data_py = {\"inputs\":{'bedrooms': [[3]],\n", 229 | " 'bathrooms': [[2.0]],\n", 230 | " 'sqft_living': [[1180]],\n", 231 | " 'sqft_lot': [[5650]],\n", 232 | " 'floors': [[2.0]],\n", 233 | " 'waterfront': [[1]],\n", 234 | " 'view': [[1]],\n", 235 | " 'condition': [[3]],\n", 236 | " 'grade': [[7]],\n", 237 | " 'sqft_above': [[1180]],\n", 238 | " 'sqft_basement': [[0]],\n", 239 | " 'yr_built': [[1997]],\n", 240 | " 'sqft_living15': [[1340]],\n", 241 | " 'sqft_lot15': [[5650]]\n", 242 | " }}\n", 243 | " \n", 244 | "data = json.dumps(data_py)\n", 245 | "print(\"payload: \", data)\n", 246 | "\n", 247 | "#Run request on TMS\n", 248 | "headers = {\"content-type\": \"application/json\"}\n", 249 | "json_response = requests.post('http://localhost:8502/v1/models/model:predict', data=data, headers=headers)\n", 250 | "json_response.text" 251 | ], 252 | "execution_count": null, 253 | "outputs": [] 254 | }, 255 | { 256 | "cell_type": "code", 257 | "metadata": { 258 | "id": "W441stydlIh0" 259 | }, 260 | "source": [ 261 | "" 262 | ], 263 | "execution_count": null, 264 | "outputs": [] 265 | } 266 | ] 267 | } --------------------------------------------------------------------------------